Merge branch 'pci/host-generic' into next

* pci/host-generic:
  arm64: Add architectural support for PCI
  PCI: Add pci_remap_iospace() to map bus I/O resources
  of/pci: Add support for parsing PCI host bridge resources from DT
  of/pci: Add pci_get_new_domain_nr() and of_get_pci_domain_nr()
  PCI: Add generic domain handling
  of/pci: Fix the conversion of IO ranges into IO resources
  of/pci: Move of_pci_range_to_resource() to of/address.c
  ARM: Define PCI_IOBASE as the base of virtual PCI IO space
  of/pci: Add pci_register_io_range() and pci_pio_to_address()
  asm-generic/io.h: Fix ioport_map() for !CONFIG_GENERIC_IOMAP

Conflicts:
	drivers/pci/host/pci-tegra.c
diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt
index ed0d9b9..9f4faa8 100644
--- a/Documentation/devicetree/bindings/pci/designware-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt
@@ -23,3 +23,6 @@
 
 Optional properties:
 - reset-gpio: gpio pin number of power good signal
+- bus-range: PCI bus numbers covered (it is recommended that new device trees
+  specify this property; to keep backwards compatibility, a range of 0x00-0xff
+  is assumed if not present)
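+
+  A purely illustrative fragment (the node name, unit address and bus range
+  below are hypothetical; they only show how the property is written):
+
+	pcie@dffff000 {
+		compatible = "snps,dw-pcie";
+		/* ... other required properties as described above ... */
+		bus-range = <0x00 0xff>;
+	};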
diff --git a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
index 0823362..d763e04 100644
--- a/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
+++ b/Documentation/devicetree/bindings/pci/nvidia,tegra20-pcie.txt
@@ -1,7 +1,10 @@
 NVIDIA Tegra PCIe controller
 
 Required properties:
-- compatible: "nvidia,tegra20-pcie" or "nvidia,tegra30-pcie"
+- compatible: Must be one of:
+  - "nvidia,tegra20-pcie"
+  - "nvidia,tegra30-pcie"
+  - "nvidia,tegra124-pcie"
 - device_type: Must be "pci"
 - reg: A list of physical base address and length for each set of controller
   registers. Must contain an entry for each entry in the reg-names property.
@@ -57,6 +60,11 @@
   - afi
   - pcie_x
 
+Required properties on Tegra124 and later:
+- phys: Must contain an entry for each entry in phy-names.
+- phy-names: Must include the following entries:
+  - pcie
+
 Power supplies for Tegra20:
 - avdd-pex-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
 - vdd-pex-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
@@ -84,6 +92,21 @@
     - avdd-pexb-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
     - vdd-pexb-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
 
+Power supplies for Tegra124:
+- Required:
+  - avddio-pex-supply: Power supply for analog PCIe logic. Must supply 1.05 V.
+  - dvddio-pex-supply: Power supply for digital PCIe I/O. Must supply 1.05 V.
+  - avdd-pex-pll-supply: Power supply for dedicated (internal) PCIe PLL. Must
+    supply 1.05 V.
+  - hvdd-pex-supply: High-voltage supply for PCIe I/O and PCIe output clocks.
+    Must supply 3.3 V.
+  - hvdd-pex-pll-e-supply: High-voltage supply for PLLE (shared with USB3).
+    Must supply 3.3 V.
+  - vddio-pex-ctl-supply: Power supply for PCIe control I/O partition. Must
+    supply 2.8-3.3 V.
+  - avdd-pll-erefe-supply: Power supply for PLLE (shared with USB3). Must
+    supply 1.05 V.
+
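+A hypothetical example fragment for the Tegra124-specific additions (the unit
+address, PHY phandle and regulator phandles below are placeholders, not taken
+from a real board file):
+
+	pcie-controller@01003000 {
+		compatible = "nvidia,tegra124-pcie";
+		/* ... reg, interrupts, clocks and resets as described above ... */
+		phys = <&pcie_phy>;
+		phy-names = "pcie";
+		avddio-pex-supply = <&vdd_1v05>;
+		dvddio-pex-supply = <&vdd_1v05>;
+		avdd-pex-pll-supply = <&vdd_1v05>;
+		hvdd-pex-supply = <&vdd_3v3>;
+		hvdd-pex-pll-e-supply = <&vdd_3v3>;
+		vddio-pex-ctl-supply = <&vdd_3v3>;
+		avdd-pll-erefe-supply = <&avdd_1v05>;
+	};
+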
 Root ports are defined as subnodes of the PCIe controller node.
 
 Required properties:
diff --git a/Documentation/devicetree/bindings/pci/pci-keystone.txt b/Documentation/devicetree/bindings/pci/pci-keystone.txt
new file mode 100644
index 0000000..54eae29
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/pci-keystone.txt
@@ -0,0 +1,63 @@
+TI Keystone PCIe interface
+
+The Keystone PCI host controller is based on Designware PCI h/w version 3.65.
+It shares common functions with the PCIe Designware core driver and inherits
+the common properties defined in
+Documentation/devicetree/bindings/pci/designware-pcie.txt
+
+Please refer to Documentation/devicetree/bindings/pci/designware-pcie.txt
+for the details of the Designware DT bindings.  Additional properties are
+described here, as well as properties that are not applicable.
+
+Required Properties:-
+
+compatibility: "ti,keystone-pcie"
+reg:	index 1 is the base address and length of DW application registers.
+	index 2 is the base address and length of PCI device ID register.
+
+pcie_msi_intc : Interrupt controller device node for MSI IRQ chip
+	#interrupt-cells: should be set to 1
+	interrupt-parent: Parent interrupt controller phandle
+	interrupts: GIC interrupt lines connected to PCI MSI interrupt lines
+
+ Example:
+	pcie_msi_intc: msi-interrupt-controller {
+			interrupt-controller;
+			#interrupt-cells = <1>;
+			interrupt-parent = <&gic>;
+			interrupts = <GIC_SPI 30 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 31 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 32 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 33 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 34 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 36 IRQ_TYPE_EDGE_RISING>,
+					<GIC_SPI 37 IRQ_TYPE_EDGE_RISING>;
+	};
+
+pcie_intc: Interrupt controller device node for Legacy IRQ chip
+	#interrupt-cells: should be set to 1
+	interrupt-parent: Parent interrupt controller phandle
+	interrupts: GIC interrupt lines connected to PCI Legacy interrupt lines
+
+ Example:
+	pcie_intc: legacy-interrupt-controller {
+		interrupt-controller;
+		#interrupt-cells = <1>;
+		interrupt-parent = <&gic>;
+		interrupts = <GIC_SPI 26 IRQ_TYPE_EDGE_RISING>,
+			<GIC_SPI 27 IRQ_TYPE_EDGE_RISING>,
+			<GIC_SPI 28 IRQ_TYPE_EDGE_RISING>,
+			<GIC_SPI 29 IRQ_TYPE_EDGE_RISING>;
+	};
+
+Optional properties:-
+	phys: phandle to the Generic Keystone SerDes PHY for PCI
+	phy-names: name of the Generic Keystone SerDes PHY for PCI
+	  - If the boot loader already establishes the PCI link, then phys and
+	    phy-names shouldn't be present.
+
+Designware DT Properties not applicable for Keystone PCI
+
+1. The pcie_bus entry in clock-names is not used.  Instead, a phandle to the
+   PHY is given in phys.
+
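+Example (a hypothetical sketch: the unit address, register addresses and
+phandles are placeholders that only illustrate how the properties described
+above fit together; the phy-names value matches the "pcie-phy" name looked up
+by the driver):
+
+	pcie@21800000 {
+		compatible = "ti,keystone-pcie";
+		device_type = "pci";
+		reg = <0x21801000 0x2000>,	/* index 0: Designware dbi registers */
+		      <0x21800000 0x1000>,	/* index 1: DW application registers */
+		      <0x02620128 4>;		/* index 2: PCI device ID register */
+		/* ... ranges, interrupt-map and other Designware properties ... */
+		phys = <&serdes_pcie>;
+		phy-names = "pcie-phy";
+
+		pcie_msi_intc: msi-interrupt-controller {
+			/* ... as in the MSI example above ... */
+		};
+
+		pcie_intc: legacy-interrupt-controller {
+			/* ... as in the legacy example above ... */
+		};
+	};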
diff --git a/Documentation/devicetree/bindings/pci/xilinx-pcie.txt b/Documentation/devicetree/bindings/pci/xilinx-pcie.txt
new file mode 100644
index 0000000..3e2c88d
--- /dev/null
+++ b/Documentation/devicetree/bindings/pci/xilinx-pcie.txt
@@ -0,0 +1,62 @@
+* Xilinx AXI PCIe Root Port Bridge DT description
+
+Required properties:
+- #address-cells: Address representation for root ports, set to <3>
+- #size-cells: Size representation for root ports, set to <2>
+- #interrupt-cells: specifies the number of cells needed to encode an
+	interrupt source. The value must be 1.
+- compatible: Should contain "xlnx,axi-pcie-host-1.00.a"
+- reg: Should contain AXI PCIe registers location and length
+- device_type: must be "pci"
+- interrupts: Should contain AXI PCIe interrupt
+- interrupt-map-mask,
+  interrupt-map: standard PCI properties to define the mapping of the
+	PCI interface to interrupt numbers.
+- ranges: ranges for the PCI memory regions (I/O space region is not
+	supported by hardware)
+	Please refer to the standard PCI bus binding document for a more
+	detailed explanation
+
+Optional properties:
+- bus-range: PCI bus numbers covered
+
+Interrupt controller child node
++++++++++++++++++++++++++++++++
+Required properties:
+- interrupt-controller: identifies the node as an interrupt controller
+- #address-cells: specifies the number of cells needed to encode an
+	address. The value must be 0.
+- #interrupt-cells: specifies the number of cells needed to encode an
+	interrupt source. The value must be 1.
+
+NOTE:
+The core provides a single interrupt for both INTx and MSI messages.  An
+interrupt controller child node is therefore created to support the
+'interrupt-map' DT functionality.  The driver creates an IRQ domain for this
+map, decodes the four INTx interrupts in the ISR and routes them to it.
+
+
+Example:
+++++++++
+
+	pci_express: axi-pcie@50000000 {
+		#address-cells = <3>;
+		#size-cells = <2>;
+		#interrupt-cells = <1>;
+		compatible = "xlnx,axi-pcie-host-1.00.a";
+		reg = < 0x50000000 0x10000000 >;
+		device_type = "pci";
+		interrupts = < 0 52 4 >;
+		interrupt-map-mask = <0 0 0 7>;
+		interrupt-map = <0 0 0 1 &pcie_intc 1>,
+				<0 0 0 2 &pcie_intc 2>,
+				<0 0 0 3 &pcie_intc 3>,
+				<0 0 0 4 &pcie_intc 4>;
+		ranges = < 0x02000000 0 0x60000000 0x60000000 0 0x10000000 >;
+
+		pcie_intc: interrupt-controller {
+			interrupt-controller;
+			#address-cells = <0>;
+			#interrupt-cells = <1>;
+		};
+	};
diff --git a/Documentation/driver-model/devres.txt b/Documentation/driver-model/devres.txt
index d14710b..befc3fe 100644
--- a/Documentation/driver-model/devres.txt
+++ b/Documentation/driver-model/devres.txt
@@ -264,8 +264,10 @@
 IO region
   devm_release_mem_region()
   devm_release_region()
+  devm_release_resource()
   devm_request_mem_region()
   devm_request_region()
+  devm_request_resource()
 
 IOMAP
   devm_ioport_map()
diff --git a/MAINTAINERS b/MAINTAINERS
index 1ff06de..07fd7e2 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -6870,12 +6870,19 @@
 
 PCI DRIVER FOR IMX6
 M:	Richard Zhu <r65037@freescale.com>
-M:	Shawn Guo <shawn.guo@freescale.com>
+M:	Lucas Stach <l.stach@pengutronix.de>
 L:	linux-pci@vger.kernel.org
 L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:	Maintained
 F:	drivers/pci/host/*imx6*
 
+PCI DRIVER FOR TI KEYSTONE
+M:	Murali Karicheri <m-karicheri2@ti.com>
+L:	linux-pci@vger.kernel.org
+L:	linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:	Maintained
+F:	drivers/pci/host/*keystone*
+
 PCI DRIVER FOR MVEBU (Marvell Armada 370 and Armada XP SOC support)
 M:	Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 M:	Jason Cooper <jason@lakedaemon.net>
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
index fa5f2bb..9d34292 100644
--- a/arch/arm/boot/dts/spear1310.dtsi
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -85,7 +85,8 @@
 
 		pcie0: pcie@b1000000 {
 			compatible = "st,spear1340-pcie", "snps,dw-pcie";
-			reg = <0xb1000000 0x4000>;
+			reg = <0xb1000000 0x4000>, <0x80000000 0x20000>;
+			reg-names = "dbi", "config";
 			interrupts = <0 68 0x4>;
 			interrupt-map-mask = <0 0 0 0>;
 			interrupt-map = <0x0 0 &gic 0 68 0x4>;
@@ -95,15 +96,15 @@
 			#address-cells = <3>;
 			#size-cells = <2>;
 			device_type = "pci";
-			ranges = <0x00000800 0 0x80000000 0x80000000 0 0x00020000   /* configuration space */
-				0x81000000 0 0	 0x80020000 0 0x00010000   /* downstream I/O */
+			ranges = <0x81000000 0 0	 0x80020000 0 0x00010000   /* downstream I/O */
 				0x82000000 0 0x80030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */
 			status = "disabled";
 		};
 
 		pcie1: pcie@b1800000 {
 			compatible = "st,spear1340-pcie", "snps,dw-pcie";
-			reg = <0xb1800000 0x4000>;
+			reg = <0xb1800000 0x4000>, <0x90000000 0x20000>;
+			reg-names = "dbi", "config";
 			interrupts = <0 69 0x4>;
 			interrupt-map-mask = <0 0 0 0>;
 			interrupt-map = <0x0 0 &gic 0 69 0x4>;
@@ -113,15 +114,15 @@
 			#address-cells = <3>;
 			#size-cells = <2>;
 			device_type = "pci";
-			ranges = <0x00000800 0 0x90000000 0x90000000 0 0x00020000   /* configuration space */
-				0x81000000 0 0  0x90020000 0 0x00010000   /* downstream I/O */
+			ranges = <0x81000000 0 0  0x90020000 0 0x00010000   /* downstream I/O */
 				0x82000000 0 0x90030000 0x90030000 0 0x0ffd0000>; /* non-prefetchable memory */
 			status = "disabled";
 		};
 
 		pcie2: pcie@b4000000 {
 			compatible = "st,spear1340-pcie", "snps,dw-pcie";
-			reg = <0xb4000000 0x4000>;
+			reg = <0xb4000000 0x4000>, <0xc0000000 0x20000>;
+			reg-names = "dbi", "config";
 			interrupts = <0 70 0x4>;
 			interrupt-map-mask = <0 0 0 0>;
 			interrupt-map = <0x0 0 &gic 0 70 0x4>;
@@ -131,8 +132,7 @@
 			#address-cells = <3>;
 			#size-cells = <2>;
 			device_type = "pci";
-			ranges = <0x00000800 0 0xc0000000 0xc0000000 0 0x00020000   /* configuration space */
-				0x81000000 0 0	 0xc0020000 0 0x00010000   /* downstream I/O */
+			ranges = <0x81000000 0 0	 0xc0020000 0 0x00010000   /* downstream I/O */
 				0x82000000 0 0xc0030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */
 			status = "disabled";
 		};
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
index e71df0f..13e1aa3 100644
--- a/arch/arm/boot/dts/spear1340.dtsi
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -50,7 +50,8 @@
 
 		pcie0: pcie@b1000000 {
 			compatible = "st,spear1340-pcie", "snps,dw-pcie";
-			reg = <0xb1000000 0x4000>;
+			reg = <0xb1000000 0x4000>, <0x80000000 0x20000>;
+			reg-names = "dbi", "config";
 			interrupts = <0 68 0x4>;
 			interrupt-map-mask = <0 0 0 0>;
 			interrupt-map = <0x0 0 &gic 0 68 0x4>;
@@ -60,8 +61,7 @@
 			#address-cells = <3>;
 			#size-cells = <2>;
 			device_type = "pci";
-			ranges = <0x00000800 0 0x80000000 0x80000000 0 0x00020000   /* configuration space */
-				0x81000000 0 0	 0x80020000 0 0x00010000   /* downstream I/O */
+			ranges = <0x81000000 0 0	 0x80020000 0 0x00010000   /* downstream I/O */
 				0x82000000 0 0x80030000 0xc0030000 0 0x0ffd0000>; /* non-prefetchable memory */
 			status = "disabled";
 		};
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 059a76c..7b20bcc 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -81,14 +81,14 @@
  */
 DEFINE_RAW_SPINLOCK(pci_config_lock);
 
-static int can_skip_ioresource_align(const struct dmi_system_id *d)
+static int __init can_skip_ioresource_align(const struct dmi_system_id *d)
 {
 	pci_probe |= PCI_CAN_SKIP_ISA_ALIGN;
 	printk(KERN_INFO "PCI: %s detected, can skip ISA alignment\n", d->ident);
 	return 0;
 }
 
-static const struct dmi_system_id can_skip_pciprobe_dmi_table[] = {
+static const struct dmi_system_id can_skip_pciprobe_dmi_table[] __initconst = {
 /*
  * Systems where PCI IO resource ISA alignment can be skipped
  * when the ISA enable bit in the bridge control is not set
@@ -186,7 +186,7 @@
  * on the kernel command line (which was parsed earlier).
  */
 
-static int set_bf_sort(const struct dmi_system_id *d)
+static int __init set_bf_sort(const struct dmi_system_id *d)
 {
 	if (pci_bf_sort == pci_bf_sort_default) {
 		pci_bf_sort = pci_dmi_bf;
@@ -195,8 +195,8 @@
 	return 0;
 }
 
-static void read_dmi_type_b1(const struct dmi_header *dm,
-				       void *private_data)
+static void __init read_dmi_type_b1(const struct dmi_header *dm,
+				    void *private_data)
 {
 	u8 *d = (u8 *)dm + 4;
 
@@ -217,7 +217,7 @@
 	}
 }
 
-static int find_sort_method(const struct dmi_system_id *d)
+static int __init find_sort_method(const struct dmi_system_id *d)
 {
 	dmi_walk(read_dmi_type_b1, NULL);
 
@@ -232,7 +232,7 @@
  * Enable renumbering of PCI bus# ranges to reach all PCI busses (Cardbus)
  */
 #ifdef __i386__
-static int assign_all_busses(const struct dmi_system_id *d)
+static int __init assign_all_busses(const struct dmi_system_id *d)
 {
 	pci_probe |= PCI_ASSIGN_ALL_BUSSES;
 	printk(KERN_INFO "%s detected: enabling PCI bus# renumbering"
@@ -241,7 +241,7 @@
 }
 #endif
 
-static int set_scan_all(const struct dmi_system_id *d)
+static int __init set_scan_all(const struct dmi_system_id *d)
 {
 	printk(KERN_INFO "PCI: %s detected, enabling pci=pcie_scan_all\n",
 	       d->ident);
@@ -249,7 +249,7 @@
 	return 0;
 }
 
-static const struct dmi_system_id pciprobe_dmi_table[] = {
+static const struct dmi_system_id pciprobe_dmi_table[] __initconst = {
 #ifdef __i386__
 /*
  * Laptops which need pci=assign-busses to see Cardbus cards
@@ -512,7 +512,7 @@
 	return 0;
 }
 
-char * __init pcibios_setup(char *str)
+char *__init pcibios_setup(char *str)
 {
 	if (!strcmp(str, "off")) {
 		pci_probe = 0;
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 248642f..326198a 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -31,7 +31,7 @@
 
 LIST_HEAD(pci_mmcfg_list);
 
-static __init void pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
+static void __init pci_mmconfig_remove(struct pci_mmcfg_region *cfg)
 {
 	if (cfg->res.parent)
 		release_resource(&cfg->res);
@@ -39,7 +39,7 @@
 	kfree(cfg);
 }
 
-static __init void free_all_mmcfg(void)
+static void __init free_all_mmcfg(void)
 {
 	struct pci_mmcfg_region *cfg, *tmp;
 
@@ -93,7 +93,7 @@
 	return new;
 }
 
-static __init struct pci_mmcfg_region *pci_mmconfig_add(int segment, int start,
+static struct pci_mmcfg_region *__init pci_mmconfig_add(int segment, int start,
 							int end, u64 addr)
 {
 	struct pci_mmcfg_region *new;
@@ -125,7 +125,7 @@
 	return NULL;
 }
 
-static const char __init *pci_mmcfg_e7520(void)
+static const char *__init pci_mmcfg_e7520(void)
 {
 	u32 win;
 	raw_pci_ops->read(0, 0, PCI_DEVFN(0, 0), 0xce, 2, &win);
@@ -140,7 +140,7 @@
 	return "Intel Corporation E7520 Memory Controller Hub";
 }
 
-static const char __init *pci_mmcfg_intel_945(void)
+static const char *__init pci_mmcfg_intel_945(void)
 {
 	u32 pciexbar, mask = 0, len = 0;
 
@@ -184,7 +184,7 @@
 	return "Intel Corporation 945G/GZ/P/PL Express Memory Controller Hub";
 }
 
-static const char __init *pci_mmcfg_amd_fam10h(void)
+static const char *__init pci_mmcfg_amd_fam10h(void)
 {
 	u32 low, high, address;
 	u64 base, msr;
@@ -235,21 +235,25 @@
 }
 
 static bool __initdata mcp55_checked;
-static const char __init *pci_mmcfg_nvidia_mcp55(void)
+static const char *__init pci_mmcfg_nvidia_mcp55(void)
 {
 	int bus;
 	int mcp55_mmconf_found = 0;
 
-	static const u32 extcfg_regnum		= 0x90;
-	static const u32 extcfg_regsize		= 4;
-	static const u32 extcfg_enable_mask	= 1<<31;
-	static const u32 extcfg_start_mask	= 0xff<<16;
-	static const int extcfg_start_shift	= 16;
-	static const u32 extcfg_size_mask	= 0x3<<28;
-	static const int extcfg_size_shift	= 28;
-	static const int extcfg_sizebus[]	= {0x100, 0x80, 0x40, 0x20};
-	static const u32 extcfg_base_mask[]	= {0x7ff8, 0x7ffc, 0x7ffe, 0x7fff};
-	static const int extcfg_base_lshift	= 25;
+	static const u32 extcfg_regnum __initconst	= 0x90;
+	static const u32 extcfg_regsize __initconst	= 4;
+	static const u32 extcfg_enable_mask __initconst	= 1 << 31;
+	static const u32 extcfg_start_mask __initconst	= 0xff << 16;
+	static const int extcfg_start_shift __initconst	= 16;
+	static const u32 extcfg_size_mask __initconst	= 0x3 << 28;
+	static const int extcfg_size_shift __initconst	= 28;
+	static const int extcfg_sizebus[] __initconst	= {
+		0x100, 0x80, 0x40, 0x20
+	};
+	static const u32 extcfg_base_mask[] __initconst	= {
+		0x7ff8, 0x7ffc, 0x7ffe, 0x7fff
+	};
+	static const int extcfg_base_lshift __initconst	= 25;
 
 	/*
 	 * do check if amd fam10h already took over
@@ -302,7 +306,7 @@
 	const char *(*probe)(void);
 };
 
-static struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initdata = {
+static const struct pci_mmcfg_hostbridge_probe pci_mmcfg_probes[] __initconst = {
 	{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
 	  PCI_DEVICE_ID_INTEL_E7520_MCH, pci_mmcfg_e7520 },
 	{ 0, PCI_DEVFN(0, 0), PCI_VENDOR_ID_INTEL,
diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
index c77b24a..9b83b90 100644
--- a/arch/x86/pci/pcbios.c
+++ b/arch/x86/pci/pcbios.c
@@ -79,13 +79,13 @@
 static struct {
 	unsigned long address;
 	unsigned short segment;
-} bios32_indirect = { 0, __KERNEL_CS };
+} bios32_indirect __initdata = { 0, __KERNEL_CS };
 
 /*
  * Returns the entry point for the given service, NULL on error
  */
 
-static unsigned long bios32_service(unsigned long service)
+static unsigned long __init bios32_service(unsigned long service)
 {
 	unsigned char return_code;	/* %al */
 	unsigned long address;		/* %ebx */
@@ -124,7 +124,7 @@
 
 static int pci_bios_present;
 
-static int check_pcibios(void)
+static int __init check_pcibios(void)
 {
 	u32 signature, eax, ebx, ecx;
 	u8 status, major_ver, minor_ver, hw_mech;
@@ -312,7 +312,7 @@
  * Try to find PCI BIOS.
  */
 
-static const struct pci_raw_ops *pci_find_bios(void)
+static const struct pci_raw_ops *__init pci_find_bios(void)
 {
 	union bios32 *check;
 	unsigned char sum;
diff --git a/drivers/gpu/drm/vmwgfx/svga_reg.h b/drivers/gpu/drm/vmwgfx/svga_reg.h
index 11323dd..e4259c2 100644
--- a/drivers/gpu/drm/vmwgfx/svga_reg.h
+++ b/drivers/gpu/drm/vmwgfx/svga_reg.h
@@ -35,7 +35,6 @@
 /*
  * PCI device IDs.
  */
-#define PCI_VENDOR_ID_VMWARE            0x15AD
 #define PCI_DEVICE_ID_VMWARE_SVGA2      0x0405
 
 /*
diff --git a/drivers/misc/vmw_vmci/vmci_guest.c b/drivers/misc/vmw_vmci/vmci_guest.c
index 248399a..189b325 100644
--- a/drivers/misc/vmw_vmci/vmci_guest.c
+++ b/drivers/misc/vmw_vmci/vmci_guest.c
@@ -35,7 +35,6 @@
 #include "vmci_driver.h"
 #include "vmci_event.h"
 
-#define PCI_VENDOR_ID_VMWARE		0x15AD
 #define PCI_DEVICE_ID_VMWARE_VMCI	0x0740
 
 #define VMCI_UTIL_NUM_RESOURCES 1
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 29ee77f2..c388ef5 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -117,7 +117,6 @@
 /*
  * PCI vendor and device IDs.
  */
-#define PCI_VENDOR_ID_VMWARE            0x15AD
 #define PCI_DEVICE_ID_VMWARE_VMXNET3    0x07B0
 #define MAX_ETHERNET_CARDS		10
 #define MAX_PCI_PASSTHRU_DEVICE		6
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 8922c37..34134d6 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -63,4 +63,23 @@
 	help
 	  Say Y here if you want PCIe support on SPEAr13XX SoCs.
 
+
+config PCI_KEYSTONE
+	bool "TI Keystone PCIe controller"
+	depends on ARCH_KEYSTONE
+	select PCIE_DW
+	select PCIEPORTBUS
+	help
+	  Say Y here if you want to enable PCI controller support on Keystone
+	  SoCs.  The PCI controller on Keystone is based on Designware hardware,
+	  so the driver re-uses the Designware core functions for most of its
+	  implementation.
+
+config PCIE_XILINX
+	bool "Xilinx AXI PCIe host bridge support"
+	depends on ARCH_ZYNQ
+	help
+	  Say 'Y' here if you want the kernel to support the Xilinx AXI PCIe
+	  Host Bridge driver.
+
 endmenu
diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile
index d0e88f1..182929c 100644
--- a/drivers/pci/host/Makefile
+++ b/drivers/pci/host/Makefile
@@ -8,3 +8,5 @@
 obj-$(CONFIG_PCI_RCAR_GEN2_PCIE) += pcie-rcar.o
 obj-$(CONFIG_PCI_HOST_GENERIC) += pci-host-generic.o
 obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o
+obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o
+obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o
diff --git a/drivers/pci/host/pci-imx6.c b/drivers/pci/host/pci-imx6.c
index a568efa..233fe8a 100644
--- a/drivers/pci/host/pci-imx6.c
+++ b/drivers/pci/host/pci-imx6.c
@@ -49,6 +49,9 @@
 
 /* PCIe Port Logic registers (memory-mapped) */
 #define PL_OFFSET 0x700
+#define PCIE_PL_PFLR (PL_OFFSET + 0x08)
+#define PCIE_PL_PFLR_LINK_STATE_MASK		(0x3f << 16)
+#define PCIE_PL_PFLR_FORCE_LINK			(1 << 15)
 #define PCIE_PHY_DEBUG_R0 (PL_OFFSET + 0x28)
 #define PCIE_PHY_DEBUG_R1 (PL_OFFSET + 0x2c)
 #define PCIE_PHY_DEBUG_R1_XMLH_LINK_IN_TRAINING	(1 << 29)
@@ -214,6 +217,32 @@
 static int imx6_pcie_assert_core_reset(struct pcie_port *pp)
 {
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
+	u32 val, gpr1, gpr12;
+
+	/*
+	 * If the bootloader already enabled the link we need some special
+	 * handling to get the core back into a state where it is safe to
+	 * touch it for configuration.  As there is no dedicated reset signal
+	 * wired up for MX6QDL, we need to manually force LTSSM into "detect"
+	 * state before completely disabling LTSSM, which is a prerequisite
+	 * for core configuration.
+	 *
+	 * If both LTSSM_ENABLE and REF_SSP_ENABLE are active we have a strong
+	 * indication that the bootloader activated the link.
+	 */
+	regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1, &gpr1);
+	regmap_read(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12, &gpr12);
+
+	if ((gpr1 & IMX6Q_GPR1_PCIE_REF_CLK_EN) &&
+	    (gpr12 & IMX6Q_GPR12_PCIE_CTL_2)) {
+		val = readl(pp->dbi_base + PCIE_PL_PFLR);
+		val &= ~PCIE_PL_PFLR_LINK_STATE_MASK;
+		val |= PCIE_PL_PFLR_FORCE_LINK;
+		writel(val, pp->dbi_base + PCIE_PL_PFLR);
+
+		regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR12,
+				IMX6Q_GPR12_PCIE_CTL_2, 0 << 10);
+	}
 
 	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
 			IMX6Q_GPR1_PCIE_TEST_PD, 1 << 18);
@@ -228,11 +257,6 @@
 	struct imx6_pcie *imx6_pcie = to_imx6_pcie(pp);
 	int ret;
 
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-			IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
-	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
-			IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
-
 	ret = clk_prepare_enable(imx6_pcie->pcie_phy);
 	if (ret) {
 		dev_err(pp->dev, "unable to enable pcie_phy clock\n");
@@ -254,6 +278,12 @@
 	/* allow the clocks to stabilize */
 	usleep_range(200, 500);
 
+	/* power up core phy and enable ref clock */
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+			IMX6Q_GPR1_PCIE_TEST_PD, 0 << 18);
+	regmap_update_bits(imx6_pcie->iomuxc_gpr, IOMUXC_GPR1,
+			IMX6Q_GPR1_PCIE_REF_CLK_EN, 1 << 16);
+
 	/* Some boards don't have PCIe reset GPIO. */
 	if (gpio_is_valid(imx6_pcie->reset_gpio)) {
 		gpio_set_value(imx6_pcie->reset_gpio, 0);
@@ -589,6 +619,14 @@
 	return 0;
 }
 
+static void imx6_pcie_shutdown(struct platform_device *pdev)
+{
+	struct imx6_pcie *imx6_pcie = platform_get_drvdata(pdev);
+
+	/* bring down link, so bootloader gets clean state in case of reboot */
+	imx6_pcie_assert_core_reset(&imx6_pcie->pp);
+}
+
 static const struct of_device_id imx6_pcie_of_match[] = {
 	{ .compatible = "fsl,imx6q-pcie", },
 	{},
@@ -601,6 +639,7 @@
 		.owner	= THIS_MODULE,
 		.of_match_table = imx6_pcie_of_match,
 	},
+	.shutdown = imx6_pcie_shutdown,
 };
 
 /* Freescale PCIe driver does not allow module unload */
@@ -609,7 +648,7 @@
 {
 	return platform_driver_probe(&imx6_pcie_driver, imx6_pcie_probe);
 }
-fs_initcall(imx6_pcie_init);
+module_init(imx6_pcie_init);
 
 MODULE_AUTHOR("Sean Cross <xobs@kosagi.com>");
 MODULE_DESCRIPTION("Freescale i.MX6 PCIe host controller driver");
diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c
new file mode 100644
index 0000000..34086ce
--- /dev/null
+++ b/drivers/pci/host/pci-keystone-dw.c
@@ -0,0 +1,516 @@
+/*
+ * Designware application register space functions for Keystone PCI controller
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ *		http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#include "pcie-designware.h"
+#include "pci-keystone.h"
+
+/* Application register defines */
+#define LTSSM_EN_VAL		        1
+#define LTSSM_STATE_MASK		0x1f
+#define LTSSM_STATE_L0			0x11
+#define DBI_CS2_EN_VAL			0x20
+#define OB_XLAT_EN_VAL		        2
+
+/* Application registers */
+#define CMD_STATUS			0x004
+#define CFG_SETUP			0x008
+#define OB_SIZE				0x030
+#define CFG_PCIM_WIN_SZ_IDX		3
+#define CFG_PCIM_WIN_CNT		32
+#define SPACE0_REMOTE_CFG_OFFSET	0x1000
+#define OB_OFFSET_INDEX(n)		(0x200 + (8 * n))
+#define OB_OFFSET_HI(n)			(0x204 + (8 * n))
+
+/* IRQ register defines */
+#define IRQ_EOI				0x050
+#define IRQ_STATUS			0x184
+#define IRQ_ENABLE_SET			0x188
+#define IRQ_ENABLE_CLR			0x18c
+
+#define MSI_IRQ				0x054
+#define MSI0_IRQ_STATUS			0x104
+#define MSI0_IRQ_ENABLE_SET		0x108
+#define MSI0_IRQ_ENABLE_CLR		0x10c
+#define IRQ_STATUS			0x184
+#define MSI_IRQ_OFFSET			4
+
+/* Config space registers */
+#define DEBUG0				0x728
+
+#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)
+
+static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
+{
+	return sys->private_data;
+}
+
+static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset,
+					     u32 *bit_pos)
+{
+	*reg_offset = offset % 8;
+	*bit_pos = offset >> 3;
+}
+
+u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+	return ks_pcie->app.start + MSI_IRQ;
+}
+
+void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 pending, vector;
+	int src, virq;
+
+	pending = readl(ks_pcie->va_app_base + MSI0_IRQ_STATUS + (offset << 4));
+
+	/*
+	 * MSI0 status bits 0-3 show vectors 0, 8, 16, 24; MSI1 status bits
+	 * 0-3 show vectors 1, 9, 17, 25; and so forth.
+	 */
+	for (src = 0; src < 4; src++) {
+		if (BIT(src) & pending) {
+			vector = offset + (src << 3);
+			virq = irq_linear_revmap(pp->irq_domain, vector);
+			dev_dbg(pp->dev, "irq: bit %d, vector %d, virq %d\n",
+				src, vector, virq);
+			generic_handle_irq(virq);
+		}
+	}
+}
+
+static void ks_dw_pcie_msi_irq_ack(struct irq_data *d)
+{
+	u32 offset, reg_offset, bit_pos;
+	struct keystone_pcie *ks_pcie;
+	unsigned int irq = d->irq;
+	struct msi_desc *msi;
+	struct pcie_port *pp;
+
+	msi = irq_get_msi_desc(irq);
+	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	ks_pcie = to_keystone_pcie(pp);
+	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+	update_reg_offset_bit_pos(offset, &reg_offset, &bit_pos);
+
+	writel(BIT(bit_pos),
+	       ks_pcie->va_app_base + MSI0_IRQ_STATUS + (reg_offset << 4));
+	writel(reg_offset + MSI_IRQ_OFFSET, ks_pcie->va_app_base + IRQ_EOI);
+}
+
+void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq)
+{
+	u32 reg_offset, bit_pos;
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+	writel(BIT(bit_pos),
+	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_SET + (reg_offset << 4));
+}
+
+void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq)
+{
+	u32 reg_offset, bit_pos;
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+	update_reg_offset_bit_pos(irq, &reg_offset, &bit_pos);
+	writel(BIT(bit_pos),
+	       ks_pcie->va_app_base + MSI0_IRQ_ENABLE_CLR + (reg_offset << 4));
+}
+
+static void ks_dw_pcie_msi_irq_mask(struct irq_data *d)
+{
+	struct keystone_pcie *ks_pcie;
+	unsigned int irq = d->irq;
+	struct msi_desc *msi;
+	struct pcie_port *pp;
+	u32 offset;
+
+	msi = irq_get_msi_desc(irq);
+	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	ks_pcie = to_keystone_pcie(pp);
+	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+
+	/* Mask the endpoint if per-vector masking (PVM) is implemented */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		if (msi->msi_attrib.maskbit)
+			mask_msi_irq(d);
+	}
+
+	ks_dw_pcie_msi_clear_irq(pp, offset);
+}
+
+static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d)
+{
+	struct keystone_pcie *ks_pcie;
+	unsigned int irq = d->irq;
+	struct msi_desc *msi;
+	struct pcie_port *pp;
+	u32 offset;
+
+	msi = irq_get_msi_desc(irq);
+	pp = sys_to_pcie(msi->dev->bus->sysdata);
+	ks_pcie = to_keystone_pcie(pp);
+	offset = irq - irq_linear_revmap(pp->irq_domain, 0);
+
+	/* Unmask the endpoint if per-vector masking (PVM) is implemented */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		if (msi->msi_attrib.maskbit)
+			unmask_msi_irq(d);
+	}
+
+	ks_dw_pcie_msi_set_irq(pp, offset);
+}
+
+static struct irq_chip ks_dw_pcie_msi_irq_chip = {
+	.name = "Keystone-PCIe-MSI-IRQ",
+	.irq_ack = ks_dw_pcie_msi_irq_ack,
+	.irq_mask = ks_dw_pcie_msi_irq_mask,
+	.irq_unmask = ks_dw_pcie_msi_irq_unmask,
+};
+
+static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+			      irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	set_irq_flags(irq, IRQF_VALID);
+
+	return 0;
+}
+
+const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = {
+	.map = ks_dw_pcie_msi_map,
+};
+
+int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	int i;
+
+	pp->irq_domain = irq_domain_add_linear(ks_pcie->msi_intc_np,
+					MAX_MSI_IRQS,
+					&ks_dw_pcie_msi_domain_ops,
+					chip);
+	if (!pp->irq_domain) {
+		dev_err(pp->dev, "irq domain init failed\n");
+		return -ENXIO;
+	}
+
+	for (i = 0; i < MAX_MSI_IRQS; i++)
+		irq_create_mapping(pp->irq_domain, i);
+
+	return 0;
+}
+
+void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie)
+{
+	int i;
+
+	for (i = 0; i < MAX_LEGACY_IRQS; i++)
+		writel(0x1, ks_pcie->va_app_base + IRQ_ENABLE_SET + (i << 4));
+}
+
+void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 pending;
+	int virq;
+
+	pending = readl(ks_pcie->va_app_base + IRQ_STATUS + (offset << 4));
+
+	if (BIT(0) & pending) {
+		virq = irq_linear_revmap(ks_pcie->legacy_irq_domain, offset);
+		dev_dbg(pp->dev, ": irq: irq_offset %d, virq %d\n", offset,
+			virq);
+		generic_handle_irq(virq);
+	}
+
+	/* EOI the INTx interrupt */
+	writel(offset, ks_pcie->va_app_base + IRQ_EOI);
+}
+
+static void ks_dw_pcie_ack_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_dw_pcie_mask_legacy_irq(struct irq_data *d)
+{
+}
+
+static void ks_dw_pcie_unmask_legacy_irq(struct irq_data *d)
+{
+}
+
+static struct irq_chip ks_dw_pcie_legacy_irq_chip = {
+	.name = "Keystone-PCI-Legacy-IRQ",
+	.irq_ack = ks_dw_pcie_ack_legacy_irq,
+	.irq_mask = ks_dw_pcie_mask_legacy_irq,
+	.irq_unmask = ks_dw_pcie_unmask_legacy_irq,
+};
+
+static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d,
+				unsigned int irq, irq_hw_number_t hw_irq)
+{
+	irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip,
+				 handle_level_irq);
+	irq_set_chip_data(irq, d->host_data);
+	set_irq_flags(irq, IRQF_VALID);
+
+	return 0;
+}
+
+static const struct irq_domain_ops ks_dw_pcie_legacy_irq_domain_ops = {
+	.map = ks_dw_pcie_init_legacy_irq_map,
+	.xlate = irq_domain_xlate_onetwocell,
+};
+
+/**
+ * ks_dw_pcie_set_dbi_mode() - Set DBI mode to access overlaid BAR mask
+ * registers
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_dw_pcie_set_dbi_mode(void __iomem *reg_virt)
+{
+	u32 val;
+
+	writel(DBI_CS2_EN_VAL | readl(reg_virt + CMD_STATUS),
+	       reg_virt + CMD_STATUS);
+
+	do {
+		val = readl(reg_virt + CMD_STATUS);
+	} while (!(val & DBI_CS2_EN_VAL));
+}
+
+/**
+ * ks_dw_pcie_clear_dbi_mode() - Disable DBI mode
+ *
+ * Since modification of dbi_cs2 involves a different clock domain, read the
+ * status back to ensure the transition is complete.
+ */
+static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt)
+{
+	u32 val;
+
+	writel(~DBI_CS2_EN_VAL & readl(reg_virt + CMD_STATUS),
+		     reg_virt + CMD_STATUS);
+
+	do {
+		val = readl(reg_virt + CMD_STATUS);
+	} while (val & DBI_CS2_EN_VAL);
+}
+
+void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 start = pp->mem.start, end = pp->mem.end;
+	int i, tr_size;
+
+	/* Disable BARs for inbound access */
+	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_0);
+	writel(0, pp->dbi_base + PCI_BASE_ADDRESS_1);
+	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+
+	/* Set outbound translation size per window division */
+	writel(CFG_PCIM_WIN_SZ_IDX & 0x7, ks_pcie->va_app_base + OB_SIZE);
+
+	tr_size = (1 << (CFG_PCIM_WIN_SZ_IDX & 0x7)) * SZ_1M;
+
+	/* Using Direct 1:1 mapping of RC <-> PCI memory space */
+	for (i = 0; (i < CFG_PCIM_WIN_CNT) && (start < end); i++) {
+		writel(start | 1, ks_pcie->va_app_base + OB_OFFSET_INDEX(i));
+		writel(0, ks_pcie->va_app_base + OB_OFFSET_HI(i));
+		start += tr_size;
+	}
+
+	/* Enable OB translation */
+	writel(OB_XLAT_EN_VAL | readl(ks_pcie->va_app_base + CMD_STATUS),
+	       ks_pcie->va_app_base + CMD_STATUS);
+}
+
+/**
+ * ks_pcie_cfg_setup() - Set up configuration space address for a device
+ *
+ * @ks_pcie: ptr to keystone_pcie structure
+ * @bus: Bus number the device is residing on
+ * @devfn: device, function number info
+ *
+ * Forms and returns the address of configuration space mapped in PCIESS
+ * address space 0.  Also configures CFG_SETUP for remote configuration space
+ * access.
+ *
+ * The address space has two regions for configuration access: local and remote.
+ * The local region is used for bus 0 (as the RC is attached to bus 0) and the
+ * remote region for all other buses, using TYPE 1 accesses when bus > 1.  A
+ * device on bus 1 gets a TYPE 0 access, as bus 1 is our (logical) secondary bus.
+ * CFG_SETUP is needed only for remote configuration accesses.
+ */
+static void __iomem *ks_pcie_cfg_setup(struct keystone_pcie *ks_pcie, u8 bus,
+				       unsigned int devfn)
+{
+	u8 device = PCI_SLOT(devfn), function = PCI_FUNC(devfn);
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 regval;
+
+	if (bus == 0)
+		return pp->dbi_base;
+
+	regval = (bus << 16) | (device << 8) | function;
+
+	/*
+	 * Bus 1 is a virtual (secondary) bus, so it only needs TYPE 0
+	 * accesses; buses beyond it need TYPE 1 accesses, which are
+	 * selected by setting bit 24.
+	 */
+	if (bus != 1)
+		regval |= BIT(24);
+
+	writel(regval, ks_pcie->va_app_base + CFG_SETUP);
+	return pp->va_cfg0_base;
+}
+
+int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+			     unsigned int devfn, int where, int size, u32 *val)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	u8 bus_num = bus->number;
+	void __iomem *addr;
+
+	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
+
+	return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val);
+}
+
+int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+			     unsigned int devfn, int where, int size, u32 val)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	u8 bus_num = bus->number;
+	void __iomem *addr;
+
+	addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn);
+
+	return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val);
+}
+
+/**
+ * ks_dw_pcie_v3_65_scan_bus() - keystone scan_bus post initialization
+ *
+ * This sets BAR0 to enable inbound access for MSI_IRQ register
+ */
+void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+
+	/* Configure and set up BAR0 */
+	ks_dw_pcie_set_dbi_mode(ks_pcie->va_app_base);
+
+	/* Enable BAR0 */
+	writel(1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+	writel(SZ_4K - 1, pp->dbi_base + PCI_BASE_ADDRESS_0);
+
+	ks_dw_pcie_clear_dbi_mode(ks_pcie->va_app_base);
+
+	 /*
+	  * For BAR0, just setting bus address for inbound writes (MSI) should
+	  * be sufficient.  Use physical address to avoid any conflicts.
+	  */
+	writel(ks_pcie->app.start, pp->dbi_base + PCI_BASE_ADDRESS_0);
+}
+
+/**
+ * ks_dw_pcie_link_up() - Check if link up
+ */
+int ks_dw_pcie_link_up(struct pcie_port *pp)
+{
+	u32 val = readl(pp->dbi_base + DEBUG0);
+
+	return (val & LTSSM_STATE_MASK) == LTSSM_STATE_L0;
+}
+
+void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie)
+{
+	u32 val;
+
+	/* Disable Link training */
+	val = readl(ks_pcie->va_app_base + CMD_STATUS);
+	val &= ~LTSSM_EN_VAL;
+	writel(LTSSM_EN_VAL | val,  ks_pcie->va_app_base + CMD_STATUS);
+
+	/* Initiate Link Training */
+	val = readl(ks_pcie->va_app_base + CMD_STATUS);
+	writel(LTSSM_EN_VAL | val,  ks_pcie->va_app_base + CMD_STATUS);
+}
+
+/**
+ * ks_dw_pcie_host_init() - initialize host for v3_65 dw hardware
+ *
+ * Ioremap the register resources, initialize the legacy irq domain
+ * and call dw_pcie_host_init() to initialize the Keystone
+ * PCI host controller.
+ */
+int __init ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
+				struct device_node *msi_intc_np)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	struct platform_device *pdev = to_platform_device(pp->dev);
+	struct resource *res;
+
+	/* Index 0 is the config reg. space address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	pp->dbi_base = devm_ioremap_resource(pp->dev, res);
+	if (IS_ERR(pp->dbi_base))
+		return PTR_ERR(pp->dbi_base);
+
+	/*
+	 * We set these to the same value; they are used by the PCIe
+	 * rd/wr_other_conf functions.
+	 */
+	pp->va_cfg0_base = pp->dbi_base + SPACE0_REMOTE_CFG_OFFSET;
+	pp->va_cfg1_base = pp->va_cfg0_base;
+
+	/* Index 1 is the application reg. space address */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
+	ks_pcie->app = *res;
+	ks_pcie->va_app_base = devm_ioremap_resource(pp->dev, res);
+	if (IS_ERR(ks_pcie->va_app_base))
+		return PTR_ERR(ks_pcie->va_app_base);
+
+	/* Create legacy IRQ domain */
+	ks_pcie->legacy_irq_domain =
+			irq_domain_add_linear(ks_pcie->legacy_intc_np,
+					MAX_LEGACY_IRQS,
+					&ks_dw_pcie_legacy_irq_domain_ops,
+					NULL);
+	if (!ks_pcie->legacy_irq_domain) {
+		dev_err(pp->dev, "Failed to add irq domain for legacy irqs\n");
+		return -EINVAL;
+	}
+
+	return dw_pcie_host_init(pp);
+}
diff --git a/drivers/pci/host/pci-keystone.c b/drivers/pci/host/pci-keystone.c
new file mode 100644
index 0000000..1b893bc
--- /dev/null
+++ b/drivers/pci/host/pci-keystone.c
@@ -0,0 +1,415 @@
+/*
+ * PCIe host controller driver for Texas Instruments Keystone SoCs
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ *		http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ * Implementation based on pci-exynos.c and pcie-designware.c
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#include <linux/irqchip/chained_irq.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/irqdomain.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_irq.h>
+#include <linux/of.h>
+#include <linux/of_pci.h>
+#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
+#include <linux/resource.h>
+#include <linux/signal.h>
+
+#include "pcie-designware.h"
+#include "pci-keystone.h"
+
+#define DRIVER_NAME	"keystone-pcie"
+
+/* driver specific constants */
+#define MAX_MSI_HOST_IRQS		8
+#define MAX_LEGACY_HOST_IRQS		4
+
+/* DEV_STAT_CTRL */
+#define PCIE_CAP_BASE		0x70
+
+/* PCIE controller device IDs */
+#define PCIE_RC_K2HK		0xb008
+#define PCIE_RC_K2E		0xb009
+#define PCIE_RC_K2L		0xb00a
+
+#define to_keystone_pcie(x)	container_of(x, struct keystone_pcie, pp)
+
+static void quirk_limit_mrrs(struct pci_dev *dev)
+{
+	struct pci_bus *bus = dev->bus;
+	struct pci_dev *bridge = bus->self;
+	static const struct pci_device_id rc_pci_devids[] = {
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2HK),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2E),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ PCI_DEVICE(PCI_VENDOR_ID_TI, PCIE_RC_K2L),
+		 .class = PCI_CLASS_BRIDGE_PCI << 8, .class_mask = ~0, },
+		{ 0, },
+	};
+
+	if (pci_is_root_bus(bus))
+		return;
+
+	/* look for the host bridge */
+	while (!pci_is_root_bus(bus)) {
+		bridge = bus->self;
+		bus = bus->parent;
+	}
+
+	if (bridge) {
+		/*
+		 * Keystone PCI controller has a h/w limitation of
+		 * 256 bytes maximum read request size.  It can't handle
+		 * anything higher than this.  So force this limit on
+		 * all downstream devices.
+		 */
+		if (pci_match_id(rc_pci_devids, bridge)) {
+			if (pcie_get_readrq(dev) > 256) {
+				dev_info(&dev->dev, "limiting MRRS to 256\n");
+				pcie_set_readrq(dev, 256);
+			}
+		}
+	}
+}
+DECLARE_PCI_FIXUP_ENABLE(PCI_ANY_ID, PCI_ANY_ID, quirk_limit_mrrs);
+
+static int ks_pcie_establish_link(struct keystone_pcie *ks_pcie)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	int count = 200;
+
+	dw_pcie_setup_rc(pp);
+
+	if (dw_pcie_link_up(pp)) {
+		dev_err(pp->dev, "Link already up\n");
+		return 0;
+	}
+
+	ks_dw_pcie_initiate_link_train(ks_pcie);
+	/* check if the link is up or not */
+	while (!dw_pcie_link_up(pp)) {
+		usleep_range(100, 1000);
+		if (--count) {
+			ks_dw_pcie_initiate_link_train(ks_pcie);
+			continue;
+		}
+		dev_err(pp->dev, "phy link never came up\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void ks_pcie_msi_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+	u32 offset = irq - ks_pcie->msi_host_irqs[0];
+	struct pcie_port *pp = &ks_pcie->pp;
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	dev_dbg(pp->dev, "ks_pci_msi_irq_handler, irq %d\n", irq);
+
+	/*
+	 * The chained irq handler installation replaces the normal interrupt
+	 * driver handler, so we need to take care of the mask/unmask and
+	 * ack operations here.
+	 */
+	chained_irq_enter(chip, desc);
+	ks_dw_pcie_handle_msi_irq(ks_pcie, offset);
+	chained_irq_exit(chip, desc);
+}
+
+/**
+ * ks_pcie_legacy_irq_handler() - Handle legacy interrupt
+ * @irq: IRQ line for legacy interrupts
+ * @desc: Pointer to irq descriptor
+ *
+ * Traverse through pending legacy interrupts and invoke handler for each. Also
+ * takes care of interrupt controller level mask/ack operation.
+ */
+static void ks_pcie_legacy_irq_handler(unsigned int irq, struct irq_desc *desc)
+{
+	struct keystone_pcie *ks_pcie = irq_desc_get_handler_data(desc);
+	struct pcie_port *pp = &ks_pcie->pp;
+	u32 irq_offset = irq - ks_pcie->legacy_host_irqs[0];
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
+	dev_dbg(pp->dev, ": Handling legacy irq %d\n", irq);
+
+	/*
+	 * The chained irq handler installation replaces the normal interrupt
+	 * driver handler, so we need to take care of the mask/unmask and
+	 * ack operations here.
+	 */
+	chained_irq_enter(chip, desc);
+	ks_dw_pcie_handle_legacy_irq(ks_pcie, irq_offset);
+	chained_irq_exit(chip, desc);
+}
+
+static int ks_pcie_get_irq_controller_info(struct keystone_pcie *ks_pcie,
+					   char *controller, int *num_irqs)
+{
+	int temp, max_host_irqs, legacy = 1, *host_irqs, ret = -EINVAL;
+	struct device *dev = ks_pcie->pp.dev;
+	struct device_node *np_pcie = dev->of_node, **np_temp;
+
+	if (!strcmp(controller, "msi-interrupt-controller"))
+		legacy = 0;
+
+	if (legacy) {
+		np_temp = &ks_pcie->legacy_intc_np;
+		max_host_irqs = MAX_LEGACY_HOST_IRQS;
+		host_irqs = &ks_pcie->legacy_host_irqs[0];
+	} else {
+		np_temp = &ks_pcie->msi_intc_np;
+		max_host_irqs = MAX_MSI_HOST_IRQS;
+		host_irqs =  &ks_pcie->msi_host_irqs[0];
+	}
+
+	/* interrupt controller is in a child node */
+	*np_temp = of_find_node_by_name(np_pcie, controller);
+	if (!(*np_temp)) {
+		dev_err(dev, "Node for %s is absent\n", controller);
+		goto out;
+	}
+	temp = of_irq_count(*np_temp);
+	if (!temp)
+		goto out;
+	if (temp > max_host_irqs)
+		dev_warn(dev, "Too many %s interrupts defined %u\n",
+			(legacy ? "legacy" : "MSI"), temp);
+
+	/*
+	 * Support up to max_host_irqs; in the DT these are indices 0 to 3
+	 * (legacy) or 0 to 7 (MSI).
+	 */
+	for (temp = 0; temp < max_host_irqs; temp++) {
+		host_irqs[temp] = irq_of_parse_and_map(*np_temp, temp);
+		if (host_irqs[temp] < 0)
+			break;
+	}
+	if (temp) {
+		*num_irqs = temp;
+		ret = 0;
+	}
+out:
+	return ret;
+}
+
+static void ks_pcie_setup_interrupts(struct keystone_pcie *ks_pcie)
+{
+	int i;
+
+	/* Legacy IRQ */
+	for (i = 0; i < ks_pcie->num_legacy_host_irqs; i++) {
+		irq_set_handler_data(ks_pcie->legacy_host_irqs[i], ks_pcie);
+		irq_set_chained_handler(ks_pcie->legacy_host_irqs[i],
+					ks_pcie_legacy_irq_handler);
+	}
+	ks_dw_pcie_enable_legacy_irqs(ks_pcie);
+
+	/* MSI IRQ */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		for (i = 0; i < ks_pcie->num_msi_host_irqs; i++) {
+			irq_set_chained_handler(ks_pcie->msi_host_irqs[i],
+						ks_pcie_msi_irq_handler);
+			irq_set_handler_data(ks_pcie->msi_host_irqs[i],
+					     ks_pcie);
+		}
+	}
+}
+
+/*
+ * When a PCI device does not exist during config cycles, the Keystone host gets
+ * a bus error instead of returning 0xffffffff.  This handler always returns 0
+ * for this kind of fault.
+ */
+static int keystone_pcie_fault(unsigned long addr, unsigned int fsr,
+				struct pt_regs *regs)
+{
+	unsigned long instr = *(unsigned long *) instruction_pointer(regs);
+
+	if ((instr & 0x0e100090) == 0x00100090) {
+		int reg = (instr >> 12) & 15;
+
+		regs->uregs[reg] = -1;
+		regs->ARM_pc += 4;
+	}
+
+	return 0;
+}
+
+static void __init ks_pcie_host_init(struct pcie_port *pp)
+{
+	struct keystone_pcie *ks_pcie = to_keystone_pcie(pp);
+	u32 val;
+
+	ks_pcie_establish_link(ks_pcie);
+	ks_dw_pcie_setup_rc_app_regs(ks_pcie);
+	ks_pcie_setup_interrupts(ks_pcie);
+	writew(PCI_IO_RANGE_TYPE_32 | (PCI_IO_RANGE_TYPE_32 << 8),
+			pp->dbi_base + PCI_IO_BASE);
+
+	/* update the Device ID */
+	writew(ks_pcie->device_id, pp->dbi_base + PCI_DEVICE_ID);
+
+	/* update the DEV_STAT_CTRL to publish right mrrs */
+	val = readl(pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+	val &= ~PCI_EXP_DEVCTL_READRQ;
+	/* set the mrrs to 256 bytes */
+	val |= BIT(12);
+	writel(val, pp->dbi_base + PCIE_CAP_BASE + PCI_EXP_DEVCTL);
+
+	/*
+	 * PCIe access errors that result in OCP errors are caught by ARM as
+	 * "External aborts"
+	 */
+	hook_fault_code(17, keystone_pcie_fault, SIGBUS, 0,
+			"Asynchronous external abort");
+}
+
+static struct pcie_host_ops keystone_pcie_host_ops = {
+	.rd_other_conf = ks_dw_pcie_rd_other_conf,
+	.wr_other_conf = ks_dw_pcie_wr_other_conf,
+	.link_up = ks_dw_pcie_link_up,
+	.host_init = ks_pcie_host_init,
+	.msi_set_irq = ks_dw_pcie_msi_set_irq,
+	.msi_clear_irq = ks_dw_pcie_msi_clear_irq,
+	.get_msi_addr = ks_dw_pcie_get_msi_addr,
+	.msi_host_init = ks_dw_pcie_msi_host_init,
+	.scan_bus = ks_dw_pcie_v3_65_scan_bus,
+};
+
+static int __init ks_add_pcie_port(struct keystone_pcie *ks_pcie,
+			 struct platform_device *pdev)
+{
+	struct pcie_port *pp = &ks_pcie->pp;
+	int ret;
+
+	ret = ks_pcie_get_irq_controller_info(ks_pcie,
+					"legacy-interrupt-controller",
+					&ks_pcie->num_legacy_host_irqs);
+	if (ret)
+		return ret;
+
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		ret = ks_pcie_get_irq_controller_info(ks_pcie,
+						"msi-interrupt-controller",
+						&ks_pcie->num_msi_host_irqs);
+		if (ret)
+			return ret;
+	}
+
+	pp->root_bus_nr = -1;
+	pp->ops = &keystone_pcie_host_ops;
+	ret = ks_dw_pcie_host_init(ks_pcie, ks_pcie->msi_intc_np);
+	if (ret) {
+		dev_err(&pdev->dev, "failed to initialize host\n");
+		return ret;
+	}
+
+	return ret;
+}
+
+static const struct of_device_id ks_pcie_of_match[] = {
+	{
+		.type = "pci",
+		.compatible = "ti,keystone-pcie",
+	},
+	{ },
+};
+MODULE_DEVICE_TABLE(of, ks_pcie_of_match);
+
+static int __exit ks_pcie_remove(struct platform_device *pdev)
+{
+	struct keystone_pcie *ks_pcie = platform_get_drvdata(pdev);
+
+	clk_disable_unprepare(ks_pcie->clk);
+
+	return 0;
+}
+
+static int __init ks_pcie_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	struct keystone_pcie *ks_pcie;
+	struct pcie_port *pp;
+	struct resource *res;
+	void __iomem *reg_p;
+	struct phy *phy;
+	int ret = 0;
+
+	ks_pcie = devm_kzalloc(&pdev->dev, sizeof(*ks_pcie),
+				GFP_KERNEL);
+	if (!ks_pcie) {
+		dev_err(dev, "no memory for keystone pcie\n");
+		return -ENOMEM;
+	}
+	pp = &ks_pcie->pp;
+
+	/* initialize SerDes Phy if present */
+	phy = devm_phy_get(dev, "pcie-phy");
+	if (!IS_ERR_OR_NULL(phy)) {
+		ret = phy_init(phy);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* index 2 is to read PCI DEVICE_ID */
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
+	reg_p = devm_ioremap_resource(dev, res);
+	if (IS_ERR(reg_p))
+		return PTR_ERR(reg_p);
+	ks_pcie->device_id = readl(reg_p) >> 16;
+	devm_iounmap(dev, reg_p);
+	devm_release_mem_region(dev, res->start, resource_size(res));
+
+	pp->dev = dev;
+	platform_set_drvdata(pdev, ks_pcie);
+	ks_pcie->clk = devm_clk_get(dev, "pcie");
+	if (IS_ERR(ks_pcie->clk)) {
+		dev_err(dev, "Failed to get pcie rc clock\n");
+		return PTR_ERR(ks_pcie->clk);
+	}
+	ret = clk_prepare_enable(ks_pcie->clk);
+	if (ret)
+		return ret;
+
+	ret = ks_add_pcie_port(ks_pcie, pdev);
+	if (ret < 0)
+		goto fail_clk;
+
+	return 0;
+fail_clk:
+	clk_disable_unprepare(ks_pcie->clk);
+
+	return ret;
+}
+
+static struct platform_driver ks_pcie_driver __refdata = {
+	.probe  = ks_pcie_probe,
+	.remove = __exit_p(ks_pcie_remove),
+	.driver = {
+		.name	= "keystone-pcie",
+		.owner	= THIS_MODULE,
+		.of_match_table = of_match_ptr(ks_pcie_of_match),
+	},
+};
+
+module_platform_driver(ks_pcie_driver);
+
+MODULE_AUTHOR("Murali Karicheri <m-karicheri2@ti.com>");
+MODULE_DESCRIPTION("Keystone PCIe host controller driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h
new file mode 100644
index 0000000..1fc1fce
--- /dev/null
+++ b/drivers/pci/host/pci-keystone.h
@@ -0,0 +1,58 @@
+/*
+ * Keystone PCI Controller's common includes
+ *
+ * Copyright (C) 2013-2014 Texas Instruments., Ltd.
+ *		http://www.ti.com
+ *
+ * Author: Murali Karicheri <m-karicheri2@ti.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#define MAX_LEGACY_IRQS			4
+#define MAX_MSI_HOST_IRQS		8
+#define MAX_LEGACY_HOST_IRQS		4
+
+struct keystone_pcie {
+	struct	clk		*clk;
+	struct	pcie_port	pp;
+	/* PCI Device ID */
+	u32			device_id;
+	int			num_legacy_host_irqs;
+	int			legacy_host_irqs[MAX_LEGACY_HOST_IRQS];
+	struct			device_node *legacy_intc_np;
+
+	int			num_msi_host_irqs;
+	int			msi_host_irqs[MAX_MSI_HOST_IRQS];
+	struct			device_node *msi_intc_np;
+	struct irq_domain	*legacy_irq_domain;
+
+	/* Application register space */
+	void __iomem		*va_app_base;
+	struct resource		app;
+};
+
+/* Keystone DW specific MSI controller APIs/definitions */
+void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset);
+u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp);
+
+/* Keystone specific PCI controller APIs */
+void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie);
+void ks_dw_pcie_handle_legacy_irq(struct keystone_pcie *ks_pcie, int offset);
+int  ks_dw_pcie_host_init(struct keystone_pcie *ks_pcie,
+			struct device_node *msi_intc_np);
+int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+		unsigned int devfn, int where, int size, u32 val);
+int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus,
+		unsigned int devfn, int where, int size, u32 *val);
+void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie);
+int ks_dw_pcie_link_up(struct pcie_port *pp);
+void ks_dw_pcie_initiate_link_train(struct keystone_pcie *ks_pcie);
+void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq);
+void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq);
+void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp);
+int ks_dw_pcie_msi_host_init(struct pcie_port *pp,
+		struct msi_chip *chip);
diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c
index a8c6f1a..b1315e1 100644
--- a/drivers/pci/host/pci-mvebu.c
+++ b/drivers/pci/host/pci-mvebu.c
@@ -873,7 +873,7 @@
 	rangesz = pna + na + ns;
 	nranges = rlen / sizeof(__be32) / rangesz;
 
-	for (i = 0; i < nranges; i++) {
+	for (i = 0; i < nranges; i++, range += rangesz) {
 		u32 flags = of_read_number(range, 1);
 		u32 slot = of_read_number(range + 1, 1);
 		u64 cpuaddr = of_read_number(range + na, pna);
@@ -883,14 +883,14 @@
 			rtype = IORESOURCE_IO;
 		else if (DT_FLAGS_TO_TYPE(flags) == DT_TYPE_MEM32)
 			rtype = IORESOURCE_MEM;
+		else
+			continue;
 
 		if (slot == PCI_SLOT(devfn) && type == rtype) {
 			*tgt = DT_CPUADDR_TO_TARGET(cpuaddr);
 			*attr = DT_CPUADDR_TO_ATTR(cpuaddr);
 			return 0;
 		}
-
-		range += rangesz;
 	}
 
 	return -ENOENT;
diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c
index 946935d..3d43874 100644
--- a/drivers/pci/host/pci-tegra.c
+++ b/drivers/pci/host/pci-tegra.c
@@ -38,6 +38,7 @@
 #include <linux/of_pci.h>
 #include <linux/of_platform.h>
 #include <linux/pci.h>
+#include <linux/phy/phy.h>
 #include <linux/platform_device.h>
 #include <linux/reset.h>
 #include <linux/sizes.h>
@@ -115,13 +116,20 @@
 
 #define AFI_INTR_CODE			0xb8
 #define  AFI_INTR_CODE_MASK		0xf
-#define  AFI_INTR_AXI_SLAVE_ERROR	1
-#define  AFI_INTR_AXI_DECODE_ERROR	2
+#define  AFI_INTR_INI_SLAVE_ERROR	1
+#define  AFI_INTR_INI_DECODE_ERROR	2
 #define  AFI_INTR_TARGET_ABORT		3
 #define  AFI_INTR_MASTER_ABORT		4
 #define  AFI_INTR_INVALID_WRITE		5
 #define  AFI_INTR_LEGACY		6
 #define  AFI_INTR_FPCI_DECODE_ERROR	7
+#define  AFI_INTR_AXI_DECODE_ERROR	8
+#define  AFI_INTR_FPCI_TIMEOUT		9
+#define  AFI_INTR_PE_PRSNT_SENSE	10
+#define  AFI_INTR_PE_CLKREQ_SENSE	11
+#define  AFI_INTR_CLKCLAMP_SENSE	12
+#define  AFI_INTR_RDY4PD_SENSE		13
+#define  AFI_INTR_P2P_ERROR		14
 
 #define AFI_INTR_SIGNATURE	0xbc
 #define AFI_UPPER_FPCI_ADDRESS	0xc0
@@ -152,8 +160,10 @@
 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK	(0xf << 20)
 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_SINGLE	(0x0 << 20)
 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_420	(0x0 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1	(0x0 << 20)
 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_DUAL	(0x1 << 20)
 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_222	(0x1 << 20)
+#define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1	(0x1 << 20)
 #define  AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_411	(0x2 << 20)
 
 #define AFI_FUSE			0x104
@@ -165,12 +175,21 @@
 #define  AFI_PEX_CTRL_RST		(1 << 0)
 #define  AFI_PEX_CTRL_CLKREQ_EN		(1 << 1)
 #define  AFI_PEX_CTRL_REFCLK_EN		(1 << 3)
+#define  AFI_PEX_CTRL_OVERRIDE_EN	(1 << 4)
+
+#define AFI_PLLE_CONTROL		0x160
+#define  AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL (1 << 9)
+#define  AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN (1 << 1)
 
 #define AFI_PEXBIAS_CTRL_0		0x168
 
 #define RP_VEND_XP	0x00000F00
 #define  RP_VEND_XP_DL_UP	(1 << 30)
 
+#define RP_PRIV_MISC	0x00000FE0
+#define  RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT (0xE << 0)
+#define  RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT (0xF << 0)
+
 #define RP_LINK_CONTROL_STATUS			0x00000090
 #define  RP_LINK_CONTROL_STATUS_DL_LINK_ACTIVE	0x20000000
 #define  RP_LINK_CONTROL_STATUS_LINKSTAT_MASK	0x3fff0000
@@ -197,6 +216,7 @@
 
 #define PADS_REFCLK_CFG0			0x000000C8
 #define PADS_REFCLK_CFG1			0x000000CC
+#define PADS_REFCLK_BIAS			0x000000D0
 
 /*
  * Fields in PADS_REFCLK_CFG*. Those registers form an array of 16-bit
@@ -236,6 +256,7 @@
 	bool has_pex_bias_ctrl;
 	bool has_intr_prsnt_sense;
 	bool has_cml_clk;
+	bool has_gen2;
 };
 
 static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip)
@@ -253,6 +274,7 @@
 	struct list_head buses;
 	struct resource *cs;
 
+	struct resource all;
 	struct resource io;
 	struct resource mem;
 	struct resource prefetch;
@@ -267,6 +289,8 @@
 	struct reset_control *afi_rst;
 	struct reset_control *pcie_xrst;
 
+	struct phy *phy;
+
 	struct tegra_msi msi;
 
 	struct list_head ports;
@@ -382,7 +406,7 @@
 	for (i = 0; i < 16; i++) {
 		unsigned long virt = (unsigned long)bus->area->addr +
 				     i * SZ_64K;
-		phys_addr_t phys = cs + i * SZ_1M + busnr * SZ_64K;
+		phys_addr_t phys = cs + i * SZ_16M + busnr * SZ_64K;
 
 		err = ioremap_page_range(virt, virt + SZ_64K, phys, prot);
 		if (err < 0) {
@@ -561,6 +585,8 @@
 	if (soc->has_pex_clkreq_en)
 		value |= AFI_PEX_CTRL_CLKREQ_EN;
 
+	value |= AFI_PEX_CTRL_OVERRIDE_EN;
+
 	afi_writel(port->pcie, value, ctrl);
 
 	tegra_pcie_port_reset(port);
@@ -568,6 +594,7 @@
 
 static void tegra_pcie_port_disable(struct tegra_pcie_port *port)
 {
+	const struct tegra_pcie_soc_data *soc = port->pcie->soc_data;
 	unsigned long ctrl = tegra_pcie_port_get_pex_ctrl(port);
 	unsigned long value;
 
@@ -578,6 +605,10 @@
 
 	/* disable reference clock */
 	value = afi_readl(port->pcie, ctrl);
+
+	if (soc->has_pex_clkreq_en)
+		value &= ~AFI_PEX_CTRL_CLKREQ_EN;
+
 	value &= ~AFI_PEX_CTRL_REFCLK_EN;
 	afi_writel(port->pcie, value, ctrl);
 }
@@ -626,7 +657,18 @@
 static int tegra_pcie_setup(int nr, struct pci_sys_data *sys)
 {
 	struct tegra_pcie *pcie = sys_to_pcie(sys);
-	phys_addr_t io_start = pci_pio_to_address(pcie->io.start);
+	int err;
+	phys_addr_t io_start;
+
+	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->mem);
+	if (err < 0)
+		return err;
+
+	err = devm_request_resource(pcie->dev, &pcie->all, &pcie->prefetch);
+	if (err)
+		return err;
+
+	io_start = pci_pio_to_address(pcie->io.start);
 
 	pci_add_resource_offset(&sys->resources, &pcie->mem, sys->mem_offset);
 	pci_add_resource_offset(&sys->resources, &pcie->prefetch,
@@ -685,9 +727,15 @@
 		"Target abort",
 		"Master abort",
 		"Invalid write",
+		"Legacy interrupt",
 		"Response decoding error",
 		"AXI response decoding error",
 		"Transaction timeout",
+		"Slot present pin change",
+		"Slot clock request change",
+		"TMS clock ramp change",
+		"TMS ready for power down",
+		"Peer2Peer error",
 	};
 	struct tegra_pcie *pcie = arg;
 	u32 code, signature;
@@ -794,30 +842,27 @@
 	afi_writel(pcie, 0, AFI_MSI_BAR_SZ);
 }
 
-static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+static int tegra_pcie_pll_wait(struct tegra_pcie *pcie, unsigned long timeout)
 {
 	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
-	struct tegra_pcie_port *port;
-	unsigned int timeout;
-	unsigned long value;
+	u32 value;
 
-	/* power down PCIe slot clock bias pad */
-	if (soc->has_pex_bias_ctrl)
-		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
+	timeout = jiffies + msecs_to_jiffies(timeout);
 
-	/* configure mode and disable all ports */
-	value = afi_readl(pcie, AFI_PCIE_CONFIG);
-	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
-	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
+	while (time_before(jiffies, timeout)) {
+		value = pads_readl(pcie, soc->pads_pll_ctl);
+		if (value & PADS_PLL_CTL_LOCKDET)
+			return 0;
+	}
 
-	list_for_each_entry(port, &pcie->ports, list)
-		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+	return -ETIMEDOUT;
+}
 
-	afi_writel(pcie, value, AFI_PCIE_CONFIG);
-
-	value = afi_readl(pcie, AFI_FUSE);
-	value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
-	afi_writel(pcie, value, AFI_FUSE);
+static int tegra_pcie_phy_enable(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	u32 value;
+	int err;
 
 	/* initialize internal PHY, enable up to 16 PCIE lanes */
 	pads_writel(pcie, 0x0, PADS_CTL_SEL);
@@ -836,6 +881,13 @@
 	value |= PADS_PLL_CTL_REFCLK_INTERNAL_CML | soc->tx_ref_sel;
 	pads_writel(pcie, value, soc->pads_pll_ctl);
 
+	/* reset PLL */
+	value = pads_readl(pcie, soc->pads_pll_ctl);
+	value &= ~PADS_PLL_CTL_RST_B4SM;
+	pads_writel(pcie, value, soc->pads_pll_ctl);
+
+	usleep_range(20, 100);
+
 	/* take PLL out of reset  */
 	value = pads_readl(pcie, soc->pads_pll_ctl);
 	value |= PADS_PLL_CTL_RST_B4SM;
@@ -848,15 +900,11 @@
 		pads_writel(pcie, PADS_REFCLK_CFG_VALUE, PADS_REFCLK_CFG1);
 
 	/* wait for the PLL to lock */
-	timeout = 300;
-	do {
-		value = pads_readl(pcie, soc->pads_pll_ctl);
-		usleep_range(1000, 2000);
-		if (--timeout == 0) {
-			pr_err("Tegra PCIe error: timeout waiting for PLL\n");
-			return -EBUSY;
-		}
-	} while (!(value & PADS_PLL_CTL_LOCKDET));
+	err = tegra_pcie_pll_wait(pcie, 500);
+	if (err < 0) {
+		dev_err(pcie->dev, "PLL failed to lock: %d\n", err);
+		return err;
+	}
 
 	/* turn off IDDQ override */
 	value = pads_readl(pcie, PADS_CTL);
@@ -868,6 +916,58 @@
 	value |= PADS_CTL_TX_DATA_EN_1L | PADS_CTL_RX_DATA_EN_1L;
 	pads_writel(pcie, value, PADS_CTL);
 
+	return 0;
+}
+
+static int tegra_pcie_enable_controller(struct tegra_pcie *pcie)
+{
+	const struct tegra_pcie_soc_data *soc = pcie->soc_data;
+	struct tegra_pcie_port *port;
+	unsigned long value;
+	int err;
+
+	/* enable PLL power down */
+	if (pcie->phy) {
+		value = afi_readl(pcie, AFI_PLLE_CONTROL);
+		value &= ~AFI_PLLE_CONTROL_BYPASS_PADS2PLLE_CONTROL;
+		value |= AFI_PLLE_CONTROL_PADS2PLLE_CONTROL_EN;
+		afi_writel(pcie, value, AFI_PLLE_CONTROL);
+	}
+
+	/* power down PCIe slot clock bias pad */
+	if (soc->has_pex_bias_ctrl)
+		afi_writel(pcie, 0, AFI_PEXBIAS_CTRL_0);
+
+	/* configure mode and disable all ports */
+	value = afi_readl(pcie, AFI_PCIE_CONFIG);
+	value &= ~AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_MASK;
+	value |= AFI_PCIE_CONFIG_PCIE_DISABLE_ALL | pcie->xbar_config;
+
+	list_for_each_entry(port, &pcie->ports, list)
+		value &= ~AFI_PCIE_CONFIG_PCIE_DISABLE(port->index);
+
+	afi_writel(pcie, value, AFI_PCIE_CONFIG);
+
+	if (soc->has_gen2) {
+		value = afi_readl(pcie, AFI_FUSE);
+		value &= ~AFI_FUSE_PCIE_T0_GEN2_DIS;
+		afi_writel(pcie, value, AFI_FUSE);
+	} else {
+		value = afi_readl(pcie, AFI_FUSE);
+		value |= AFI_FUSE_PCIE_T0_GEN2_DIS;
+		afi_writel(pcie, value, AFI_FUSE);
+	}
+
+	if (!pcie->phy)
+		err = tegra_pcie_phy_enable(pcie);
+	else
+		err = phy_power_on(pcie->phy);
+
+	if (err < 0) {
+		dev_err(pcie->dev, "failed to power on PHY: %d\n", err);
+		return err;
+	}
+
 	/* take the PCIe interface module out of reset */
 	reset_control_deassert(pcie->pcie_xrst);
 
@@ -901,6 +1001,10 @@
 
 	/* TODO: disable and unprepare clocks? */
 
+	err = phy_power_off(pcie->phy);
+	if (err < 0)
+		dev_warn(pcie->dev, "failed to power off PHY: %d\n", err);
+
 	reset_control_assert(pcie->pcie_xrst);
 	reset_control_assert(pcie->afi_rst);
 	reset_control_assert(pcie->pex_rst);
@@ -1022,6 +1126,19 @@
 		return err;
 	}
 
+	pcie->phy = devm_phy_optional_get(pcie->dev, "pcie");
+	if (IS_ERR(pcie->phy)) {
+		err = PTR_ERR(pcie->phy);
+		dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
+		return err;
+	}
+
+	err = phy_init(pcie->phy);
+	if (err < 0) {
+		dev_err(&pdev->dev, "failed to initialize PHY: %d\n", err);
+		return err;
+	}
+
 	err = tegra_pcie_power_on(pcie);
 	if (err) {
 		dev_err(&pdev->dev, "failed to power up: %d\n", err);
@@ -1080,10 +1197,17 @@
 
 static int tegra_pcie_put_resources(struct tegra_pcie *pcie)
 {
+	int err;
+
 	if (pcie->irq > 0)
 		free_irq(pcie->irq, pcie);
 
 	tegra_pcie_power_off(pcie);
+
+	err = phy_exit(pcie->phy);
+	if (err < 0)
+		dev_err(pcie->dev, "failed to teardown PHY: %d\n", err);
+
 	return 0;
 }
 
@@ -1172,8 +1296,10 @@
 		return hwirq;
 
 	irq = irq_create_mapping(msi->domain, hwirq);
-	if (!irq)
+	if (!irq) {
+		tegra_msi_free(msi, hwirq);
 		return -EINVAL;
+	}
 
 	irq_set_msi_desc(irq, desc);
 
@@ -1191,8 +1317,10 @@
 {
 	struct tegra_msi *msi = to_tegra_msi(chip);
 	struct irq_data *d = irq_get_irq_data(irq);
+	irq_hw_number_t hwirq = irqd_to_hwirq(d);
 
-	tegra_msi_free(msi, d->hwirq);
+	irq_dispose_mapping(irq);
+	tegra_msi_free(msi, hwirq);
 }
 
 static struct irq_chip tegra_msi_irq_chip = {
@@ -1329,7 +1457,19 @@
 {
 	struct device_node *np = pcie->dev->of_node;
 
-	if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+		switch (lanes) {
+		case 0x0000104:
+			dev_info(pcie->dev, "4x1, 1x1 configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X4_X1;
+			return 0;
+
+		case 0x0000102:
+			dev_info(pcie->dev, "2x1, 1x1 configuration\n");
+			*xbar = AFI_PCIE_CONFIG_SM2TMS0_XBAR_CONFIG_X2_X1;
+			return 0;
+		}
+	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
 		switch (lanes) {
 		case 0x00000204:
 			dev_info(pcie->dev, "4x1, 2x1 configuration\n");
@@ -1437,7 +1577,23 @@
 	struct device_node *np = pcie->dev->of_node;
 	unsigned int i = 0;
 
-	if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
+	if (of_device_is_compatible(np, "nvidia,tegra124-pcie")) {
+		pcie->num_supplies = 7;
+
+		pcie->supplies = devm_kcalloc(pcie->dev, pcie->num_supplies,
+					      sizeof(*pcie->supplies),
+					      GFP_KERNEL);
+		if (!pcie->supplies)
+			return -ENOMEM;
+
+		pcie->supplies[i++].supply = "avddio-pex";
+		pcie->supplies[i++].supply = "dvddio-pex";
+		pcie->supplies[i++].supply = "avdd-pex-pll";
+		pcie->supplies[i++].supply = "hvdd-pex";
+		pcie->supplies[i++].supply = "hvdd-pex-pll-e";
+		pcie->supplies[i++].supply = "vddio-pex-ctl";
+		pcie->supplies[i++].supply = "avdd-pll-erefe";
+	} else if (of_device_is_compatible(np, "nvidia,tegra30-pcie")) {
 		bool need_pexa = false, need_pexb = false;
 
 		/* VDD_PEXA and AVDD_PEXA supply lanes 0 to 3 */
@@ -1516,6 +1672,12 @@
 	struct resource res;
 	int err;
 
+	memset(&pcie->all, 0, sizeof(pcie->all));
+	pcie->all.flags = IORESOURCE_MEM;
+	pcie->all.name = np->full_name;
+	pcie->all.start = ~0;
+	pcie->all.end = 0;
+
 	if (of_pci_range_parser_init(&parser, np)) {
 		dev_err(pcie->dev, "missing \"ranges\" property\n");
 		return -EINVAL;
@@ -1529,21 +1691,31 @@
 		switch (res.flags & IORESOURCE_TYPE_BITS) {
 		case IORESOURCE_IO:
 			memcpy(&pcie->io, &res, sizeof(res));
-			pcie->io.name = "I/O";
+			pcie->io.name = np->full_name;
 			break;
 
 		case IORESOURCE_MEM:
 			if (res.flags & IORESOURCE_PREFETCH) {
 				memcpy(&pcie->prefetch, &res, sizeof(res));
-				pcie->prefetch.name = "PREFETCH";
+				pcie->prefetch.name = "prefetchable";
 			} else {
 				memcpy(&pcie->mem, &res, sizeof(res));
-				pcie->mem.name = "MEM";
+				pcie->mem.name = "non-prefetchable";
 			}
 			break;
 		}
+
+		if (res.start <= pcie->all.start)
+			pcie->all.start = res.start;
+
+		if (res.end >= pcie->all.end)
+			pcie->all.end = res.end;
 	}
 
+	err = devm_request_resource(pcie->dev, &iomem_resource, &pcie->all);
+	if (err < 0)
+		return err;
+
 	err = of_pci_parse_bus_range(np, &pcie->busn);
 	if (err < 0) {
 		dev_err(pcie->dev, "failed to parse ranges property: %d\n",
@@ -1645,6 +1817,12 @@
 	unsigned int retries = 3;
 	unsigned long value;
 
+	/* override presence detection */
+	value = readl(port->base + RP_PRIV_MISC);
+	value &= ~RP_PRIV_MISC_PRSNT_MAP_EP_ABSNT;
+	value |= RP_PRIV_MISC_PRSNT_MAP_EP_PRSNT;
+	writel(value, port->base + RP_PRIV_MISC);
+
 	do {
 		unsigned int timeout = TEGRA_PCIE_LINKUP_TIMEOUT;
 
@@ -1725,6 +1903,7 @@
 	.has_pex_bias_ctrl = false,
 	.has_intr_prsnt_sense = false,
 	.has_cml_clk = false,
+	.has_gen2 = false,
 };
 
 static const struct tegra_pcie_soc_data tegra30_pcie_data = {
@@ -1736,9 +1915,23 @@
 	.has_pex_bias_ctrl = true,
 	.has_intr_prsnt_sense = true,
 	.has_cml_clk = true,
+	.has_gen2 = false,
+};
+
+static const struct tegra_pcie_soc_data tegra124_pcie_data = {
+	.num_ports = 2,
+	.msi_base_shift = 8,
+	.pads_pll_ctl = PADS_PLL_CTL_TEGRA30,
+	.tx_ref_sel = PADS_PLL_CTL_TXCLKREF_BUF_EN,
+	.has_pex_clkreq_en = true,
+	.has_pex_bias_ctrl = true,
+	.has_intr_prsnt_sense = true,
+	.has_cml_clk = true,
+	.has_gen2 = true,
 };
 
 static const struct of_device_id tegra_pcie_of_match[] = {
+	{ .compatible = "nvidia,tegra124-pcie", .data = &tegra124_pcie_data },
 	{ .compatible = "nvidia,tegra30-pcie", .data = &tegra30_pcie_data },
 	{ .compatible = "nvidia,tegra20-pcie", .data = &tegra20_pcie_data },
 	{ },
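Note: tegra_pcie_pll_wait() above replaces the fixed 300-iteration poll with a jiffies deadline, so the wait is bounded in time rather than in loop count. A minimal sketch of that polling idiom, with a hypothetical poll_ready() callback standing in for the PADS register read:

#include <linux/jiffies.h>
#include <linux/errno.h>
#include <linux/types.h>

static int wait_for_ready(bool (*poll_ready)(void *ctx), void *ctx,
			  unsigned long timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	/* Keep sampling until the condition holds or the deadline passes. */
	while (time_before(jiffies, deadline)) {
		if (poll_ready(ctx))
			return 0;
	}

	return -ETIMEDOUT;
}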
diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c
index 52bd3a1..34e7366 100644
--- a/drivers/pci/host/pcie-designware.c
+++ b/drivers/pci/host/pcie-designware.c
@@ -73,6 +73,8 @@
 
 static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys)
 {
+	BUG_ON(!sys->private_data);
+
 	return sys->private_data;
 }
 
@@ -261,11 +263,6 @@
 	int irq, pos0, pos1, i;
 	struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata);
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
 	pos0 = find_first_zero_bit(pp->msi_irq_in_use,
 			MAX_MSI_IRQS);
 	if (pos0 % no_irqs) {
@@ -326,10 +323,6 @@
 	/* get the port structure */
 	msi = irq_data_get_msi(data);
 	pp = sys_to_pcie(msi->dev->bus->sysdata);
-	if (!pp) {
-		BUG();
-		return;
-	}
 
 	/* undo what was done in assign_irq */
 	pos = data->hwirq;
@@ -350,11 +343,6 @@
 	struct msi_msg msg;
 	struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata);
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
 	pci_read_config_word(pdev, desc->msi_attrib.pos+PCI_MSI_FLAGS,
 				&msg_ctr);
 	msgvec = (msg_ctr&PCI_MSI_FLAGS_QSIZE) >> 4;
@@ -373,12 +361,17 @@
 	 */
 	desc->msi_attrib.multiple = msgvec;
 
-	if (pp->ops->get_msi_data)
-		msg.address_lo = pp->ops->get_msi_data(pp);
+	if (pp->ops->get_msi_addr)
+		msg.address_lo = pp->ops->get_msi_addr(pp);
 	else
 		msg.address_lo = virt_to_phys((void *)pp->msi_data);
 	msg.address_hi = 0x0;
-	msg.data = pos;
+
+	if (pp->ops->get_msi_data)
+		msg.data = pp->ops->get_msi_data(pp, pos);
+	else
+		msg.data = pos;
+
 	write_msi_msg(irq, &msg);
 
 	return 0;
@@ -425,7 +418,7 @@
 	struct resource *cfg_res;
 	u32 val, na, ns;
 	const __be32 *addrp;
-	int i, index;
+	int i, index, ret;
 
 	/* Find the address cell size and the number of cells in order to get
 	 * the untranslated address.
@@ -435,16 +428,16 @@
 
 	cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config");
 	if (cfg_res) {
-		pp->config.cfg0_size = resource_size(cfg_res)/2;
-		pp->config.cfg1_size = resource_size(cfg_res)/2;
+		pp->cfg0_size = resource_size(cfg_res)/2;
+		pp->cfg1_size = resource_size(cfg_res)/2;
 		pp->cfg0_base = cfg_res->start;
-		pp->cfg1_base = cfg_res->start + pp->config.cfg0_size;
+		pp->cfg1_base = cfg_res->start + pp->cfg0_size;
 
 		/* Find the untranslated configuration space address */
 		index = of_property_match_string(np, "reg-names", "config");
-		addrp = of_get_address(np, index, false, false);
+		addrp = of_get_address(np, index, NULL, NULL);
 		pp->cfg0_mod_base = of_read_number(addrp, ns);
-		pp->cfg1_mod_base = pp->cfg0_mod_base + pp->config.cfg0_size;
+		pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size;
 	} else {
 		dev_err(pp->dev, "missing *config* reg space\n");
 	}
@@ -466,9 +459,9 @@
 			pp->io.end = min_t(resource_size_t,
 					   IO_SPACE_LIMIT,
 					   range.pci_addr + range.size
-					   + global_io_offset);
-			pp->config.io_size = resource_size(&pp->io);
-			pp->config.io_bus_addr = range.pci_addr;
+					   + global_io_offset - 1);
+			pp->io_size = resource_size(&pp->io);
+			pp->io_bus_addr = range.pci_addr;
 			pp->io_base = range.cpu_addr;
 
 			/* Find the untranslated IO space address */
@@ -478,8 +471,8 @@
 		if (restype == IORESOURCE_MEM) {
 			of_pci_range_to_resource(&range, np, &pp->mem);
 			pp->mem.name = "MEM";
-			pp->config.mem_size = resource_size(&pp->mem);
-			pp->config.mem_bus_addr = range.pci_addr;
+			pp->mem_size = resource_size(&pp->mem);
+			pp->mem_bus_addr = range.pci_addr;
 
 			/* Find the untranslated MEM space address */
 			pp->mem_mod_base = of_read_number(parser.range -
@@ -487,19 +480,29 @@
 		}
 		if (restype == 0) {
 			of_pci_range_to_resource(&range, np, &pp->cfg);
-			pp->config.cfg0_size = resource_size(&pp->cfg)/2;
-			pp->config.cfg1_size = resource_size(&pp->cfg)/2;
+			pp->cfg0_size = resource_size(&pp->cfg)/2;
+			pp->cfg1_size = resource_size(&pp->cfg)/2;
 			pp->cfg0_base = pp->cfg.start;
-			pp->cfg1_base = pp->cfg.start + pp->config.cfg0_size;
+			pp->cfg1_base = pp->cfg.start + pp->cfg0_size;
 
 			/* Find the untranslated configuration space address */
 			pp->cfg0_mod_base = of_read_number(parser.range -
 							   parser.np + na, ns);
 			pp->cfg1_mod_base = pp->cfg0_mod_base +
-					    pp->config.cfg0_size;
+					    pp->cfg0_size;
 		}
 	}
 
+	ret = of_pci_parse_bus_range(np, &pp->busn);
+	if (ret < 0) {
+		pp->busn.name = np->name;
+		pp->busn.start = 0;
+		pp->busn.end = 0xff;
+		pp->busn.flags = IORESOURCE_BUS;
+		dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n",
+			ret, &pp->busn);
+	}
+
 	if (!pp->dbi_base) {
 		pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start,
 					resource_size(&pp->cfg));
@@ -511,17 +514,22 @@
 
 	pp->mem_base = pp->mem.start;
 
-	pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
-					pp->config.cfg0_size);
 	if (!pp->va_cfg0_base) {
-		dev_err(pp->dev, "error with ioremap in function\n");
-		return -ENOMEM;
+		pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base,
+						pp->cfg0_size);
+		if (!pp->va_cfg0_base) {
+			dev_err(pp->dev, "error with ioremap in function\n");
+			return -ENOMEM;
+		}
 	}
-	pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
-					pp->config.cfg1_size);
+
 	if (!pp->va_cfg1_base) {
-		dev_err(pp->dev, "error with ioremap\n");
-		return -ENOMEM;
+		pp->va_cfg1_base = devm_ioremap(pp->dev, pp->cfg1_base,
+						pp->cfg1_size);
+		if (!pp->va_cfg1_base) {
+			dev_err(pp->dev, "error with ioremap\n");
+			return -ENOMEM;
+		}
 	}
 
 	if (of_property_read_u32(np, "num-lanes", &pp->lanes)) {
@@ -530,16 +538,22 @@
 	}
 
 	if (IS_ENABLED(CONFIG_PCI_MSI)) {
-		pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
-					MAX_MSI_IRQS, &msi_domain_ops,
-					&dw_pcie_msi_chip);
-		if (!pp->irq_domain) {
-			dev_err(pp->dev, "irq domain init failed\n");
-			return -ENXIO;
-		}
+		if (!pp->ops->msi_host_init) {
+			pp->irq_domain = irq_domain_add_linear(pp->dev->of_node,
+						MAX_MSI_IRQS, &msi_domain_ops,
+						&dw_pcie_msi_chip);
+			if (!pp->irq_domain) {
+				dev_err(pp->dev, "irq domain init failed\n");
+				return -ENXIO;
+			}
 
-		for (i = 0; i < MAX_MSI_IRQS; i++)
-			irq_create_mapping(pp->irq_domain, i);
+			for (i = 0; i < MAX_MSI_IRQS; i++)
+				irq_create_mapping(pp->irq_domain, i);
+		} else {
+			ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip);
+			if (ret < 0)
+				return ret;
+		}
 	}
 
 	if (pp->ops->host_init)
@@ -558,7 +572,6 @@
 	dw_pci.private_data = (void **)&pp;
 
 	pci_common_init_dev(pp->dev, &dw_pci);
-	pci_assign_unassigned_resources();
 #ifdef CONFIG_PCI_DOMAINS
 	dw_pci.domain++;
 #endif
@@ -573,7 +586,7 @@
 			  PCIE_ATU_VIEWPORT);
 	dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE);
 	dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->config.cfg0_size - 1,
+	dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1,
 			  PCIE_ATU_LIMIT);
 	dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
 	dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -589,7 +602,7 @@
 	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1);
 	dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE);
 	dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->config.cfg1_size - 1,
+	dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1,
 			  PCIE_ATU_LIMIT);
 	dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET);
 	dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET);
@@ -604,10 +617,10 @@
 	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1);
 	dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE);
 	dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->config.mem_size - 1,
+	dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1,
 			  PCIE_ATU_LIMIT);
-	dw_pcie_writel_rc(pp, pp->config.mem_bus_addr, PCIE_ATU_LOWER_TARGET);
-	dw_pcie_writel_rc(pp, upper_32_bits(pp->config.mem_bus_addr),
+	dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET);
+	dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr),
 			  PCIE_ATU_UPPER_TARGET);
 	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
 }
@@ -620,10 +633,10 @@
 	dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1);
 	dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE);
 	dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE);
-	dw_pcie_writel_rc(pp, pp->io_mod_base + pp->config.io_size - 1,
+	dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1,
 			  PCIE_ATU_LIMIT);
-	dw_pcie_writel_rc(pp, pp->config.io_bus_addr, PCIE_ATU_LOWER_TARGET);
-	dw_pcie_writel_rc(pp, upper_32_bits(pp->config.io_bus_addr),
+	dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET);
+	dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr),
 			  PCIE_ATU_UPPER_TARGET);
 	dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2);
 }
@@ -707,11 +720,6 @@
 	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
 	int ret;
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
 	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) {
 		*val = 0xffffffff;
 		return PCIBIOS_DEVICE_NOT_FOUND;
@@ -736,11 +744,6 @@
 	struct pcie_port *pp = sys_to_pcie(bus->sysdata);
 	int ret;
 
-	if (!pp) {
-		BUG();
-		return -EINVAL;
-	}
-
 	if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0)
 		return PCIBIOS_DEVICE_NOT_FOUND;
 
@@ -768,19 +771,17 @@
 
 	pp = sys_to_pcie(sys);
 
-	if (!pp)
-		return 0;
-
-	if (global_io_offset < SZ_1M && pp->config.io_size > 0) {
-		sys->io_offset = global_io_offset - pp->config.io_bus_addr;
+	if (global_io_offset < SZ_1M && pp->io_size > 0) {
+		sys->io_offset = global_io_offset - pp->io_bus_addr;
 		pci_ioremap_io(global_io_offset, pp->io_base);
 		global_io_offset += SZ_64K;
 		pci_add_resource_offset(&sys->resources, &pp->io,
 					sys->io_offset);
 	}
 
-	sys->mem_offset = pp->mem.start - pp->config.mem_bus_addr;
+	sys->mem_offset = pp->mem.start - pp->mem_bus_addr;
 	pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset);
+	pci_add_resource(&sys->resources, &pp->busn);
 
 	return 1;
 }
@@ -790,14 +791,16 @@
 	struct pci_bus *bus;
 	struct pcie_port *pp = sys_to_pcie(sys);
 
-	if (pp) {
-		pp->root_bus_nr = sys->busnr;
-		bus = pci_scan_root_bus(pp->dev, sys->busnr, &dw_pcie_ops,
-					sys, &sys->resources);
-	} else {
-		bus = NULL;
-		BUG();
-	}
+	pp->root_bus_nr = sys->busnr;
+	bus = pci_create_root_bus(pp->dev, sys->busnr,
+				  &dw_pcie_ops, sys, &sys->resources);
+	if (!bus)
+		return NULL;
+
+	pci_scan_child_bus(bus);
+
+	if (bus && pp->ops->scan_bus)
+		pp->ops->scan_bus(pp);
 
 	return bus;
 }
@@ -833,7 +836,6 @@
 
 void dw_pcie_setup_rc(struct pcie_port *pp)
 {
-	struct pcie_port_info *config = &pp->config;
 	u32 val;
 	u32 membase;
 	u32 memlimit;
@@ -888,7 +890,7 @@
 
 	/* setup memory base, memory limit */
 	membase = ((u32)pp->mem_base & 0xfff00000) >> 16;
-	memlimit = (config->mem_size + (u32)pp->mem_base) & 0xfff00000;
+	memlimit = (pp->mem_size + (u32)pp->mem_base) & 0xfff00000;
 	val = memlimit | membase;
 	dw_pcie_writel_rc(pp, val, PCI_MEMORY_BASE);
 
diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h
index daf81f9..c625675 100644
--- a/drivers/pci/host/pcie-designware.h
+++ b/drivers/pci/host/pcie-designware.h
@@ -14,15 +14,6 @@
 #ifndef _PCIE_DESIGNWARE_H
 #define _PCIE_DESIGNWARE_H
 
-struct pcie_port_info {
-	u32		cfg0_size;
-	u32		cfg1_size;
-	u32		io_size;
-	u32		mem_size;
-	phys_addr_t	io_bus_addr;
-	phys_addr_t	mem_bus_addr;
-};
-
 /*
  * Maximum number of MSI IRQs can be 256 per controller. But keep
  * it 32 as of now. Probably we will never need more than 32. If needed,
@@ -38,17 +29,23 @@
 	u64			cfg0_base;
 	u64			cfg0_mod_base;
 	void __iomem		*va_cfg0_base;
+	u32			cfg0_size;
 	u64			cfg1_base;
 	u64			cfg1_mod_base;
 	void __iomem		*va_cfg1_base;
+	u32			cfg1_size;
 	u64			io_base;
 	u64			io_mod_base;
+	phys_addr_t		io_bus_addr;
+	u32			io_size;
 	u64			mem_base;
 	u64			mem_mod_base;
+	phys_addr_t		mem_bus_addr;
+	u32			mem_size;
 	struct resource		cfg;
 	struct resource		io;
 	struct resource		mem;
-	struct pcie_port_info	config;
+	struct resource		busn;
 	int			irq;
 	u32			lanes;
 	struct pcie_host_ops	*ops;
@@ -73,7 +70,10 @@
 	void (*host_init)(struct pcie_port *pp);
 	void (*msi_set_irq)(struct pcie_port *pp, int irq);
 	void (*msi_clear_irq)(struct pcie_port *pp, int irq);
-	u32 (*get_msi_data)(struct pcie_port *pp);
+	u32 (*get_msi_addr)(struct pcie_port *pp);
+	u32 (*get_msi_data)(struct pcie_port *pp, int pos);
+	void (*scan_bus)(struct pcie_port *pp);
+	int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip);
 };
 
 int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val);
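Note: the header now splits MSI programming into get_msi_addr() (the doorbell address) and get_msi_data() (the payload for a given vector position), and adds scan_bus()/msi_host_init() so glue drivers can take over bus scanning and MSI setup. A hedged sketch of a driver-side implementation; the "foo" controller name and the fixed doorbell address are invented for illustration:

static u32 foo_pcie_get_msi_addr(struct pcie_port *pp)
{
	/* Controllers with a fixed MSI doorbell return its bus address. */
	return 0x54000000;
}

static u32 foo_pcie_get_msi_data(struct pcie_port *pp, int pos)
{
	/* Encode the allocated vector the way the hardware expects. */
	return pos;
}

static struct pcie_host_ops foo_pcie_host_ops = {
	.get_msi_addr	= foo_pcie_get_msi_addr,
	.get_msi_data	= foo_pcie_get_msi_data,
};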
diff --git a/drivers/pci/host/pcie-spear13xx.c b/drivers/pci/host/pcie-spear13xx.c
index 6dea9e4..85f594e 100644
--- a/drivers/pci/host/pcie-spear13xx.c
+++ b/drivers/pci/host/pcie-spear13xx.c
@@ -340,7 +340,7 @@
 
 	pp->dev = dev;
 
-	dbi_base = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "dbi");
 	pp->dbi_base = devm_ioremap_resource(dev, dbi_base);
 	if (IS_ERR(pp->dbi_base)) {
 		dev_err(dev, "couldn't remap dbi base %p\n", dbi_base);
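Note: the spear13xx change looks the register window up by name instead of by index, so the driver keeps working regardless of how the reg entries are ordered in the devicetree. A short usage sketch under the usual driver includes; map_named_window() is an invented helper:

#include <linux/platform_device.h>
#include <linux/io.h>

static void __iomem *map_named_window(struct platform_device *pdev,
				      const char *name)
{
	struct resource *res;

	/* Resolve the window by its reg-names entry, then map it;
	 * devm_ioremap_resource() also rejects a missing resource. */
	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
	return devm_ioremap_resource(&pdev->dev, res);
}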
diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c
new file mode 100644
index 0000000..ccc496b
--- /dev/null
+++ b/drivers/pci/host/pcie-xilinx.c
@@ -0,0 +1,970 @@
+/*
+ * PCIe host controller driver for Xilinx AXI PCIe Bridge
+ *
+ * Copyright (c) 2012 - 2014 Xilinx, Inc.
+ *
+ * Based on the Tegra PCIe driver
+ *
+ * Bits taken from Synopsys Designware Host controller driver and
+ * ARM PCI Host generic driver.
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/msi.h>
+#include <linux/of_address.h>
+#include <linux/of_pci.h>
+#include <linux/of_platform.h>
+#include <linux/of_irq.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+/* Register definitions */
+#define XILINX_PCIE_REG_BIR		0x00000130
+#define XILINX_PCIE_REG_IDR		0x00000138
+#define XILINX_PCIE_REG_IMR		0x0000013c
+#define XILINX_PCIE_REG_PSCR		0x00000144
+#define XILINX_PCIE_REG_RPSC		0x00000148
+#define XILINX_PCIE_REG_MSIBASE1	0x0000014c
+#define XILINX_PCIE_REG_MSIBASE2	0x00000150
+#define XILINX_PCIE_REG_RPEFR		0x00000154
+#define XILINX_PCIE_REG_RPIFR1		0x00000158
+#define XILINX_PCIE_REG_RPIFR2		0x0000015c
+
+/* Interrupt registers definitions */
+#define XILINX_PCIE_INTR_LINK_DOWN	BIT(0)
+#define XILINX_PCIE_INTR_ECRC_ERR	BIT(1)
+#define XILINX_PCIE_INTR_STR_ERR	BIT(2)
+#define XILINX_PCIE_INTR_HOT_RESET	BIT(3)
+#define XILINX_PCIE_INTR_CFG_TIMEOUT	BIT(8)
+#define XILINX_PCIE_INTR_CORRECTABLE	BIT(9)
+#define XILINX_PCIE_INTR_NONFATAL	BIT(10)
+#define XILINX_PCIE_INTR_FATAL		BIT(11)
+#define XILINX_PCIE_INTR_INTX		BIT(16)
+#define XILINX_PCIE_INTR_MSI		BIT(17)
+#define XILINX_PCIE_INTR_SLV_UNSUPP	BIT(20)
+#define XILINX_PCIE_INTR_SLV_UNEXP	BIT(21)
+#define XILINX_PCIE_INTR_SLV_COMPL	BIT(22)
+#define XILINX_PCIE_INTR_SLV_ERRP	BIT(23)
+#define XILINX_PCIE_INTR_SLV_CMPABT	BIT(24)
+#define XILINX_PCIE_INTR_SLV_ILLBUR	BIT(25)
+#define XILINX_PCIE_INTR_MST_DECERR	BIT(26)
+#define XILINX_PCIE_INTR_MST_SLVERR	BIT(27)
+#define XILINX_PCIE_INTR_MST_ERRP	BIT(28)
+#define XILINX_PCIE_IMR_ALL_MASK	0x1FF30FED
+#define XILINX_PCIE_IDR_ALL_MASK	0xFFFFFFFF
+
+/* Root Port Error FIFO Read Register definitions */
+#define XILINX_PCIE_RPEFR_ERR_VALID	BIT(18)
+#define XILINX_PCIE_RPEFR_REQ_ID	GENMASK(15, 0)
+#define XILINX_PCIE_RPEFR_ALL_MASK	0xFFFFFFFF
+
+/* Root Port Interrupt FIFO Read Register 1 definitions */
+#define XILINX_PCIE_RPIFR1_INTR_VALID	BIT(31)
+#define XILINX_PCIE_RPIFR1_MSI_INTR	BIT(30)
+#define XILINX_PCIE_RPIFR1_INTR_MASK	GENMASK(28, 27)
+#define XILINX_PCIE_RPIFR1_ALL_MASK	0xFFFFFFFF
+#define XILINX_PCIE_RPIFR1_INTR_SHIFT	27
+
+/* Bridge Info Register definitions */
+#define XILINX_PCIE_BIR_ECAM_SZ_MASK	GENMASK(18, 16)
+#define XILINX_PCIE_BIR_ECAM_SZ_SHIFT	16
+
+/* Root Port Interrupt FIFO Read Register 2 definitions */
+#define XILINX_PCIE_RPIFR2_MSG_DATA	GENMASK(15, 0)
+
+/* Root Port Status/control Register definitions */
+#define XILINX_PCIE_REG_RPSC_BEN	BIT(0)
+
+/* Phy Status/Control Register definitions */
+#define XILINX_PCIE_REG_PSCR_LNKUP	BIT(11)
+
+/* ECAM definitions */
+#define ECAM_BUS_NUM_SHIFT		20
+#define ECAM_DEV_NUM_SHIFT		12
+
+/* Number of MSI IRQs */
+#define XILINX_NUM_MSI_IRQS		128
+
+/* Number of Memory Resources */
+#define XILINX_MAX_NUM_RESOURCES	3
+
+/**
+ * struct xilinx_pcie_port - PCIe port information
+ * @reg_base: IO Mapped Register Base
+ * @irq: Interrupt number
+ * @msi_pages: MSI pages
+ * @root_busno: Root Bus number
+ * @dev: Device pointer
+ * @irq_domain: IRQ domain pointer
+ * @bus_range: Bus range
+ * @resources: Bus Resources
+ */
+struct xilinx_pcie_port {
+	void __iomem *reg_base;
+	u32 irq;
+	unsigned long msi_pages;
+	u8 root_busno;
+	struct device *dev;
+	struct irq_domain *irq_domain;
+	struct resource bus_range;
+	struct list_head resources;
+};
+
+static DECLARE_BITMAP(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
+
+static inline struct xilinx_pcie_port *sys_to_pcie(struct pci_sys_data *sys)
+{
+	return sys->private_data;
+}
+
+static inline u32 pcie_read(struct xilinx_pcie_port *port, u32 reg)
+{
+	return readl(port->reg_base + reg);
+}
+
+static inline void pcie_write(struct xilinx_pcie_port *port, u32 val, u32 reg)
+{
+	writel(val, port->reg_base + reg);
+}
+
+static inline bool xilinx_pcie_link_is_up(struct xilinx_pcie_port *port)
+{
+	return (pcie_read(port, XILINX_PCIE_REG_PSCR) &
+		XILINX_PCIE_REG_PSCR_LNKUP) ? 1 : 0;
+}
+
+/**
+ * xilinx_pcie_clear_err_interrupts - Clear Error Interrupts
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_clear_err_interrupts(struct xilinx_pcie_port *port)
+{
+	u32 val = pcie_read(port, XILINX_PCIE_REG_RPEFR);
+
+	if (val & XILINX_PCIE_RPEFR_ERR_VALID) {
+		dev_dbg(port->dev, "Requester ID %d\n",
+			val & XILINX_PCIE_RPEFR_REQ_ID);
+		pcie_write(port, XILINX_PCIE_RPEFR_ALL_MASK,
+			   XILINX_PCIE_REG_RPEFR);
+	}
+}
+
+/**
+ * xilinx_pcie_valid_device - Check if a valid device is present on bus
+ * @bus: PCI Bus structure
+ * @devfn: device/function
+ *
+ * Return: 'true' on success and 'false' if invalid device is found
+ */
+static bool xilinx_pcie_valid_device(struct pci_bus *bus, unsigned int devfn)
+{
+	struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+
+	/* Check if link is up when trying to access downstream ports */
+	if (bus->number != port->root_busno)
+		if (!xilinx_pcie_link_is_up(port))
+			return false;
+
+	/* Only one device down on each root port */
+	if (bus->number == port->root_busno && devfn > 0)
+		return false;
+
+	/*
+	 * Do not read more than one device on the bus directly attached
+	 * to RC.
+	 */
+	if (bus->primary == port->root_busno && devfn > 0)
+		return false;
+
+	return true;
+}
+
+/**
+ * xilinx_pcie_config_base - Get configuration base
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ *
+ * Return: Base address of the configuration space needed to be
+ *	   accessed.
+ */
+static void __iomem *xilinx_pcie_config_base(struct pci_bus *bus,
+					     unsigned int devfn, int where)
+{
+	struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+	int relbus;
+
+	relbus = (bus->number << ECAM_BUS_NUM_SHIFT) |
+		 (devfn << ECAM_DEV_NUM_SHIFT);
+
+	return port->reg_base + relbus + where;
+}
+
+/**
+ * xilinx_pcie_read_config - Read configuration space
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ * @size: Byte/word/dword
+ * @val: Value to be read
+ *
+ * Return: PCIBIOS_SUCCESSFUL on success
+ *	   PCIBIOS_DEVICE_NOT_FOUND on failure
+ */
+static int xilinx_pcie_read_config(struct pci_bus *bus, unsigned int devfn,
+				   int where, int size, u32 *val)
+{
+	void __iomem *addr;
+
+	if (!xilinx_pcie_valid_device(bus, devfn)) {
+		*val = 0xFFFFFFFF;
+		return PCIBIOS_DEVICE_NOT_FOUND;
+	}
+
+	addr = xilinx_pcie_config_base(bus, devfn, where);
+
+	switch (size) {
+	case 1:
+		*val = readb(addr);
+		break;
+	case 2:
+		*val = readw(addr);
+		break;
+	default:
+		*val = readl(addr);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/**
+ * xilinx_pcie_write_config - Write configuration space
+ * @bus: PCI Bus structure
+ * @devfn: Device/function
+ * @where: Offset from base
+ * @size: Byte/word/dword
+ * @val: Value to be written to device
+ *
+ * Return: PCIBIOS_SUCCESSFUL on success
+ *	   PCIBIOS_DEVICE_NOT_FOUND on failure
+ */
+static int xilinx_pcie_write_config(struct pci_bus *bus, unsigned int devfn,
+				    int where, int size, u32 val)
+{
+	void __iomem *addr;
+
+	if (!xilinx_pcie_valid_device(bus, devfn))
+		return PCIBIOS_DEVICE_NOT_FOUND;
+
+	addr = xilinx_pcie_config_base(bus, devfn, where);
+
+	switch (size) {
+	case 1:
+		writeb(val, addr);
+		break;
+	case 2:
+		writew(val, addr);
+		break;
+	default:
+		writel(val, addr);
+		break;
+	}
+
+	return PCIBIOS_SUCCESSFUL;
+}
+
+/* PCIe operations */
+static struct pci_ops xilinx_pcie_ops = {
+	.read  = xilinx_pcie_read_config,
+	.write = xilinx_pcie_write_config,
+};
+
+/* MSI functions */
+
+/**
+ * xilinx_pcie_destroy_msi - Free MSI number
+ * @irq: IRQ to be freed
+ */
+static void xilinx_pcie_destroy_msi(unsigned int irq)
+{
+	struct irq_desc *desc;
+	struct msi_desc *msi;
+	struct xilinx_pcie_port *port;
+
+	desc = irq_to_desc(irq);
+	msi = irq_desc_get_msi_desc(desc);
+	port = sys_to_pcie(msi->dev->bus->sysdata);
+
+	if (!test_bit(irq, msi_irq_in_use))
+		dev_err(port->dev, "Trying to free unused MSI#%d\n", irq);
+	else
+		clear_bit(irq, msi_irq_in_use);
+}
+
+/**
+ * xilinx_pcie_assign_msi - Allocate MSI number
+ * @port: PCIe port structure
+ *
+ * Return: A valid IRQ on success and error value on failure.
+ */
+static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port)
+{
+	int pos;
+
+	pos = find_first_zero_bit(msi_irq_in_use, XILINX_NUM_MSI_IRQS);
+	if (pos < XILINX_NUM_MSI_IRQS)
+		set_bit(pos, msi_irq_in_use);
+	else
+		return -ENOSPC;
+
+	return pos;
+}
+
+/**
+ * xilinx_msi_teardown_irq - Destroy the MSI
+ * @chip: MSI Chip descriptor
+ * @irq: MSI IRQ to destroy
+ */
+static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq)
+{
+	xilinx_pcie_destroy_msi(irq);
+}
+
+/**
+ * xilinx_pcie_msi_setup_irq - Setup MSI request
+ * @chip: MSI chip pointer
+ * @pdev: PCIe device pointer
+ * @desc: MSI descriptor pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip,
+				     struct pci_dev *pdev,
+				     struct msi_desc *desc)
+{
+	struct xilinx_pcie_port *port = sys_to_pcie(pdev->bus->sysdata);
+	unsigned int irq;
+	int hwirq;
+	struct msi_msg msg;
+	phys_addr_t msg_addr;
+
+	hwirq = xilinx_pcie_assign_msi(port);
+	if (hwirq < 0)
+		return hwirq;
+
+	irq = irq_create_mapping(port->irq_domain, hwirq);
+	if (!irq)
+		return -EINVAL;
+
+	irq_set_msi_desc(irq, desc);
+
+	msg_addr = virt_to_phys((void *)port->msi_pages);
+
+	msg.address_hi = 0;
+	msg.address_lo = msg_addr;
+	msg.data = irq;
+
+	write_msi_msg(irq, &msg);
+
+	return 0;
+}
+
+/* MSI Chip Descriptor */
+static struct msi_chip xilinx_pcie_msi_chip = {
+	.setup_irq = xilinx_pcie_msi_setup_irq,
+	.teardown_irq = xilinx_msi_teardown_irq,
+};
+
+/* HW Interrupt Chip Descriptor */
+static struct irq_chip xilinx_msi_irq_chip = {
+	.name = "Xilinx PCIe MSI",
+	.irq_enable = unmask_msi_irq,
+	.irq_disable = mask_msi_irq,
+	.irq_mask = mask_msi_irq,
+	.irq_unmask = unmask_msi_irq,
+};
+
+/**
+ * xilinx_pcie_msi_map - Set the handler for the MSI and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq,
+			       irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	set_irq_flags(irq, IRQF_VALID);
+
+	return 0;
+}
+
+/* IRQ Domain operations */
+static const struct irq_domain_ops msi_domain_ops = {
+	.map = xilinx_pcie_msi_map,
+};
+
+/**
+ * xilinx_pcie_enable_msi - Enable MSI support
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port)
+{
+	phys_addr_t msg_addr;
+
+	port->msi_pages = __get_free_pages(GFP_KERNEL, 0);
+	msg_addr = virt_to_phys((void *)port->msi_pages);
+	pcie_write(port, 0x0, XILINX_PCIE_REG_MSIBASE1);
+	pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2);
+}
+
+/**
+ * xilinx_pcie_add_bus - Add MSI chip info to PCIe bus
+ * @bus: PCIe bus
+ */
+static void xilinx_pcie_add_bus(struct pci_bus *bus)
+{
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata);
+
+		xilinx_pcie_msi_chip.dev = port->dev;
+		bus->msi = &xilinx_pcie_msi_chip;
+	}
+}
+
+/* INTx Functions */
+
+/**
+ * xilinx_pcie_intx_map - Set the handler for the INTx and mark IRQ as valid
+ * @domain: IRQ domain
+ * @irq: Virtual IRQ number
+ * @hwirq: HW interrupt number
+ *
+ * Return: Always returns 0.
+ */
+static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq,
+				irq_hw_number_t hwirq)
+{
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
+	irq_set_chip_data(irq, domain->host_data);
+	set_irq_flags(irq, IRQF_VALID);
+
+	return 0;
+}
+
+/* INTx IRQ Domain operations */
+static const struct irq_domain_ops intx_domain_ops = {
+	.map = xilinx_pcie_intx_map,
+};
+
+/* PCIe HW Functions */
+
+/**
+ * xilinx_pcie_intr_handler - Interrupt Service Handler
+ * @irq: IRQ number
+ * @data: PCIe port information
+ *
+ * Return: IRQ_HANDLED on success and IRQ_NONE on failure
+ */
+static irqreturn_t xilinx_pcie_intr_handler(int irq, void *data)
+{
+	struct xilinx_pcie_port *port = (struct xilinx_pcie_port *)data;
+	u32 val, mask, status, msi_data;
+
+	/* Read interrupt decode and mask registers */
+	val = pcie_read(port, XILINX_PCIE_REG_IDR);
+	mask = pcie_read(port, XILINX_PCIE_REG_IMR);
+
+	status = val & mask;
+	if (!status)
+		return IRQ_NONE;
+
+	if (status & XILINX_PCIE_INTR_LINK_DOWN)
+		dev_warn(port->dev, "Link Down\n");
+
+	if (status & XILINX_PCIE_INTR_ECRC_ERR)
+		dev_warn(port->dev, "ECRC failed\n");
+
+	if (status & XILINX_PCIE_INTR_STR_ERR)
+		dev_warn(port->dev, "Streaming error\n");
+
+	if (status & XILINX_PCIE_INTR_HOT_RESET)
+		dev_info(port->dev, "Hot reset\n");
+
+	if (status & XILINX_PCIE_INTR_CFG_TIMEOUT)
+		dev_warn(port->dev, "ECAM access timeout\n");
+
+	if (status & XILINX_PCIE_INTR_CORRECTABLE) {
+		dev_warn(port->dev, "Correctable error message\n");
+		xilinx_pcie_clear_err_interrupts(port);
+	}
+
+	if (status & XILINX_PCIE_INTR_NONFATAL) {
+		dev_warn(port->dev, "Non fatal error message\n");
+		xilinx_pcie_clear_err_interrupts(port);
+	}
+
+	if (status & XILINX_PCIE_INTR_FATAL) {
+		dev_warn(port->dev, "Fatal error message\n");
+		xilinx_pcie_clear_err_interrupts(port);
+	}
+
+	if (status & XILINX_PCIE_INTR_INTX) {
+		/* INTx interrupt received */
+		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+
+		/* Check whether interrupt valid */
+		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+			return IRQ_HANDLED;
+		}
+
+		/* Clear interrupt FIFO register 1 */
+		pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+			   XILINX_PCIE_REG_RPIFR1);
+
+		/* Handle INTx Interrupt */
+		val = ((val & XILINX_PCIE_RPIFR1_INTR_MASK) >>
+			XILINX_PCIE_RPIFR1_INTR_SHIFT) + 1;
+		generic_handle_irq(irq_find_mapping(port->irq_domain, val));
+	}
+
+	if (status & XILINX_PCIE_INTR_MSI) {
+		/* MSI Interrupt */
+		val = pcie_read(port, XILINX_PCIE_REG_RPIFR1);
+
+		if (!(val & XILINX_PCIE_RPIFR1_INTR_VALID)) {
+			dev_warn(port->dev, "RP Intr FIFO1 read error\n");
+			return IRQ_HANDLED;
+		}
+
+		if (val & XILINX_PCIE_RPIFR1_MSI_INTR) {
+			msi_data = pcie_read(port, XILINX_PCIE_REG_RPIFR2) &
+				   XILINX_PCIE_RPIFR2_MSG_DATA;
+
+			/* Clear interrupt FIFO register 1 */
+			pcie_write(port, XILINX_PCIE_RPIFR1_ALL_MASK,
+				   XILINX_PCIE_REG_RPIFR1);
+
+			if (IS_ENABLED(CONFIG_PCI_MSI)) {
+				/* Handle MSI Interrupt */
+				generic_handle_irq(msi_data);
+			}
+		}
+	}
+
+	if (status & XILINX_PCIE_INTR_SLV_UNSUPP)
+		dev_warn(port->dev, "Slave unsupported request\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_UNEXP)
+		dev_warn(port->dev, "Slave unexpected completion\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_COMPL)
+		dev_warn(port->dev, "Slave completion timeout\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_ERRP)
+		dev_warn(port->dev, "Slave Error Poison\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_CMPABT)
+		dev_warn(port->dev, "Slave Completer Abort\n");
+
+	if (status & XILINX_PCIE_INTR_SLV_ILLBUR)
+		dev_warn(port->dev, "Slave Illegal Burst\n");
+
+	if (status & XILINX_PCIE_INTR_MST_DECERR)
+		dev_warn(port->dev, "Master decode error\n");
+
+	if (status & XILINX_PCIE_INTR_MST_SLVERR)
+		dev_warn(port->dev, "Master slave error\n");
+
+	if (status & XILINX_PCIE_INTR_MST_ERRP)
+		dev_warn(port->dev, "Master error poison\n");
+
+	/* Clear the Interrupt Decode register */
+	pcie_write(port, status, XILINX_PCIE_REG_IDR);
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * xilinx_pcie_free_irq_domain - Free IRQ domain
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_free_irq_domain(struct xilinx_pcie_port *port)
+{
+	int i;
+	u32 irq, num_irqs;
+
+	/* Free IRQ Domain */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+
+		free_pages(port->msi_pages, 0);
+
+		num_irqs = XILINX_NUM_MSI_IRQS;
+	} else {
+		/* INTx */
+		num_irqs = 4;
+	}
+
+	for (i = 0; i < num_irqs; i++) {
+		irq = irq_find_mapping(port->irq_domain, i);
+		if (irq > 0)
+			irq_dispose_mapping(irq);
+	}
+
+	irq_domain_remove(port->irq_domain);
+}
+
+/**
+ * xilinx_pcie_init_irq_domain - Initialize IRQ domain
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_init_irq_domain(struct xilinx_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	struct device_node *node = dev->of_node;
+	struct device_node *pcie_intc_node;
+
+	/* Setup INTx */
+	pcie_intc_node = of_get_next_child(node, NULL);
+	if (!pcie_intc_node) {
+		dev_err(dev, "No PCIe Intc node found\n");
+		return -ENODEV;
+	}
+
+	port->irq_domain = irq_domain_add_linear(pcie_intc_node, 4,
+						 &intx_domain_ops,
+						 port);
+	if (!port->irq_domain) {
+		dev_err(dev, "Failed to get an INTx IRQ domain\n");
+		return -ENODEV;
+	}
+
+	/* Setup MSI */
+	if (IS_ENABLED(CONFIG_PCI_MSI)) {
+		port->irq_domain = irq_domain_add_linear(node,
+							 XILINX_NUM_MSI_IRQS,
+							 &msi_domain_ops,
+							 &xilinx_pcie_msi_chip);
+		if (!port->irq_domain) {
+			dev_err(dev, "Failed to get an MSI IRQ domain\n");
+			return -ENODEV;
+		}
+
+		xilinx_pcie_enable_msi(port);
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_pcie_init_port - Initialize hardware
+ * @port: PCIe port information
+ */
+static void xilinx_pcie_init_port(struct xilinx_pcie_port *port)
+{
+	if (xilinx_pcie_link_is_up(port))
+		dev_info(port->dev, "PCIe Link is UP\n");
+	else
+		dev_info(port->dev, "PCIe Link is DOWN\n");
+
+	/* Disable all interrupts */
+	pcie_write(port, ~XILINX_PCIE_IDR_ALL_MASK,
+		   XILINX_PCIE_REG_IMR);
+
+	/* Clear pending interrupts */
+	pcie_write(port, pcie_read(port, XILINX_PCIE_REG_IDR) &
+			 XILINX_PCIE_IMR_ALL_MASK,
+		   XILINX_PCIE_REG_IDR);
+
+	/* Enable all interrupts */
+	pcie_write(port, XILINX_PCIE_IMR_ALL_MASK, XILINX_PCIE_REG_IMR);
+
+	/* Enable the Bridge enable bit */
+	pcie_write(port, pcie_read(port, XILINX_PCIE_REG_RPSC) |
+			 XILINX_PCIE_REG_RPSC_BEN,
+		   XILINX_PCIE_REG_RPSC);
+}
+
+/**
+ * xilinx_pcie_setup - Setup memory resources
+ * @nr: Bus number
+ * @sys: Per controller structure
+ *
+ * Return: '1' on success and error value on failure
+ */
+static int xilinx_pcie_setup(int nr, struct pci_sys_data *sys)
+{
+	struct xilinx_pcie_port *port = sys_to_pcie(sys);
+
+	list_splice_init(&port->resources, &sys->resources);
+
+	return 1;
+}
+
+/**
+ * xilinx_pcie_scan_bus - Scan PCIe bus for devices
+ * @nr: Bus number
+ * @sys: Per controller structure
+ *
+ * Return: Valid Bus pointer on success and NULL on failure
+ */
+static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys)
+{
+	struct xilinx_pcie_port *port = sys_to_pcie(sys);
+	struct pci_bus *bus;
+
+	port->root_busno = sys->busnr;
+	bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops,
+				sys, &sys->resources);
+
+	return bus;
+}
+
+/**
+ * xilinx_pcie_parse_and_add_res - Add resources by parsing ranges
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	struct device_node *node = dev->of_node;
+	struct resource *mem;
+	resource_size_t offset;
+	struct of_pci_range_parser parser;
+	struct of_pci_range range;
+	struct pci_host_bridge_window *win;
+	int err = 0, mem_resno = 0;
+
+	/* Get the ranges */
+	if (of_pci_range_parser_init(&parser, node)) {
+		dev_err(dev, "missing \"ranges\" property\n");
+		return -EINVAL;
+	}
+
+	/* Parse the ranges and add the resources found to the list */
+	for_each_of_pci_range(&parser, &range) {
+
+		if (mem_resno >= XILINX_MAX_NUM_RESOURCES) {
+			dev_err(dev, "Maximum memory resources exceeded\n");
+			return -EINVAL;
+		}
+
+		mem = devm_kmalloc(dev, sizeof(*mem), GFP_KERNEL);
+		if (!mem) {
+			err = -ENOMEM;
+			goto free_resources;
+		}
+
+		of_pci_range_to_resource(&range, node, mem);
+
+		switch (mem->flags & IORESOURCE_TYPE_BITS) {
+		case IORESOURCE_MEM:
+			offset = range.cpu_addr - range.pci_addr;
+			mem_resno++;
+			break;
+		default:
+			err = -EINVAL;
+			break;
+		}
+
+		if (err < 0) {
+			dev_warn(dev, "Invalid resource found %pR\n", mem);
+			continue;
+		}
+
+		err = request_resource(&iomem_resource, mem);
+		if (err)
+			goto free_resources;
+
+		pci_add_resource_offset(&port->resources, mem, offset);
+	}
+
+	/* Get the bus range */
+	if (of_pci_parse_bus_range(node, &port->bus_range)) {
+		u32 val = pcie_read(port, XILINX_PCIE_REG_BIR);
+		u8 last;
+
+		last = (val & XILINX_PCIE_BIR_ECAM_SZ_MASK) >>
+			XILINX_PCIE_BIR_ECAM_SZ_SHIFT;
+
+		port->bus_range = (struct resource) {
+			.name	= node->name,
+			.start	= 0,
+			.end	= last,
+			.flags	= IORESOURCE_BUS,
+		};
+	}
+
+	/* Register bus resource */
+	pci_add_resource(&port->resources, &port->bus_range);
+
+	return 0;
+
+free_resources:
+	release_child_resources(&iomem_resource);
+	list_for_each_entry(win, &port->resources, list)
+		devm_kfree(dev, win->res);
+	pci_free_resource_list(&port->resources);
+
+	return err;
+}
+
+/**
+ * xilinx_pcie_parse_dt - Parse Device tree
+ * @port: PCIe port information
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_parse_dt(struct xilinx_pcie_port *port)
+{
+	struct device *dev = port->dev;
+	struct device_node *node = dev->of_node;
+	struct resource regs;
+	const char *type;
+	int err;
+
+	type = of_get_property(node, "device_type", NULL);
+	if (!type || strcmp(type, "pci")) {
+		dev_err(dev, "invalid \"device_type\" %s\n", type);
+		return -EINVAL;
+	}
+
+	err = of_address_to_resource(node, 0, &regs);
+	if (err) {
+		dev_err(dev, "missing \"reg\" property\n");
+		return err;
+	}
+
+	port->reg_base = devm_ioremap_resource(dev, &regs);
+	if (IS_ERR(port->reg_base))
+		return PTR_ERR(port->reg_base);
+
+	port->irq = irq_of_parse_and_map(node, 0);
+	err = devm_request_irq(dev, port->irq, xilinx_pcie_intr_handler,
+			       IRQF_SHARED, "xilinx-pcie", port);
+	if (err) {
+		dev_err(dev, "unable to request irq %d\n", port->irq);
+		return err;
+	}
+
+	return 0;
+}
+
+/**
+ * xilinx_pcie_probe - Probe function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' on success and error value on failure
+ */
+static int xilinx_pcie_probe(struct platform_device *pdev)
+{
+	struct xilinx_pcie_port *port;
+	struct hw_pci hw;
+	struct device *dev = &pdev->dev;
+	int err;
+
+	if (!dev->of_node)
+		return -ENODEV;
+
+	port = devm_kzalloc(dev, sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	port->dev = dev;
+
+	err = xilinx_pcie_parse_dt(port);
+	if (err) {
+		dev_err(dev, "Parsing DT failed\n");
+		return err;
+	}
+
+	xilinx_pcie_init_port(port);
+
+	err = xilinx_pcie_init_irq_domain(port);
+	if (err) {
+		dev_err(dev, "Failed creating IRQ Domain\n");
+		return err;
+	}
+
+	/*
+	 * Parse PCI ranges, configuration bus range and
+	 * request their resources
+	 */
+	INIT_LIST_HEAD(&port->resources);
+	err = xilinx_pcie_parse_and_add_res(port);
+	if (err) {
+		dev_err(dev, "Failed adding resources\n");
+		return err;
+	}
+
+	platform_set_drvdata(pdev, port);
+
+	/* Register the device */
+	memset(&hw, 0, sizeof(hw));
+	hw = (struct hw_pci) {
+		.nr_controllers	= 1,
+		.private_data	= (void **)&port,
+		.setup		= xilinx_pcie_setup,
+		.map_irq	= of_irq_parse_and_map_pci,
+		.add_bus	= xilinx_pcie_add_bus,
+		.scan		= xilinx_pcie_scan_bus,
+		.ops		= &xilinx_pcie_ops,
+	};
+	pci_common_init_dev(dev, &hw);
+
+	return 0;
+}
+
+/**
+ * xilinx_pcie_remove - Remove function
+ * @pdev: Platform device pointer
+ *
+ * Return: '0' always
+ */
+static int xilinx_pcie_remove(struct platform_device *pdev)
+{
+	struct xilinx_pcie_port *port = platform_get_drvdata(pdev);
+
+	xilinx_pcie_free_irq_domain(port);
+
+	return 0;
+}
+
+static struct of_device_id xilinx_pcie_of_match[] = {
+	{ .compatible = "xlnx,axi-pcie-host-1.00.a", },
+	{}
+};
+
+static struct platform_driver xilinx_pcie_driver = {
+	.driver = {
+		.name = "xilinx-pcie",
+		.owner = THIS_MODULE,
+		.of_match_table = xilinx_pcie_of_match,
+		.suppress_bind_attrs = true,
+	},
+	.probe = xilinx_pcie_probe,
+	.remove = xilinx_pcie_remove,
+};
+module_platform_driver(xilinx_pcie_driver);
+
+MODULE_AUTHOR("Xilinx Inc");
+MODULE_DESCRIPTION("Xilinx AXI PCIe driver");
+MODULE_LICENSE("GPL v2");
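Note: for reference, xilinx_pcie_config_base() above forms an ECAM-style offset by placing the bus number at bit 20 and devfn at bit 12, with the register offset in the low bits. A stand-alone sketch of that arithmetic (plain C, values invented):

#include <stdio.h>

int main(void)
{
	unsigned int bus = 1;
	unsigned int devfn = (2 << 3) | 0;	/* device 2, function 0 */
	unsigned int where = 0x10;		/* BAR0 offset */
	unsigned long offset = ((unsigned long)bus << 20) |
			       ((unsigned long)devfn << 12) | where;

	printf("config offset = 0x%lx\n", offset);	/* prints 0x110010 */
	return 0;
}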
diff --git a/drivers/pci/hotplug/Makefile b/drivers/pci/hotplug/Makefile
index 3e6532b..4a9aa08 100644
--- a/drivers/pci/hotplug/Makefile
+++ b/drivers/pci/hotplug/Makefile
@@ -24,7 +24,7 @@
 
 obj-$(CONFIG_HOTPLUG_PCI_ACPI_IBM)	+= acpiphp_ibm.o
 
-pci_hotplug-objs	:=	pci_hotplug_core.o pcihp_slot.o
+pci_hotplug-objs	:=	pci_hotplug_core.o
 
 ifdef CONFIG_HOTPLUG_PCI_CPCI
 pci_hotplug-objs	+=	cpci_hotplug_core.o	\
diff --git a/drivers/pci/hotplug/acpi_pcihp.c b/drivers/pci/hotplug/acpi_pcihp.c
index a94d850..876ccc6 100644
--- a/drivers/pci/hotplug/acpi_pcihp.c
+++ b/drivers/pci/hotplug/acpi_pcihp.c
@@ -46,215 +46,6 @@
 
 static bool debug_acpi;
 
-static acpi_status
-decode_type0_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
-{
-	int i;
-	union acpi_object *fields = record->package.elements;
-	u32 revision = fields[1].integer.value;
-
-	switch (revision) {
-	case 1:
-		if (record->package.count != 6)
-			return AE_ERROR;
-		for (i = 2; i < 6; i++)
-			if (fields[i].type != ACPI_TYPE_INTEGER)
-				return AE_ERROR;
-		hpx->t0 = &hpx->type0_data;
-		hpx->t0->revision        = revision;
-		hpx->t0->cache_line_size = fields[2].integer.value;
-		hpx->t0->latency_timer   = fields[3].integer.value;
-		hpx->t0->enable_serr     = fields[4].integer.value;
-		hpx->t0->enable_perr     = fields[5].integer.value;
-		break;
-	default:
-		printk(KERN_WARNING
-		       "%s: Type 0 Revision %d record not supported\n",
-		       __func__, revision);
-		return AE_ERROR;
-	}
-	return AE_OK;
-}
-
-static acpi_status
-decode_type1_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
-{
-	int i;
-	union acpi_object *fields = record->package.elements;
-	u32 revision = fields[1].integer.value;
-
-	switch (revision) {
-	case 1:
-		if (record->package.count != 5)
-			return AE_ERROR;
-		for (i = 2; i < 5; i++)
-			if (fields[i].type != ACPI_TYPE_INTEGER)
-				return AE_ERROR;
-		hpx->t1 = &hpx->type1_data;
-		hpx->t1->revision      = revision;
-		hpx->t1->max_mem_read  = fields[2].integer.value;
-		hpx->t1->avg_max_split = fields[3].integer.value;
-		hpx->t1->tot_max_split = fields[4].integer.value;
-		break;
-	default:
-		printk(KERN_WARNING
-		       "%s: Type 1 Revision %d record not supported\n",
-		       __func__, revision);
-		return AE_ERROR;
-	}
-	return AE_OK;
-}
-
-static acpi_status
-decode_type2_hpx_record(union acpi_object *record, struct hotplug_params *hpx)
-{
-	int i;
-	union acpi_object *fields = record->package.elements;
-	u32 revision = fields[1].integer.value;
-
-	switch (revision) {
-	case 1:
-		if (record->package.count != 18)
-			return AE_ERROR;
-		for (i = 2; i < 18; i++)
-			if (fields[i].type != ACPI_TYPE_INTEGER)
-				return AE_ERROR;
-		hpx->t2 = &hpx->type2_data;
-		hpx->t2->revision      = revision;
-		hpx->t2->unc_err_mask_and      = fields[2].integer.value;
-		hpx->t2->unc_err_mask_or       = fields[3].integer.value;
-		hpx->t2->unc_err_sever_and     = fields[4].integer.value;
-		hpx->t2->unc_err_sever_or      = fields[5].integer.value;
-		hpx->t2->cor_err_mask_and      = fields[6].integer.value;
-		hpx->t2->cor_err_mask_or       = fields[7].integer.value;
-		hpx->t2->adv_err_cap_and       = fields[8].integer.value;
-		hpx->t2->adv_err_cap_or        = fields[9].integer.value;
-		hpx->t2->pci_exp_devctl_and    = fields[10].integer.value;
-		hpx->t2->pci_exp_devctl_or     = fields[11].integer.value;
-		hpx->t2->pci_exp_lnkctl_and    = fields[12].integer.value;
-		hpx->t2->pci_exp_lnkctl_or     = fields[13].integer.value;
-		hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
-		hpx->t2->sec_unc_err_sever_or  = fields[15].integer.value;
-		hpx->t2->sec_unc_err_mask_and  = fields[16].integer.value;
-		hpx->t2->sec_unc_err_mask_or   = fields[17].integer.value;
-		break;
-	default:
-		printk(KERN_WARNING
-		       "%s: Type 2 Revision %d record not supported\n",
-		       __func__, revision);
-		return AE_ERROR;
-	}
-	return AE_OK;
-}
-
-static acpi_status
-acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
-{
-	acpi_status status;
-	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
-	union acpi_object *package, *record, *fields;
-	u32 type;
-	int i;
-
-	/* Clear the return buffer with zeros */
-	memset(hpx, 0, sizeof(struct hotplug_params));
-
-	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
-	if (ACPI_FAILURE(status))
-		return status;
-
-	package = (union acpi_object *)buffer.pointer;
-	if (package->type != ACPI_TYPE_PACKAGE) {
-		status = AE_ERROR;
-		goto exit;
-	}
-
-	for (i = 0; i < package->package.count; i++) {
-		record = &package->package.elements[i];
-		if (record->type != ACPI_TYPE_PACKAGE) {
-			status = AE_ERROR;
-			goto exit;
-		}
-
-		fields = record->package.elements;
-		if (fields[0].type != ACPI_TYPE_INTEGER ||
-		    fields[1].type != ACPI_TYPE_INTEGER) {
-			status = AE_ERROR;
-			goto exit;
-		}
-
-		type = fields[0].integer.value;
-		switch (type) {
-		case 0:
-			status = decode_type0_hpx_record(record, hpx);
-			if (ACPI_FAILURE(status))
-				goto exit;
-			break;
-		case 1:
-			status = decode_type1_hpx_record(record, hpx);
-			if (ACPI_FAILURE(status))
-				goto exit;
-			break;
-		case 2:
-			status = decode_type2_hpx_record(record, hpx);
-			if (ACPI_FAILURE(status))
-				goto exit;
-			break;
-		default:
-			printk(KERN_ERR "%s: Type %d record not supported\n",
-			       __func__, type);
-			status = AE_ERROR;
-			goto exit;
-		}
-	}
- exit:
-	kfree(buffer.pointer);
-	return status;
-}
-
-static acpi_status
-acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
-{
-	acpi_status status;
-	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
-	union acpi_object *package, *fields;
-	int i;
-
-	memset(hpp, 0, sizeof(struct hotplug_params));
-
-	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
-	if (ACPI_FAILURE(status))
-		return status;
-
-	package = (union acpi_object *) buffer.pointer;
-	if (package->type != ACPI_TYPE_PACKAGE ||
-	    package->package.count != 4) {
-		status = AE_ERROR;
-		goto exit;
-	}
-
-	fields = package->package.elements;
-	for (i = 0; i < 4; i++) {
-		if (fields[i].type != ACPI_TYPE_INTEGER) {
-			status = AE_ERROR;
-			goto exit;
-		}
-	}
-
-	hpp->t0 = &hpp->type0_data;
-	hpp->t0->revision        = 1;
-	hpp->t0->cache_line_size = fields[0].integer.value;
-	hpp->t0->latency_timer   = fields[1].integer.value;
-	hpp->t0->enable_serr     = fields[2].integer.value;
-	hpp->t0->enable_perr     = fields[3].integer.value;
-
-exit:
-	kfree(buffer.pointer);
-	return status;
-}
-
-
-
 /* acpi_run_oshp - get control of hotplug from the firmware
  *
  * @handle - the handle of the hotplug controller.
@@ -283,48 +74,6 @@
 	return status;
 }
 
-/* pci_get_hp_params
- *
- * @dev - the pci_dev for which we want parameters
- * @hpp - allocated by the caller
- */
-int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
-{
-	acpi_status status;
-	acpi_handle handle, phandle;
-	struct pci_bus *pbus;
-
-	handle = NULL;
-	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
-		handle = acpi_pci_get_bridge_handle(pbus);
-		if (handle)
-			break;
-	}
-
-	/*
-	 * _HPP settings apply to all child buses, until another _HPP is
-	 * encountered. If we don't find an _HPP for the input pci dev,
-	 * look for it in the parent device scope since that would apply to
-	 * this pci dev.
-	 */
-	while (handle) {
-		status = acpi_run_hpx(handle, hpp);
-		if (ACPI_SUCCESS(status))
-			return 0;
-		status = acpi_run_hpp(handle, hpp);
-		if (ACPI_SUCCESS(status))
-			return 0;
-		if (acpi_is_root_bridge(handle))
-			break;
-		status = acpi_get_parent(handle, &phandle);
-		if (ACPI_FAILURE(status))
-			break;
-		handle = phandle;
-	}
-	return -ENODEV;
-}
-EXPORT_SYMBOL_GPL(pci_get_hp_params);
-
 /**
  * acpi_get_hp_hw_control_from_firmware
  * @dev: the pci_dev of the bridge that has a hotplug controller
@@ -433,7 +182,8 @@
 {
 	acpi_handle bridge_handle, parent_handle;
 
-	if (!(bridge_handle = acpi_pci_get_bridge_handle(pbus)))
+	bridge_handle = acpi_pci_get_bridge_handle(pbus);
+	if (!bridge_handle)
 		return 0;
 	if ((ACPI_FAILURE(acpi_get_parent(handle, &parent_handle))))
 		return 0;
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 70741c8..a6f8e0b 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -61,7 +61,6 @@
 static int acpiphp_hotplug_notify(struct acpi_device *adev, u32 type);
 static void acpiphp_post_dock_fixup(struct acpi_device *adev);
 static void acpiphp_sanitize_bus(struct pci_bus *bus);
-static void acpiphp_set_hpp_values(struct pci_bus *bus);
 static void hotplug_event(u32 type, struct acpiphp_context *context);
 static void free_bridge(struct kref *kref);
 
@@ -510,7 +509,7 @@
 	__pci_bus_assign_resources(bus, &add_list, NULL);
 
 	acpiphp_sanitize_bus(bus);
-	acpiphp_set_hpp_values(bus);
+	pcie_bus_configure_settings(bus);
 	acpiphp_set_acpi_region(slot);
 
 	list_for_each_entry(dev, &bus->devices, bus_list) {
@@ -702,14 +701,6 @@
 	}
 }
 
-static void acpiphp_set_hpp_values(struct pci_bus *bus)
-{
-	struct pci_dev *dev;
-
-	list_for_each_entry(dev, &bus->devices, bus_list)
-		pci_configure_slot(dev);
-}
-
 /*
  * Remove devices for which we could not assign resources, call
  * arch specific code to fix-up the bus
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index 8dcccff..6ca2399 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -302,7 +302,7 @@
 		goto read_table_done;
 	}
 
-	for(size = 0, i = 0; i < package->package.count; i++) {
+	for (size = 0, i = 0; i < package->package.count; i++) {
 		if (package->package.elements[i].type != ACPI_TYPE_BUFFER) {
 			pr_err("%s:  Invalid APCI element %d\n", __func__, i);
 			goto read_table_done;
diff --git a/drivers/pci/hotplug/cpci_hotplug_core.c b/drivers/pci/hotplug/cpci_hotplug_core.c
index e09cf78..a5a7fd8 100644
--- a/drivers/pci/hotplug/cpci_hotplug_core.c
+++ b/drivers/pci/hotplug/cpci_hotplug_core.c
@@ -125,7 +125,8 @@
 
 	/* Unconfigure device */
 	dbg("%s - unconfiguring slot %s", __func__, slot_name(slot));
-	if ((retval = cpci_unconfigure_slot(slot))) {
+	retval = cpci_unconfigure_slot(slot);
+	if (retval) {
 		err("%s - could not unconfigure slot %s",
 		    __func__, slot_name(slot));
 		goto disable_error;
@@ -141,9 +142,11 @@
 	}
 	cpci_led_on(slot);
 
-	if (controller->ops->set_power)
-		if ((retval = controller->ops->set_power(slot, 0)))
+	if (controller->ops->set_power) {
+		retval = controller->ops->set_power(slot, 0);
+		if (retval)
 			goto disable_error;
+	}
 
 	if (update_adapter_status(slot->hotplug_slot, 0))
 		warn("failure to update adapter file");
@@ -467,9 +470,9 @@
 			    __func__, slot_name(slot), hs_csr);
 
 			if (!slot->extracting) {
-				if (update_latch_status(slot->hotplug_slot, 0)) {
+				if (update_latch_status(slot->hotplug_slot, 0))
 					warn("failure to update latch file");
-				}
+
 				slot->extracting = 1;
 				atomic_inc(&extracting);
 			}
diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
index 04fcd78..66b7bbe 100644
--- a/drivers/pci/hotplug/cpcihp_generic.c
+++ b/drivers/pci/hotplug/cpcihp_generic.c
@@ -56,7 +56,7 @@
 		if (debug)					\
 			printk (KERN_DEBUG "%s: " format "\n",	\
 				MY_NAME , ## arg);		\
-	} while(0)
+	} while (0)
 #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
 #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
 #define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
@@ -82,28 +82,28 @@
 	char *p;
 	unsigned long tmp;
 
-	if(!bridge) {
+	if (!bridge) {
 		info("not configured, disabling.");
 		return -EINVAL;
 	}
 	str = bridge;
-	if(!*str)
+	if (!*str)
 		return -EINVAL;
 
 	tmp = simple_strtoul(str, &p, 16);
-	if(p == str || tmp > 0xff) {
+	if (p == str || tmp > 0xff) {
 		err("Invalid hotplug bus bridge device bus number");
 		return -EINVAL;
 	}
 	bridge_busnr = (u8) tmp;
 	dbg("bridge_busnr = 0x%02x", bridge_busnr);
-	if(*p != ':') {
+	if (*p != ':') {
 		err("Invalid hotplug bus bridge device");
 		return -EINVAL;
 	}
 	str = p + 1;
 	tmp = simple_strtoul(str, &p, 16);
-	if(p == str || tmp > 0x1f) {
+	if (p == str || tmp > 0x1f) {
 		err("Invalid hotplug bus bridge device slot number");
 		return -EINVAL;
 	}
@@ -112,18 +112,18 @@
 
 	dbg("first_slot = 0x%02x", first_slot);
 	dbg("last_slot = 0x%02x", last_slot);
-	if(!(first_slot && last_slot)) {
+	if (!(first_slot && last_slot)) {
 		err("Need to specify first_slot and last_slot");
 		return -EINVAL;
 	}
-	if(last_slot < first_slot) {
+	if (last_slot < first_slot) {
 		err("first_slot must be less than last_slot");
 		return -EINVAL;
 	}
 
 	dbg("port = 0x%04x", port);
 	dbg("enum_bit = 0x%02x", enum_bit);
-	if(enum_bit > 7) {
+	if (enum_bit > 7) {
 		err("Invalid #ENUM bit");
 		return -EINVAL;
 	}
@@ -151,12 +151,12 @@
 		return status;
 
 	r = request_region(port, 1, "#ENUM hotswap signal register");
-	if(!r)
+	if (!r)
 		return -EBUSY;
 
 	dev = pci_get_domain_bus_and_slot(0, bridge_busnr,
 					  PCI_DEVFN(bridge_slot, 0));
-	if(!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
+	if (!dev || dev->hdr_type != PCI_HEADER_TYPE_BRIDGE) {
 		err("Invalid bridge device %s", bridge);
 		pci_dev_put(dev);
 		return -EINVAL;
@@ -169,21 +169,21 @@
 	generic_hpc.ops = &generic_hpc_ops;
 
 	status = cpci_hp_register_controller(&generic_hpc);
-	if(status != 0) {
+	if (status != 0) {
 		err("Could not register cPCI hotplug controller");
 		return -ENODEV;
 	}
 	dbg("registered controller");
 
 	status = cpci_hp_register_bus(bus, first_slot, last_slot);
-	if(status != 0) {
+	if (status != 0) {
 		err("Could not register cPCI hotplug bus");
 		goto init_bus_register_error;
 	}
 	dbg("registered bus");
 
 	status = cpci_hp_start();
-	if(status != 0) {
+	if (status != 0) {
 		err("Could not started cPCI hotplug system");
 		goto init_start_error;
 	}
diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
index 6757b3e..7ecf34e 100644
--- a/drivers/pci/hotplug/cpcihp_zt5550.c
+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
@@ -51,7 +51,7 @@
 		if (debug)					\
 			printk (KERN_DEBUG "%s: " format "\n",	\
 				MY_NAME , ## arg);		\
-	} while(0)
+	} while (0)
 #define err(format, arg...) printk(KERN_ERR "%s: " format "\n", MY_NAME , ## arg)
 #define info(format, arg...) printk(KERN_INFO "%s: " format "\n", MY_NAME , ## arg)
 #define warn(format, arg...) printk(KERN_WARNING "%s: " format "\n", MY_NAME , ## arg)
@@ -82,13 +82,13 @@
 	int ret;
 
 	/* Since we know that no boards exist with two HC chips, treat it as an error */
-	if(hc_dev) {
+	if (hc_dev) {
 		err("too many host controller devices?");
 		return -EBUSY;
 	}
 
 	ret = pci_enable_device(pdev);
-	if(ret) {
+	if (ret) {
 		err("cannot enable %s\n", pci_name(pdev));
 		return ret;
 	}
@@ -98,7 +98,7 @@
 	dbg("pci resource start %llx", (unsigned long long)pci_resource_start(hc_dev, 1));
 	dbg("pci resource len %llx", (unsigned long long)pci_resource_len(hc_dev, 1));
 
-	if(!request_mem_region(pci_resource_start(hc_dev, 1),
+	if (!request_mem_region(pci_resource_start(hc_dev, 1),
 				pci_resource_len(hc_dev, 1), MY_NAME)) {
 		err("cannot reserve MMIO region");
 		ret = -ENOMEM;
@@ -107,7 +107,7 @@
 
 	hc_registers =
 	    ioremap(pci_resource_start(hc_dev, 1), pci_resource_len(hc_dev, 1));
-	if(!hc_registers) {
+	if (!hc_registers) {
 		err("cannot remap MMIO region %llx @ %llx",
 			(unsigned long long)pci_resource_len(hc_dev, 1),
 			(unsigned long long)pci_resource_start(hc_dev, 1));
@@ -146,7 +146,7 @@
 
 static int zt5550_hc_cleanup(void)
 {
-	if(!hc_dev)
+	if (!hc_dev)
 		return -ENODEV;
 
 	iounmap(hc_registers);
@@ -170,9 +170,9 @@
 	u8 reg;
 
 	ret = 0;
-	if(dev_id == zt5550_hpc.dev_id) {
+	if (dev_id == zt5550_hpc.dev_id) {
 		reg = readb(csr_int_status);
-		if(reg)
+		if (reg)
 			ret = 1;
 	}
 	return ret;
@@ -182,9 +182,9 @@
 {
 	u8 reg;
 
-	if(hc_dev == NULL) {
+	if (hc_dev == NULL)
 		return -ENODEV;
-	}
+
 	reg = readb(csr_int_mask);
 	reg = reg & ~ENUM_INT_MASK;
 	writeb(reg, csr_int_mask);
@@ -195,9 +195,8 @@
 {
 	u8 reg;
 
-	if(hc_dev == NULL) {
+	if (hc_dev == NULL)
 		return -ENODEV;
-	}
 
 	reg = readb(csr_int_mask);
 	reg = reg | ENUM_INT_MASK;
@@ -210,15 +209,15 @@
 	int status;
 
 	status = zt5550_hc_config(pdev);
-	if(status != 0) {
+	if (status != 0)
 		return status;
-	}
+
 	dbg("returned from zt5550_hc_config");
 
 	memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
 	zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
 	zt5550_hpc.ops = &zt5550_hpc_ops;
-	if(!poll) {
+	if (!poll) {
 		zt5550_hpc.irq = hc_dev->irq;
 		zt5550_hpc.irq_flags = IRQF_SHARED;
 		zt5550_hpc.dev_id = hc_dev;
@@ -231,15 +230,16 @@
 	}
 
 	status = cpci_hp_register_controller(&zt5550_hpc);
-	if(status != 0) {
+	if (status != 0) {
 		err("could not register cPCI hotplug controller");
 		goto init_hc_error;
 	}
 	dbg("registered controller");
 
 	/* Look for first device matching cPCI bus's bridge vendor and device IDs */
-	if(!(bus0_dev = pci_get_device(PCI_VENDOR_ID_DEC,
-					 PCI_DEVICE_ID_DEC_21154, NULL))) {
+	bus0_dev = pci_get_device(PCI_VENDOR_ID_DEC,
+				  PCI_DEVICE_ID_DEC_21154, NULL);
+	if (!bus0_dev) {
 		status = -ENODEV;
 		goto init_register_error;
 	}
@@ -247,14 +247,14 @@
 	pci_dev_put(bus0_dev);
 
 	status = cpci_hp_register_bus(bus0, 0x0a, 0x0f);
-	if(status != 0) {
+	if (status != 0) {
 		err("could not register cPCI hotplug bus");
 		goto init_register_error;
 	}
 	dbg("registered bus");
 
 	status = cpci_hp_start();
-	if(status != 0) {
+	if (status != 0) {
 		err("could not started cPCI hotplug system");
 		cpci_hp_unregister_bus(bus0);
 		goto init_register_error;
@@ -300,11 +300,11 @@
 
 	info(DRIVER_DESC " version: " DRIVER_VERSION);
 	r = request_region(ENUM_PORT, 1, "#ENUM hotswap signal register");
-	if(!r)
+	if (!r)
 		return -EBUSY;
 
 	rc = pci_register_driver(&zt5550_hc_driver);
-	if(rc < 0)
+	if (rc < 0)
 		release_region(ENUM_PORT, 1);
 	return rc;
 }
diff --git a/drivers/pci/hotplug/cpqphp.h b/drivers/pci/hotplug/cpqphp.h
index 0450f40..b28b2d2 100644
--- a/drivers/pci/hotplug/cpqphp.h
+++ b/drivers/pci/hotplug/cpqphp.h
@@ -690,7 +690,7 @@
 
 	status = (readl(ctrl->hpc_reg + INT_INPUT_CLEAR) & (0x01L << hp_slot));
 
-	return(status == 0) ? 1 : 0;
+	return (status == 0) ? 1 : 0;
 }
 
 
diff --git a/drivers/pci/hotplug/cpqphp_core.c b/drivers/pci/hotplug/cpqphp_core.c
index 4aaee74..a53084d 100644
--- a/drivers/pci/hotplug/cpqphp_core.c
+++ b/drivers/pci/hotplug/cpqphp_core.c
@@ -1096,9 +1096,8 @@
 
 	/* initialize our threads if they haven't already been started up */
 	rc = one_time_init();
-	if (rc) {
+	if (rc)
 		goto err_free_bus;
-	}
 
 	dbg("pdev = %p\n", pdev);
 	dbg("pci resource start %llx\n", (unsigned long long)pci_resource_start(pdev, 0));
diff --git a/drivers/pci/hotplug/cpqphp_ctrl.c b/drivers/pci/hotplug/cpqphp_ctrl.c
index bde47fc..c5cbefe 100644
--- a/drivers/pci/hotplug/cpqphp_ctrl.c
+++ b/drivers/pci/hotplug/cpqphp_ctrl.c
@@ -705,9 +705,8 @@
 		if (temp == max) {
 			*head = max->next;
 		} else {
-			while (temp && temp->next != max) {
+			while (temp && temp->next != max)
 				temp = temp->next;
-			}
 
 			if (temp)
 				temp->next = max->next;
@@ -903,9 +902,8 @@
 	/*
 	 * Check to see if it was our interrupt
 	 */
-	if (!(misc & 0x000C)) {
+	if (!(misc & 0x000C))
 		return IRQ_NONE;
-	}
 
 	if (misc & 0x0004) {
 		/*
@@ -1143,7 +1141,7 @@
 	/* We don't allow freq/mode changes if we find another adapter running
 	 * in another slot on this controller
 	 */
-	for(slot = ctrl->slot; slot; slot = slot->next) {
+	for (slot = ctrl->slot; slot; slot = slot->next) {
 		if (slot->device == (hp_slot + ctrl->slot_device_offset))
 			continue;
 		if (!slot->hotplug_slot || !slot->hotplug_slot->info)
@@ -1193,7 +1191,7 @@
 
 	reg16 = readw(ctrl->hpc_reg + NEXT_CURR_FREQ);
 	reg16 &= ~0x000F;
-	switch(adapter_speed) {
+	switch (adapter_speed) {
 		case(PCI_SPEED_133MHz_PCIX):
 			reg = 0x75;
 			reg16 |= 0xB;
@@ -2006,9 +2004,8 @@
 	/* Check to see if the interlock is closed */
 	tempdword = readl(ctrl->hpc_reg + INT_INPUT_CLEAR);
 
-	if (tempdword & (0x01 << hp_slot)) {
+	if (tempdword & (0x01 << hp_slot))
 		return 1;
-	}
 
 	if (func->is_a_board) {
 		rc = board_replaced(func, ctrl);
@@ -2070,9 +2067,8 @@
 		}
 	}
 
-	if (rc) {
+	if (rc)
 		dbg("%s: rc = %d\n", __func__, rc);
-	}
 
 	if (p_slot)
 		update_slot_info(ctrl, p_slot);
@@ -2095,9 +2091,8 @@
 	device = func->device;
 	func = cpqhp_slot_find(ctrl->bus, device, index++);
 	p_slot = cpqhp_find_slot(ctrl, device);
-	if (p_slot) {
+	if (p_slot)
 		physical_slot = p_slot->number;
-	}
 
 	/* Make sure there are no video controllers here */
 	while (func && !rc) {
diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
index 0968a9b..1e08ff8 100644
--- a/drivers/pci/hotplug/cpqphp_nvram.c
+++ b/drivers/pci/hotplug/cpqphp_nvram.c
@@ -204,9 +204,8 @@
 	u8 temp_byte = 0xFF;
 	u32 rc;
 
-	if (!check_for_compaq_ROM(rom_start)) {
+	if (!check_for_compaq_ROM(rom_start))
 		return -ENODEV;
-	}
 
 	available = 1024;
 
@@ -250,9 +249,8 @@
 
 	available = 1024;
 
-	if (!check_for_compaq_ROM(rom_start)) {
+	if (!check_for_compaq_ROM(rom_start))
 		return(1);
-	}
 
 	buffer = (u32*) evbuffer;
 
@@ -427,9 +425,9 @@
 
 void compaq_nvram_init (void __iomem *rom_start)
 {
-	if (rom_start) {
+	if (rom_start)
 		compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
-	}
+
 	dbg("int15 entry  = %p\n", compaq_int15_entry_point);
 
 	/* initialize our int15 lock */
@@ -661,9 +659,8 @@
 
 	if (evbuffer_init) {
 		rc = store_HRT(rom_start);
-		if (rc) {
+		if (rc)
 			err(msg_unable_to_save);
-		}
 	}
 	return rc;
 }
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index f7b8684..3efaf4c 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -1023,7 +1023,8 @@
 	debug("ENABLING SLOT........\n");
 	slot_cur = hs->private;
 
-	if ((rc = validate(slot_cur, ENABLE))) {
+	rc = validate(slot_cur, ENABLE);
+	if (rc) {
 		err("validate function failed\n");
 		goto error_nopower;
 	}
@@ -1199,9 +1200,8 @@
 
 	debug("DISABLING SLOT...\n");
 
-	if ((slot_cur == NULL) || (slot_cur->ctrl == NULL)) {
+	if ((slot_cur == NULL) || (slot_cur->ctrl == NULL))
 		return -ENODEV;
-	}
 
 	flag = slot_cur->flag;
 	slot_cur->flag = 1;
@@ -1336,17 +1336,20 @@
 	for (i = 0; i < 16; i++)
 		irqs[i] = 0;
 
-	if ((rc = ibmphp_access_ebda()))
+	rc = ibmphp_access_ebda();
+	if (rc)
 		goto error;
 	debug("after ibmphp_access_ebda()\n");
 
-	if ((rc = ibmphp_rsrc_init()))
+	rc = ibmphp_rsrc_init();
+	if (rc)
 		goto error;
 	debug("AFTER Resource & EBDA INITIALIZATIONS\n");
 
 	max_slots = get_max_slots();
 
-	if ((rc = ibmphp_register_pci()))
+	rc = ibmphp_register_pci();
+	if (rc)
 		goto error;
 
 	if (init_ops()) {
@@ -1355,9 +1358,9 @@
 	}
 
 	ibmphp_print_test();
-	if ((rc = ibmphp_hpc_start_poll_thread())) {
+	rc = ibmphp_hpc_start_poll_thread();
+	if (rc)
 		goto error;
-	}
 
 exit:
 	return rc;
diff --git a/drivers/pci/hotplug/ibmphp_ebda.c b/drivers/pci/hotplug/ibmphp_ebda.c
index 0f65ac5..d9b197d 100644
--- a/drivers/pci/hotplug/ibmphp_ebda.c
+++ b/drivers/pci/hotplug/ibmphp_ebda.c
@@ -215,9 +215,8 @@
 			debug ("%s - cap of the slot: %x\n", __func__, hpc_ptr->slots[index].slot_cap);
 		}
 
-		for (index = 0; index < hpc_ptr->bus_count; index++) {
+		for (index = 0; index < hpc_ptr->bus_count; index++)
 			debug ("%s - bus# of each bus controlled by this ctlr: %x\n", __func__, hpc_ptr->buses[index].bus_num);
-		}
 
 		debug ("%s - type of hpc: %x\n", __func__, hpc_ptr->ctlr_type);
 		switch (hpc_ptr->ctlr_type) {
diff --git a/drivers/pci/hotplug/ibmphp_hpc.c b/drivers/pci/hotplug/ibmphp_hpc.c
index a936022..2208767 100644
--- a/drivers/pci/hotplug/ibmphp_hpc.c
+++ b/drivers/pci/hotplug/ibmphp_hpc.c
@@ -997,9 +997,8 @@
 		rc = ibmphp_do_disable_slot (pslot);
 	}
 
-	if (update || disable) {
+	if (update || disable)
 		ibmphp_update_slot_info (pslot);
-	}
 
 	debug ("%s - Exit rc[%d] disable[%x] update[%x]\n", __func__, rc, disable, update);
 
diff --git a/drivers/pci/hotplug/ibmphp_pci.c b/drivers/pci/hotplug/ibmphp_pci.c
index 2fd2967..814cea2 100644
--- a/drivers/pci/hotplug/ibmphp_pci.c
+++ b/drivers/pci/hotplug/ibmphp_pci.c
@@ -145,7 +145,8 @@
 				case PCI_HEADER_TYPE_NORMAL:
 					debug ("single device case.... vendor id = %x, hdr_type = %x, class = %x\n", vendor_id, hdr_type, class);
 					assign_alt_irq (cur_func, class_code);
-					if ((rc = configure_device (cur_func)) < 0) {
+					rc = configure_device(cur_func);
+					if (rc < 0) {
 						/* We need to do this in case some other BARs were properly inserted */
 						err ("was not able to configure devfunc %x on bus %x.\n",
 						     cur_func->device, cur_func->busno);
@@ -157,7 +158,8 @@
 					break;
 				case PCI_HEADER_TYPE_MULTIDEVICE:
 					assign_alt_irq (cur_func, class_code);
-					if ((rc = configure_device (cur_func)) < 0) {
+					rc = configure_device(cur_func);
+					if (rc < 0) {
 						/* We need to do this in case some other BARs were properly inserted */
 						err ("was not able to configure devfunc %x on bus %x...bailing out\n",
 						     cur_func->device, cur_func->busno);
diff --git a/drivers/pci/hotplug/ibmphp_res.c b/drivers/pci/hotplug/ibmphp_res.c
index f34745a..219ba80 100644
--- a/drivers/pci/hotplug/ibmphp_res.c
+++ b/drivers/pci/hotplug/ibmphp_res.c
@@ -224,7 +224,8 @@
 			if ((curr->rsrc_type & RESTYPE) == MMASK) {
 				/* no bus structure exists in place yet */
 				if (list_empty (&gbuses)) {
-					if ((rc = alloc_bus_range (&newbus, &newrange, curr, MEM, 1)))
+					rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1);
+					if (rc)
 						return rc;
 					list_add_tail (&newbus->bus_list, &gbuses);
 					debug ("gbuses = NULL, Memory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -237,7 +238,8 @@
 							return rc;
 					} else {
 						/* went through all the buses and didn't find ours, need to create a new bus node */
-						if ((rc = alloc_bus_range (&newbus, &newrange, curr, MEM, 1)))
+						rc = alloc_bus_range(&newbus, &newrange, curr, MEM, 1);
+						if (rc)
 							return rc;
 
 						list_add_tail (&newbus->bus_list, &gbuses);
@@ -248,7 +250,8 @@
 				/* prefetchable memory */
 				if (list_empty (&gbuses)) {
 					/* no bus structure exists in place yet */
-					if ((rc = alloc_bus_range (&newbus, &newrange, curr, PFMEM, 1)))
+					rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1);
+					if (rc)
 						return rc;
 					list_add_tail (&newbus->bus_list, &gbuses);
 					debug ("gbuses = NULL, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -261,7 +264,8 @@
 							return rc;
 					} else {
 						/* went through all the buses and didn't find ours, need to create a new bus node */
-						if ((rc = alloc_bus_range (&newbus, &newrange, curr, PFMEM, 1)))
+						rc = alloc_bus_range(&newbus, &newrange, curr, PFMEM, 1);
+						if (rc)
 							return rc;
 						list_add_tail (&newbus->bus_list, &gbuses);
 						debug ("1st Bus, PFMemory Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -271,7 +275,8 @@
 				/* IO */
 				if (list_empty (&gbuses)) {
 					/* no bus structure exists in place yet */
-					if ((rc = alloc_bus_range (&newbus, &newrange, curr, IO, 1)))
+					rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1);
+					if (rc)
 						return rc;
 					list_add_tail (&newbus->bus_list, &gbuses);
 					debug ("gbuses = NULL, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -283,7 +288,8 @@
 							return rc;
 					} else {
 						/* went through all the buses and didn't find ours, need to create a new bus node */
-						if ((rc = alloc_bus_range (&newbus, &newrange, curr, IO, 1)))
+						rc = alloc_bus_range(&newbus, &newrange, curr, IO, 1);
+						if (rc)
 							return rc;
 						list_add_tail (&newbus->bus_list, &gbuses);
 						debug ("1st Bus, IO Primary Bus %x [%x - %x]\n", newbus->busno, newrange->start, newrange->end);
@@ -1038,7 +1044,9 @@
 		/* found our range */
 		if (!res_prev) {
 			/* first time in the loop */
-			if ((res_cur->start != range->start) && ((len_tmp = res_cur->start - 1 - range->start) >= res->len)) {
+			len_tmp = res_cur->start - 1 - range->start;
+
+			if ((res_cur->start != range->start) && (len_tmp >= res->len)) {
 				debug ("len_tmp = %x\n", len_tmp);
 
 				if ((len_tmp < len_cur) || (len_cur == 0)) {
@@ -1078,7 +1086,9 @@
 		}
 		if (!res_cur->next) {
 			/* last device on the range */
-			if ((range->end != res_cur->end) && ((len_tmp = range->end - (res_cur->end + 1)) >= res->len)) {
+			len_tmp = range->end - (res_cur->end + 1);
+
+			if ((range->end != res_cur->end) && (len_tmp >= res->len)) {
 				debug ("len_tmp = %x\n", len_tmp);
 				if ((len_tmp < len_cur) || (len_cur == 0)) {
 
@@ -1117,8 +1127,9 @@
 		if (res_prev) {
 			if (res_prev->rangeno != res_cur->rangeno) {
 				/* 1st device on this range */
-				if ((res_cur->start != range->start) &&
-					((len_tmp = res_cur->start - 1 - range->start) >= res->len)) {
+				len_tmp = res_cur->start - 1 - range->start;
+
+				if ((res_cur->start != range->start) &&	(len_tmp >= res->len)) {
 					if ((len_tmp < len_cur) || (len_cur == 0)) {
 						if ((range->start % tmp_divide) == 0) {
 							/* just perfect, starting address is divisible by length */
@@ -1153,7 +1164,9 @@
 				}
 			} else {
 				/* in the same range */
-				if ((len_tmp = res_cur->start - 1 - res_prev->end - 1) >= res->len) {
+				len_tmp = res_cur->start - 1 - res_prev->end - 1;
+
+				if (len_tmp >= res->len) {
 					if ((len_tmp < len_cur) || (len_cur == 0)) {
 						if (((res_prev->end + 1) % tmp_divide) == 0) {
 							/* just perfect, starting address's divisible by length */
@@ -1212,7 +1225,9 @@
 				break;
 		}
 		while (range) {
-			if ((len_tmp = range->end - range->start) >= res->len) {
+			len_tmp = range->end - range->start;
+
+			if (len_tmp >= res->len) {
 				if ((len_tmp < len_cur) || (len_cur == 0)) {
 					if ((range->start % tmp_divide) == 0) {
 						/* just perfect, starting address's divisible by length */
@@ -1276,7 +1291,9 @@
 					break;
 			}
 			while (range) {
-				if ((len_tmp = range->end - range->start) >= res->len) {
+				len_tmp = range->end - range->start;
+
+				if (len_tmp >= res->len) {
 					if ((len_tmp < len_cur) || (len_cur == 0)) {
 						if ((range->start % tmp_divide) == 0) {
 							/* just perfect, starting address's divisible by length */
@@ -1335,7 +1352,7 @@
 				return -EINVAL;
 			}
 		}
-	}	/* end if(!res_cur) */
+	}	/* end if (!res_cur) */
 	return -EINVAL;
 }
 
diff --git a/drivers/pci/hotplug/pciehp.h b/drivers/pci/hotplug/pciehp.h
index 9e5a9fb..b115219 100644
--- a/drivers/pci/hotplug/pciehp.h
+++ b/drivers/pci/hotplug/pciehp.h
@@ -92,7 +92,7 @@
 	struct slot *slot;
 	wait_queue_head_t queue;	/* sleep & wake process */
 	u32 slot_cap;
-	u32 slot_ctrl;
+	u16 slot_ctrl;
 	struct timer_list poll_timer;
 	unsigned long cmd_started;	/* jiffies */
 	unsigned int cmd_busy:1;
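The slot_ctrl narrowing above matches the hardware: PCI Express Slot Control (PCI_EXP_SLTCTL) is a 16-bit register. A minimal sketch of reading it, assuming a hypothetical helper name (not part of this patch):

#include <linux/pci.h>

/* Illustrative only: Slot Control is 16 bits wide, hence the u16. */
static u16 example_read_slot_ctrl(struct pci_dev *port)
{
	u16 slot_ctrl = 0;

	pcie_capability_read_word(port, PCI_EXP_SLTCTL, &slot_ctrl);
	return slot_ctrl;
}
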
diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
index 07aa722..3a5e7e2 100644
--- a/drivers/pci/hotplug/pciehp_core.c
+++ b/drivers/pci/hotplug/pciehp_core.c
@@ -262,6 +262,13 @@
 		goto err_out_none;
 	}
 
+	if (!dev->port->subordinate) {
+		/* Can happen if we run out of bus numbers during probe */
+		dev_err(&dev->device,
+			"Hotplug bridge without secondary bus, ignoring\n");
+		goto err_out_none;
+	}
+
 	ctrl = pcie_init(dev);
 	if (!ctrl) {
 		dev_err(&dev->device, "Controller initialization failed\n");
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 9da84b8..f0dc6cb 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -171,9 +171,9 @@
 	 * interrupts.
 	 */
 	if (!rc)
-		ctrl_info(ctrl, "Timeout on hotplug command %#010x (issued %u msec ago)\n",
+		ctrl_info(ctrl, "Timeout on hotplug command %#06x (issued %u msec ago)\n",
 			  ctrl->slot_ctrl,
-			  jiffies_to_msecs(now - ctrl->cmd_started));
+			  jiffies_to_msecs(jiffies - ctrl->cmd_started));
 }
 
 /**
@@ -422,9 +422,9 @@
 	default:
 		return;
 	}
+	pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
 	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
 		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, slot_cmd);
-	pcie_write_cmd(ctrl, slot_cmd, PCI_EXP_SLTCTL_AIC);
 }
 
 void pciehp_green_led_on(struct slot *slot)
@@ -602,6 +602,8 @@
 		PCI_EXP_SLTCTL_DLLSCE);
 
 	pcie_write_cmd(ctrl, cmd, mask);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, cmd);
 }
 
 static void pcie_disable_notification(struct controller *ctrl)
@@ -613,6 +615,8 @@
 		PCI_EXP_SLTCTL_HPIE | PCI_EXP_SLTCTL_CCIE |
 		PCI_EXP_SLTCTL_DLLSCE);
 	pcie_write_cmd(ctrl, 0, mask);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
 }
 
 /*
@@ -640,6 +644,8 @@
 	stat_mask |= PCI_EXP_SLTSTA_DLLSC;
 
 	pcie_write_cmd(ctrl, 0, ctrl_mask);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, 0);
 	if (pciehp_poll_mode)
 		del_timer_sync(&ctrl->poll_timer);
 
@@ -647,6 +653,8 @@
 
 	pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, stat_mask);
 	pcie_write_cmd(ctrl, ctrl_mask, ctrl_mask);
+	ctrl_dbg(ctrl, "%s: SLOTCTRL %x write cmd %x\n", __func__,
+		 pci_pcie_cap(ctrl->pcie->port) + PCI_EXP_SLTCTL, ctrl_mask);
 	if (pciehp_poll_mode)
 		int_poll_timeout(ctrl->poll_timer.data);
 
@@ -785,9 +793,6 @@
 		PCI_EXP_SLTSTA_MRLSC | PCI_EXP_SLTSTA_PDC |
 		PCI_EXP_SLTSTA_CC | PCI_EXP_SLTSTA_DLLSC);
 
-	/* Disable software notification */
-	pcie_disable_notification(ctrl);
-
 	ctrl_info(ctrl, "Slot #%d AttnBtn%c AttnInd%c PwrInd%c PwrCtrl%c MRL%c Interlock%c NoCompl%c LLActRep%c\n",
 		(slot_cap & PCI_EXP_SLTCAP_PSN) >> 19,
 		FLAG(slot_cap, PCI_EXP_SLTCAP_ABP),
diff --git a/drivers/pci/hotplug/pciehp_pci.c b/drivers/pci/hotplug/pciehp_pci.c
index 5f871f4..9e69403 100644
--- a/drivers/pci/hotplug/pciehp_pci.c
+++ b/drivers/pci/hotplug/pciehp_pci.c
@@ -65,14 +65,7 @@
 			pci_hp_add_bridge(dev);
 
 	pci_assign_unassigned_bridge_resources(bridge);
-
-	list_for_each_entry(dev, &parent->devices, bus_list) {
-		if ((dev->class >> 16) == PCI_BASE_CLASS_DISPLAY)
-			continue;
-
-		pci_configure_slot(dev);
-	}
-
+	pcie_bus_configure_settings(parent);
 	pci_bus_add_devices(parent);
 
  out:
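With pci_configure_slot() gone, pciehp (here) and shpchp (further down) share the same rescan pattern: assign bridge resources, let the core choose consistent MPS/MRRS settings for the subtree, then add the devices. A sketch under the assumption of a hypothetical example_rescan_below() helper:

#include <linux/pci.h>

static void example_rescan_below(struct pci_dev *bridge)
{
	struct pci_bus *parent = bridge->subordinate;

	pci_assign_unassigned_bridge_resources(bridge);
	/* Core-managed MPS/MRRS configuration replaces pci_configure_slot() */
	pcie_bus_configure_settings(parent);
	pci_bus_add_devices(parent);
}
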
diff --git a/drivers/pci/hotplug/pcihp_slot.c b/drivers/pci/hotplug/pcihp_slot.c
deleted file mode 100644
index e246a10..0000000
--- a/drivers/pci/hotplug/pcihp_slot.c
+++ /dev/null
@@ -1,180 +0,0 @@
-/*
- * Copyright (C) 1995,2001 Compaq Computer Corporation
- * Copyright (C) 2001 Greg Kroah-Hartman (greg@kroah.com)
- * Copyright (C) 2001 IBM Corp.
- * Copyright (C) 2003-2004 Intel Corporation
- * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
- *
- * All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or (at
- * your option) any later version.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
- * NON INFRINGEMENT.  See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <linux/pci.h>
-#include <linux/export.h>
-#include <linux/pci_hotplug.h>
-
-static struct hpp_type0 pci_default_type0 = {
-	.revision = 1,
-	.cache_line_size = 8,
-	.latency_timer = 0x40,
-	.enable_serr = 0,
-	.enable_perr = 0,
-};
-
-static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
-{
-	u16 pci_cmd, pci_bctl;
-
-	if (!hpp) {
-		/*
-		 * Perhaps we *should* use default settings for PCIe, but
-		 * pciehp didn't, so we won't either.
-		 */
-		if (pci_is_pcie(dev))
-			return;
-		dev_info(&dev->dev, "using default PCI settings\n");
-		hpp = &pci_default_type0;
-	}
-
-	if (hpp->revision > 1) {
-		dev_warn(&dev->dev,
-			 "PCI settings rev %d not supported; using defaults\n",
-			 hpp->revision);
-		hpp = &pci_default_type0;
-	}
-
-	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
-	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
-	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
-	if (hpp->enable_serr)
-		pci_cmd |= PCI_COMMAND_SERR;
-	else
-		pci_cmd &= ~PCI_COMMAND_SERR;
-	if (hpp->enable_perr)
-		pci_cmd |= PCI_COMMAND_PARITY;
-	else
-		pci_cmd &= ~PCI_COMMAND_PARITY;
-	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
-
-	/* Program bridge control value */
-	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
-		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
-				      hpp->latency_timer);
-		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
-		if (hpp->enable_serr)
-			pci_bctl |= PCI_BRIDGE_CTL_SERR;
-		else
-			pci_bctl &= ~PCI_BRIDGE_CTL_SERR;
-		if (hpp->enable_perr)
-			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
-		else
-			pci_bctl &= ~PCI_BRIDGE_CTL_PARITY;
-		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
-	}
-}
-
-static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
-{
-	if (hpp)
-		dev_warn(&dev->dev, "PCI-X settings not supported\n");
-}
-
-static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
-{
-	int pos;
-	u32 reg32;
-
-	if (!hpp)
-		return;
-
-	if (hpp->revision > 1) {
-		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
-			 hpp->revision);
-		return;
-	}
-
-	/* Initialize Device Control Register */
-	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
-			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
-
-	/* Initialize Link Control Register */
-	if (dev->subordinate)
-		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
-			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
-
-	/* Find Advanced Error Reporting Enhanced Capability */
-	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
-	if (!pos)
-		return;
-
-	/* Initialize Uncorrectable Error Mask Register */
-	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
-	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
-	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
-
-	/* Initialize Uncorrectable Error Severity Register */
-	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
-	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
-	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
-
-	/* Initialize Correctable Error Mask Register */
-	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
-	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
-	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
-
-	/* Initialize Advanced Error Capabilities and Control Register */
-	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
-	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
-	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
-
-	/*
-	 * FIXME: The following two registers are not supported yet.
-	 *
-	 *   o Secondary Uncorrectable Error Severity Register
-	 *   o Secondary Uncorrectable Error Mask Register
-	 */
-}
-
-void pci_configure_slot(struct pci_dev *dev)
-{
-	struct pci_dev *cdev;
-	struct hotplug_params hpp;
-	int ret;
-
-	if (!(dev->hdr_type == PCI_HEADER_TYPE_NORMAL ||
-			(dev->hdr_type == PCI_HEADER_TYPE_BRIDGE &&
-			(dev->class >> 8) == PCI_CLASS_BRIDGE_PCI)))
-		return;
-
-	pcie_bus_configure_settings(dev->bus);
-
-	memset(&hpp, 0, sizeof(hpp));
-	ret = pci_get_hp_params(dev, &hpp);
-	if (ret)
-		dev_warn(&dev->dev, "no hotplug settings from platform\n");
-
-	program_hpp_type2(dev, hpp.t2);
-	program_hpp_type1(dev, hpp.t1);
-	program_hpp_type0(dev, hpp.t0);
-
-	if (dev->subordinate) {
-		list_for_each_entry(cdev, &dev->subordinate->devices,
-				    bus_list)
-			pci_configure_slot(cdev);
-	}
-}
-EXPORT_SYMBOL_GPL(pci_configure_slot);
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index a81fb67..10c7927 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -195,7 +195,8 @@
 	int rc = 0;
 
 	ctrl_dbg(ctrl, "Change speed to %d\n", speed);
-	if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed))) {
+	rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, speed);
+	if (rc) {
 		ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
 			 __func__);
 		return WRONG_BUS_FREQUENCY;
@@ -261,14 +262,16 @@
 	}
 
 	if ((ctrl->pci_dev->vendor == 0x8086) && (ctrl->pci_dev->device == 0x0332)) {
-		if ((rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz))) {
+		rc = p_slot->hpc_ops->set_bus_speed_mode(p_slot, PCI_SPEED_33MHz);
+		if (rc) {
 			ctrl_err(ctrl, "%s: Issue of set bus speed mode command failed\n",
 				 __func__);
 			return WRONG_BUS_FREQUENCY;
 		}
 
 		/* turn on board, blink green LED, turn off Amber LED */
-		if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) {
+		rc = p_slot->hpc_ops->slot_enable(p_slot);
+		if (rc) {
 			ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
 			return rc;
 		}
@@ -296,7 +299,8 @@
 		return rc;
 
 	/* turn on board, blink green LED, turn off Amber LED */
-	if ((rc = p_slot->hpc_ops->slot_enable(p_slot))) {
+	rc = p_slot->hpc_ops->slot_enable(p_slot);
+	if (rc) {
 		ctrl_err(ctrl, "Issue of Slot Enable command failed\n");
 		return rc;
 	}
@@ -595,7 +599,7 @@
 	ctrl_dbg(ctrl, "%s: p_slot->pwr_save %x\n", __func__, p_slot->pwr_save);
 	p_slot->hpc_ops->get_latch_status(p_slot, &getstatus);
 
-	if(((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) ||
+	if (((p_slot->ctrl->pci_dev->vendor == PCI_VENDOR_ID_AMD) ||
 	    (p_slot->ctrl->pci_dev->device == PCI_DEVICE_ID_AMD_POGO_7458))
 	     && p_slot->ctrl->num_slots == 1) {
 		/* handle amd pogo errata; this must be done before enable  */
diff --git a/drivers/pci/hotplug/shpchp_hpc.c b/drivers/pci/hotplug/shpchp_hpc.c
index 29e2235..7d223e9 100644
--- a/drivers/pci/hotplug/shpchp_hpc.c
+++ b/drivers/pci/hotplug/shpchp_hpc.c
@@ -466,7 +466,8 @@
 	u8 m66_cap  = !!(slot_reg & MHZ66_CAP);
 	u8 pi, pcix_cap;
 
-	if ((retval = hpc_get_prog_int(slot, &pi)))
+	retval = hpc_get_prog_int(slot, &pi);
+	if (retval)
 		return retval;
 
 	switch (pi) {
@@ -798,7 +799,7 @@
 
 	ctrl_dbg(ctrl, "%s: intr_loc = %x\n", __func__, intr_loc);
 
-	if(!shpchp_poll_mode) {
+	if (!shpchp_poll_mode) {
 		/*
 		 * Mask Global Interrupt Mask - see implementation
 		 * note on p. 139 of SHPC spec rev 1.0
diff --git a/drivers/pci/hotplug/shpchp_pci.c b/drivers/pci/hotplug/shpchp_pci.c
index 469454e..f8cd3a2 100644
--- a/drivers/pci/hotplug/shpchp_pci.c
+++ b/drivers/pci/hotplug/shpchp_pci.c
@@ -69,13 +69,7 @@
 	}
 
 	pci_assign_unassigned_bridge_resources(bridge);
-
-	list_for_each_entry(dev, &parent->devices, bus_list) {
-		if (PCI_SLOT(dev->devfn) != p_slot->device)
-			continue;
-		pci_configure_slot(dev);
-	}
-
+	pcie_bus_configure_settings(parent);
 	pci_bus_add_devices(parent);
 
  out:
diff --git a/drivers/pci/iov.c b/drivers/pci/iov.c
index cb6f247..4d109c0 100644
--- a/drivers/pci/iov.c
+++ b/drivers/pci/iov.c
@@ -633,7 +633,7 @@
 		 * our dev as the physical function and the assigned bit is set
 		 */
 		if (vfdev->is_virtfn && (vfdev->physfn == dev) &&
-		    (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED))
+			pci_is_dev_assigned(vfdev))
 			vfs_assigned++;
 
 		vfdev = pci_get_device(dev->vendor, dev_id, vfdev);
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index 37263b0..6ebf8ed 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -10,6 +10,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/pci_hotplug.h>
 #include <linux/module.h>
 #include <linux/pci-aspm.h>
 #include <linux/pci-acpi.h>
@@ -17,6 +18,267 @@
 #include <linux/pm_qos.h>
 #include "pci.h"
 
+phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
+{
+	acpi_status status = AE_NOT_EXIST;
+	unsigned long long mcfg_addr;
+
+	if (handle)
+		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
+					       NULL, &mcfg_addr);
+	if (ACPI_FAILURE(status))
+		return 0;
+
+	return (phys_addr_t)mcfg_addr;
+}
+
+static acpi_status decode_type0_hpx_record(union acpi_object *record,
+					   struct hotplug_params *hpx)
+{
+	int i;
+	union acpi_object *fields = record->package.elements;
+	u32 revision = fields[1].integer.value;
+
+	switch (revision) {
+	case 1:
+		if (record->package.count != 6)
+			return AE_ERROR;
+		for (i = 2; i < 6; i++)
+			if (fields[i].type != ACPI_TYPE_INTEGER)
+				return AE_ERROR;
+		hpx->t0 = &hpx->type0_data;
+		hpx->t0->revision        = revision;
+		hpx->t0->cache_line_size = fields[2].integer.value;
+		hpx->t0->latency_timer   = fields[3].integer.value;
+		hpx->t0->enable_serr     = fields[4].integer.value;
+		hpx->t0->enable_perr     = fields[5].integer.value;
+		break;
+	default:
+		printk(KERN_WARNING
+		       "%s: Type 0 Revision %d record not supported\n",
+		       __func__, revision);
+		return AE_ERROR;
+	}
+	return AE_OK;
+}
+
+static acpi_status decode_type1_hpx_record(union acpi_object *record,
+					   struct hotplug_params *hpx)
+{
+	int i;
+	union acpi_object *fields = record->package.elements;
+	u32 revision = fields[1].integer.value;
+
+	switch (revision) {
+	case 1:
+		if (record->package.count != 5)
+			return AE_ERROR;
+		for (i = 2; i < 5; i++)
+			if (fields[i].type != ACPI_TYPE_INTEGER)
+				return AE_ERROR;
+		hpx->t1 = &hpx->type1_data;
+		hpx->t1->revision      = revision;
+		hpx->t1->max_mem_read  = fields[2].integer.value;
+		hpx->t1->avg_max_split = fields[3].integer.value;
+		hpx->t1->tot_max_split = fields[4].integer.value;
+		break;
+	default:
+		printk(KERN_WARNING
+		       "%s: Type 1 Revision %d record not supported\n",
+		       __func__, revision);
+		return AE_ERROR;
+	}
+	return AE_OK;
+}
+
+static acpi_status decode_type2_hpx_record(union acpi_object *record,
+					   struct hotplug_params *hpx)
+{
+	int i;
+	union acpi_object *fields = record->package.elements;
+	u32 revision = fields[1].integer.value;
+
+	switch (revision) {
+	case 1:
+		if (record->package.count != 18)
+			return AE_ERROR;
+		for (i = 2; i < 18; i++)
+			if (fields[i].type != ACPI_TYPE_INTEGER)
+				return AE_ERROR;
+		hpx->t2 = &hpx->type2_data;
+		hpx->t2->revision      = revision;
+		hpx->t2->unc_err_mask_and      = fields[2].integer.value;
+		hpx->t2->unc_err_mask_or       = fields[3].integer.value;
+		hpx->t2->unc_err_sever_and     = fields[4].integer.value;
+		hpx->t2->unc_err_sever_or      = fields[5].integer.value;
+		hpx->t2->cor_err_mask_and      = fields[6].integer.value;
+		hpx->t2->cor_err_mask_or       = fields[7].integer.value;
+		hpx->t2->adv_err_cap_and       = fields[8].integer.value;
+		hpx->t2->adv_err_cap_or        = fields[9].integer.value;
+		hpx->t2->pci_exp_devctl_and    = fields[10].integer.value;
+		hpx->t2->pci_exp_devctl_or     = fields[11].integer.value;
+		hpx->t2->pci_exp_lnkctl_and    = fields[12].integer.value;
+		hpx->t2->pci_exp_lnkctl_or     = fields[13].integer.value;
+		hpx->t2->sec_unc_err_sever_and = fields[14].integer.value;
+		hpx->t2->sec_unc_err_sever_or  = fields[15].integer.value;
+		hpx->t2->sec_unc_err_mask_and  = fields[16].integer.value;
+		hpx->t2->sec_unc_err_mask_or   = fields[17].integer.value;
+		break;
+	default:
+		printk(KERN_WARNING
+		       "%s: Type 2 Revision %d record not supported\n",
+		       __func__, revision);
+		return AE_ERROR;
+	}
+	return AE_OK;
+}
+
+static acpi_status acpi_run_hpx(acpi_handle handle, struct hotplug_params *hpx)
+{
+	acpi_status status;
+	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
+	union acpi_object *package, *record, *fields;
+	u32 type;
+	int i;
+
+	/* Clear the return buffer with zeros */
+	memset(hpx, 0, sizeof(struct hotplug_params));
+
+	status = acpi_evaluate_object(handle, "_HPX", NULL, &buffer);
+	if (ACPI_FAILURE(status))
+		return status;
+
+	package = (union acpi_object *)buffer.pointer;
+	if (package->type != ACPI_TYPE_PACKAGE) {
+		status = AE_ERROR;
+		goto exit;
+	}
+
+	for (i = 0; i < package->package.count; i++) {
+		record = &package->package.elements[i];
+		if (record->type != ACPI_TYPE_PACKAGE) {
+			status = AE_ERROR;
+			goto exit;
+		}
+
+		fields = record->package.elements;
+		if (fields[0].type != ACPI_TYPE_INTEGER ||
+		    fields[1].type != ACPI_TYPE_INTEGER) {
+			status = AE_ERROR;
+			goto exit;
+		}
+
+		type = fields[0].integer.value;
+		switch (type) {
+		case 0:
+			status = decode_type0_hpx_record(record, hpx);
+			if (ACPI_FAILURE(status))
+				goto exit;
+			break;
+		case 1:
+			status = decode_type1_hpx_record(record, hpx);
+			if (ACPI_FAILURE(status))
+				goto exit;
+			break;
+		case 2:
+			status = decode_type2_hpx_record(record, hpx);
+			if (ACPI_FAILURE(status))
+				goto exit;
+			break;
+		default:
+			printk(KERN_ERR "%s: Type %d record not supported\n",
+			       __func__, type);
+			status = AE_ERROR;
+			goto exit;
+		}
+	}
+ exit:
+	kfree(buffer.pointer);
+	return status;
+}
+
+static acpi_status acpi_run_hpp(acpi_handle handle, struct hotplug_params *hpp)
+{
+	acpi_status status;
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	union acpi_object *package, *fields;
+	int i;
+
+	memset(hpp, 0, sizeof(struct hotplug_params));
+
+	status = acpi_evaluate_object(handle, "_HPP", NULL, &buffer);
+	if (ACPI_FAILURE(status))
+		return status;
+
+	package = (union acpi_object *) buffer.pointer;
+	if (package->type != ACPI_TYPE_PACKAGE ||
+	    package->package.count != 4) {
+		status = AE_ERROR;
+		goto exit;
+	}
+
+	fields = package->package.elements;
+	for (i = 0; i < 4; i++) {
+		if (fields[i].type != ACPI_TYPE_INTEGER) {
+			status = AE_ERROR;
+			goto exit;
+		}
+	}
+
+	hpp->t0 = &hpp->type0_data;
+	hpp->t0->revision        = 1;
+	hpp->t0->cache_line_size = fields[0].integer.value;
+	hpp->t0->latency_timer   = fields[1].integer.value;
+	hpp->t0->enable_serr     = fields[2].integer.value;
+	hpp->t0->enable_perr     = fields[3].integer.value;
+
+exit:
+	kfree(buffer.pointer);
+	return status;
+}
+
+/* pci_get_hp_params
+ *
+ * @dev - the pci_dev for which we want parameters
+ * @hpp - allocated by the caller
+ */
+int pci_get_hp_params(struct pci_dev *dev, struct hotplug_params *hpp)
+{
+	acpi_status status;
+	acpi_handle handle, phandle;
+	struct pci_bus *pbus;
+
+	handle = NULL;
+	for (pbus = dev->bus; pbus; pbus = pbus->parent) {
+		handle = acpi_pci_get_bridge_handle(pbus);
+		if (handle)
+			break;
+	}
+
+	/*
+	 * _HPP settings apply to all child buses, until another _HPP is
+	 * encountered. If we don't find an _HPP for the input pci dev,
+	 * look for it in the parent device scope since that would apply to
+	 * this pci dev.
+	 */
+	while (handle) {
+		status = acpi_run_hpx(handle, hpp);
+		if (ACPI_SUCCESS(status))
+			return 0;
+		status = acpi_run_hpp(handle, hpp);
+		if (ACPI_SUCCESS(status))
+			return 0;
+		if (acpi_is_root_bridge(handle))
+			break;
+		status = acpi_get_parent(handle, &phandle);
+		if (ACPI_FAILURE(status))
+			break;
+		handle = phandle;
+	}
+	return -ENODEV;
+}
+EXPORT_SYMBOL_GPL(pci_get_hp_params);
+
 /**
  * pci_acpi_wake_bus - Root bus wakeup notification fork function.
  * @work: Work item to handle.
@@ -84,20 +346,6 @@
 	return acpi_add_pm_notifier(dev, &pci_dev->dev, pci_acpi_wake_dev);
 }
 
-phys_addr_t acpi_pci_root_get_mcfg_addr(acpi_handle handle)
-{
-	acpi_status status = AE_NOT_EXIST;
-	unsigned long long mcfg_addr;
-
-	if (handle)
-		status = acpi_evaluate_integer(handle, METHOD_NAME__CBA,
-					       NULL, &mcfg_addr);
-	if (ACPI_FAILURE(status))
-		return 0;
-
-	return (phys_addr_t)mcfg_addr;
-}
-
 /*
  * _SxD returns the D-state with the highest power
  * (lowest D-state number) supported in the S-state "x".
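pci_get_hp_params(), now exported from this file, walks up the ACPI hierarchy and fills in whichever _HPX/_HPP record types it finds. A minimal usage sketch (the helper name and the printed fields are illustrative, not part of the patch):

#include <linux/pci.h>
#include <linux/pci_hotplug.h>
#include <linux/string.h>

static void example_show_hp_params(struct pci_dev *dev)
{
	struct hotplug_params hpp;

	memset(&hpp, 0, sizeof(hpp));
	if (pci_get_hp_params(dev, &hpp))
		return;		/* no _HPX/_HPP anywhere above this device */

	if (hpp.t0)		/* Type 0: conventional PCI header settings */
		dev_info(&dev->dev, "cache line size %u, latency timer %u\n",
			 hpp.t0->cache_line_size, hpp.t0->latency_timer);
}
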
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index d04c5ad..2b3c894 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -55,7 +55,6 @@
 		  unsigned long driver_data)
 {
 	struct pci_dynid *dynid;
-	int retval;
 
 	dynid = kzalloc(sizeof(*dynid), GFP_KERNEL);
 	if (!dynid)
@@ -73,9 +72,7 @@
 	list_add_tail(&dynid->node, &drv->dynids.list);
 	spin_unlock(&drv->dynids.lock);
 
-	retval = driver_attach(&drv->driver);
-
-	return retval;
+	return driver_attach(&drv->driver);
 }
 EXPORT_SYMBOL_GPL(pci_add_dynid);
 
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 9ff0a90..76ef791 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -177,7 +177,7 @@
 {
 	struct pci_dev *pci_dev = to_pci_dev(dev);
 
-	return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02x\n",
+	return sprintf(buf, "pci:v%08Xd%08Xsv%08Xsd%08Xbc%02Xsc%02Xi%02X\n",
 		       pci_dev->vendor, pci_dev->device,
 		       pci_dev->subsystem_vendor, pci_dev->subsystem_device,
 		       (u8)(pci_dev->class >> 16), (u8)(pci_dev->class >> 8),
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6e994fc..625a4ac 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1003,12 +1003,19 @@
 	for (i = 0; i < 16; i++)
 		pci_read_config_dword(dev, i * 4, &dev->saved_config_space[i]);
 	dev->state_saved = true;
-	if ((i = pci_save_pcie_state(dev)) != 0)
+
+	i = pci_save_pcie_state(dev);
+	if (i != 0)
 		return i;
-	if ((i = pci_save_pcix_state(dev)) != 0)
+
+	i = pci_save_pcix_state(dev);
+	if (i != 0)
 		return i;
-	if ((i = pci_save_vc_state(dev)) != 0)
+
+	i = pci_save_vc_state(dev);
+	if (i != 0)
 		return i;
+
 	return 0;
 }
 EXPORT_SYMBOL(pci_save_state);
@@ -1907,10 +1914,6 @@
 	if (target_state == PCI_POWER_ERROR)
 		return -EIO;
 
-	/* D3cold during system suspend/hibernate is not supported */
-	if (target_state > PCI_D3hot)
-		target_state = PCI_D3hot;
-
 	pci_enable_wake(dev, target_state, device_may_wakeup(&dev->dev));
 
 	error = pci_set_power_state(dev, target_state);
diff --git a/drivers/pci/pcie/aer/aerdrv_errprint.c b/drivers/pci/pcie/aer/aerdrv_errprint.c
index 35d06e1..c6849d9 100644
--- a/drivers/pci/pcie/aer/aerdrv_errprint.c
+++ b/drivers/pci/pcie/aer/aerdrv_errprint.c
@@ -89,15 +89,17 @@
 	NULL,
 	"Replay Timer Timeout",		/* Bit Position 12	*/
 	"Advisory Non-Fatal",		/* Bit Position 13	*/
+	"Corrected Internal Error",	/* Bit Position 14	*/
+	"Header Log Overflow",		/* Bit Position 15	*/
 };
 
 static const char *aer_uncorrectable_error_string[] = {
-	NULL,
+	"Undefined",			/* Bit Position 0	*/
 	NULL,
 	NULL,
 	NULL,
 	"Data Link Protocol",		/* Bit Position 4	*/
-	NULL,
+	"Surprise Down Error",		/* Bit Position 5	*/
 	NULL,
 	NULL,
 	NULL,
@@ -113,6 +115,11 @@
 	"Malformed TLP",		/* Bit Position 18	*/
 	"ECRC",				/* Bit Position 19	*/
 	"Unsupported Request",		/* Bit Position 20	*/
+	"ACS Violation",		/* Bit Position 21	*/
+	"Uncorrectable Internal Error",	/* Bit Position 22	*/
+	"MC Blocked TLP",		/* Bit Position 23	*/
+	"AtomicOp Egress Blocked",	/* Bit Position 24	*/
+	"TLP Prefix Blocked Error",	/* Bit Position 25	*/
 };
 
 static const char *aer_agent_string[] = {
diff --git a/drivers/pci/pcie/portdrv_pci.c b/drivers/pci/pcie/portdrv_pci.c
index 2ccc9b9..be35da2 100644
--- a/drivers/pci/pcie/portdrv_pci.c
+++ b/drivers/pci/pcie/portdrv_pci.c
@@ -93,77 +93,6 @@
 	return 0;
 }
 
-#ifdef CONFIG_PM_RUNTIME
-struct d3cold_info {
-	bool no_d3cold;
-	unsigned int d3cold_delay;
-};
-
-static int pci_dev_d3cold_info(struct pci_dev *pdev, void *data)
-{
-	struct d3cold_info *info = data;
-
-	info->d3cold_delay = max_t(unsigned int, pdev->d3cold_delay,
-				   info->d3cold_delay);
-	if (pdev->no_d3cold)
-		info->no_d3cold = true;
-	return 0;
-}
-
-static int pcie_port_runtime_suspend(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	struct d3cold_info d3cold_info = {
-		.no_d3cold	= false,
-		.d3cold_delay	= PCI_PM_D3_WAIT,
-	};
-
-	/*
-	 * If any subordinate device disable D3cold, we should not put
-	 * the port into D3cold.  The D3cold delay of port should be
-	 * the max of that of all subordinate devices.
-	 */
-	pci_walk_bus(pdev->subordinate, pci_dev_d3cold_info, &d3cold_info);
-	pdev->no_d3cold = d3cold_info.no_d3cold;
-	pdev->d3cold_delay = d3cold_info.d3cold_delay;
-	return 0;
-}
-
-static int pcie_port_runtime_resume(struct device *dev)
-{
-	return 0;
-}
-
-static int pci_dev_pme_poll(struct pci_dev *pdev, void *data)
-{
-	bool *pme_poll = data;
-
-	if (pdev->pme_poll)
-		*pme_poll = true;
-	return 0;
-}
-
-static int pcie_port_runtime_idle(struct device *dev)
-{
-	struct pci_dev *pdev = to_pci_dev(dev);
-	bool pme_poll = false;
-
-	/*
-	 * If any subordinate device needs pme poll, we should keep
-	 * the port in D0, because we need port in D0 to poll it.
-	 */
-	pci_walk_bus(pdev->subordinate, pci_dev_pme_poll, &pme_poll);
-	/* Delay for a short while to prevent too frequent suspend/resume */
-	if (!pme_poll)
-		pm_schedule_suspend(dev, 10);
-	return -EBUSY;
-}
-#else
-#define pcie_port_runtime_suspend	NULL
-#define pcie_port_runtime_resume	NULL
-#define pcie_port_runtime_idle		NULL
-#endif
-
 static const struct dev_pm_ops pcie_portdrv_pm_ops = {
 	.suspend	= pcie_port_device_suspend,
 	.resume		= pcie_port_device_resume,
@@ -172,9 +101,6 @@
 	.poweroff	= pcie_port_device_suspend,
 	.restore	= pcie_port_device_resume,
 	.resume_noirq	= pcie_port_resume_noirq,
-	.runtime_suspend = pcie_port_runtime_suspend,
-	.runtime_resume = pcie_port_runtime_resume,
-	.runtime_idle	= pcie_port_runtime_idle,
 };
 
 #define PCIE_PORTDRV_PM_OPS	(&pcie_portdrv_pm_ops)
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 636d1c9..efa48dc 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -6,6 +6,7 @@
 #include <linux/delay.h>
 #include <linux/init.h>
 #include <linux/pci.h>
+#include <linux/pci_hotplug.h>
 #include <linux/slab.h>
 #include <linux/module.h>
 #include <linux/cpumask.h>
@@ -744,6 +745,17 @@
 }
 EXPORT_SYMBOL(pci_add_new_bus);
 
+static void pci_enable_crs(struct pci_dev *pdev)
+{
+	u16 root_cap = 0;
+
+	/* Enable CRS Software Visibility if supported */
+	pcie_capability_read_word(pdev, PCI_EXP_RTCAP, &root_cap);
+	if (root_cap & PCI_EXP_RTCAP_CRSVIS)
+		pcie_capability_set_word(pdev, PCI_EXP_RTCTL,
+					 PCI_EXP_RTCTL_CRSSVE);
+}
+
 /*
  * If it's a bridge, configure it and scan the bus behind it.
  * For CardBus bridges, we don't scan behind as the devices will
@@ -791,6 +803,8 @@
 	pci_write_config_word(dev, PCI_BRIDGE_CONTROL,
 			      bctl & ~PCI_BRIDGE_CTL_MASTER_ABORT);
 
+	pci_enable_crs(dev);
+
 	if ((secondary || subordinate) && !pcibios_assign_all_busses() &&
 	    !is_cardbus && !broken) {
 		unsigned int cmax;
@@ -1240,6 +1254,137 @@
 	return 0;
 }
 
+static struct hpp_type0 pci_default_type0 = {
+	.revision = 1,
+	.cache_line_size = 8,
+	.latency_timer = 0x40,
+	.enable_serr = 0,
+	.enable_perr = 0,
+};
+
+static void program_hpp_type0(struct pci_dev *dev, struct hpp_type0 *hpp)
+{
+	u16 pci_cmd, pci_bctl;
+
+	if (!hpp)
+		hpp = &pci_default_type0;
+
+	if (hpp->revision > 1) {
+		dev_warn(&dev->dev,
+			 "PCI settings rev %d not supported; using defaults\n",
+			 hpp->revision);
+		hpp = &pci_default_type0;
+	}
+
+	pci_write_config_byte(dev, PCI_CACHE_LINE_SIZE, hpp->cache_line_size);
+	pci_write_config_byte(dev, PCI_LATENCY_TIMER, hpp->latency_timer);
+	pci_read_config_word(dev, PCI_COMMAND, &pci_cmd);
+	if (hpp->enable_serr)
+		pci_cmd |= PCI_COMMAND_SERR;
+	if (hpp->enable_perr)
+		pci_cmd |= PCI_COMMAND_PARITY;
+	pci_write_config_word(dev, PCI_COMMAND, pci_cmd);
+
+	/* Program bridge control value */
+	if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+		pci_write_config_byte(dev, PCI_SEC_LATENCY_TIMER,
+				      hpp->latency_timer);
+		pci_read_config_word(dev, PCI_BRIDGE_CONTROL, &pci_bctl);
+		if (hpp->enable_serr)
+			pci_bctl |= PCI_BRIDGE_CTL_SERR;
+		if (hpp->enable_perr)
+			pci_bctl |= PCI_BRIDGE_CTL_PARITY;
+		pci_write_config_word(dev, PCI_BRIDGE_CONTROL, pci_bctl);
+	}
+}
+
+static void program_hpp_type1(struct pci_dev *dev, struct hpp_type1 *hpp)
+{
+	if (hpp)
+		dev_warn(&dev->dev, "PCI-X settings not supported\n");
+}
+
+static void program_hpp_type2(struct pci_dev *dev, struct hpp_type2 *hpp)
+{
+	int pos;
+	u32 reg32;
+
+	if (!hpp)
+		return;
+
+	if (hpp->revision > 1) {
+		dev_warn(&dev->dev, "PCIe settings rev %d not supported\n",
+			 hpp->revision);
+		return;
+	}
+
+	/*
+	 * Don't allow _HPX to change MPS or MRRS settings.  We manage
+	 * those to make sure they're consistent with the rest of the
+	 * platform.
+	 */
+	hpp->pci_exp_devctl_and |= PCI_EXP_DEVCTL_PAYLOAD |
+				    PCI_EXP_DEVCTL_READRQ;
+	hpp->pci_exp_devctl_or &= ~(PCI_EXP_DEVCTL_PAYLOAD |
+				    PCI_EXP_DEVCTL_READRQ);
+
+	/* Initialize Device Control Register */
+	pcie_capability_clear_and_set_word(dev, PCI_EXP_DEVCTL,
+			~hpp->pci_exp_devctl_and, hpp->pci_exp_devctl_or);
+
+	/* Initialize Link Control Register */
+	if (dev->subordinate)
+		pcie_capability_clear_and_set_word(dev, PCI_EXP_LNKCTL,
+			~hpp->pci_exp_lnkctl_and, hpp->pci_exp_lnkctl_or);
+
+	/* Find Advanced Error Reporting Enhanced Capability */
+	pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR);
+	if (!pos)
+		return;
+
+	/* Initialize Uncorrectable Error Mask Register */
+	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, &reg32);
+	reg32 = (reg32 & hpp->unc_err_mask_and) | hpp->unc_err_mask_or;
+	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_MASK, reg32);
+
+	/* Initialize Uncorrectable Error Severity Register */
+	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &reg32);
+	reg32 = (reg32 & hpp->unc_err_sever_and) | hpp->unc_err_sever_or;
+	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, reg32);
+
+	/* Initialize Correctable Error Mask Register */
+	pci_read_config_dword(dev, pos + PCI_ERR_COR_MASK, &reg32);
+	reg32 = (reg32 & hpp->cor_err_mask_and) | hpp->cor_err_mask_or;
+	pci_write_config_dword(dev, pos + PCI_ERR_COR_MASK, reg32);
+
+	/* Initialize Advanced Error Capabilities and Control Register */
+	pci_read_config_dword(dev, pos + PCI_ERR_CAP, &reg32);
+	reg32 = (reg32 & hpp->adv_err_cap_and) | hpp->adv_err_cap_or;
+	pci_write_config_dword(dev, pos + PCI_ERR_CAP, reg32);
+
+	/*
+	 * FIXME: The following two registers are not supported yet.
+	 *
+	 *   o Secondary Uncorrectable Error Severity Register
+	 *   o Secondary Uncorrectable Error Mask Register
+	 */
+}
+
+static void pci_configure_device(struct pci_dev *dev)
+{
+	struct hotplug_params hpp;
+	int ret;
+
+	memset(&hpp, 0, sizeof(hpp));
+	ret = pci_get_hp_params(dev, &hpp);
+	if (ret)
+		return;
+
+	program_hpp_type2(dev, hpp.t2);
+	program_hpp_type1(dev, hpp.t1);
+	program_hpp_type0(dev, hpp.t0);
+}
+
 static void pci_release_capabilities(struct pci_dev *dev)
 {
 	pci_vpd_release(dev);
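
The _HPX type 2 record is applied as new = (current & and_mask) | or_mask for each
register. A minimal, self-contained sketch (hypothetical register and mask values,
not taken from this patch) of why OR-ing PCI_EXP_DEVCTL_PAYLOAD and
PCI_EXP_DEVCTL_READRQ into the AND mask and clearing them in the OR mask keeps the
firmware-supplied settings from touching MPS and MRRS:

	#include <stdint.h>
	#include <stdio.h>

	#define PCI_EXP_DEVCTL_PAYLOAD	0x00e0	/* Max_Payload_Size field */
	#define PCI_EXP_DEVCTL_READRQ	0x7000	/* Max_Read_Request_Size field */

	int main(void)
	{
		uint16_t devctl   = 0x2810;	/* hypothetical current Device Control value */
		uint16_t and_mask = 0x0fff;	/* hypothetical masks handed back by _HPX */
		uint16_t or_mask  = 0x5000;	/* would raise MRRS if applied as-is */
		uint16_t val;

		/* Mirror the fixup: preserve MPS/MRRS, drop them from the OR mask */
		and_mask |= PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ;
		or_mask  &= ~(PCI_EXP_DEVCTL_PAYLOAD | PCI_EXP_DEVCTL_READRQ);

		val = (devctl & and_mask) | or_mask;

		/* The MPS/MRRS bits of "val" are identical to those of "devctl" */
		printf("devctl 0x%04x -> 0x%04x\n", devctl, val);
		return 0;
	}

The same AND/OR pattern is reused above for the AER mask, severity, and
capability/control registers.
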
@@ -1296,8 +1441,13 @@
 	    *l == 0x0000ffff || *l == 0xffff0000)
 		return false;
 
-	/* Configuration request Retry Status */
-	while (*l == 0xffff0001) {
+	/*
+	 * Configuration Request Retry Status.  Some root ports return the
+	 * actual Device ID instead of the synthetic ID (0xFFFF) required
+	 * by the PCIe spec.  Ignore the Device ID and check only for the
+	 * Vendor ID value of 0x0001 that signals CRS.
+	 */
+	while ((*l & 0xffff) == 0x0001) {
 		if (!crs_timeout)
 			return false;
 
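
For context, the check above is used in a retry loop. The sketch below is a
simplified stand-in (crs_poll_vendor_id() is a hypothetical helper, not the function
in probe.c) that polls with exponential back-off until the root port stops
synthesizing the CRS value:

	#include <linux/delay.h>
	#include <linux/pci.h>

	static bool crs_poll_vendor_id(struct pci_bus *bus, unsigned int devfn,
				       u32 *l, int timeout_ms)
	{
		int delay = 1;

		if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
			return false;

		/* Vendor ID 0x0001: device not ready yet, keep retrying */
		while ((*l & 0xffff) == 0x0001) {
			if (delay > timeout_ms)
				return false;
			msleep(delay);
			delay *= 2;
			if (pci_bus_read_config_dword(bus, devfn, PCI_VENDOR_ID, l))
				return false;
		}

		return true;	/* *l now holds the real vendor/device ID */
	}
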
@@ -1377,6 +1527,8 @@
 {
 	int ret;
 
+	pci_configure_device(dev);
+
 	device_initialize(&dev->dev);
 	dev->dev.release = pci_release_dev;
 
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 80c2d01..b6c6500 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -24,6 +24,7 @@
 #include <linux/ioport.h>
 #include <linux/sched.h>
 #include <linux/ktime.h>
+#include <linux/mm.h>
 #include <asm/dma.h>	/* isa_dma_bridge_buggy */
 #include "pci.h"
 
@@ -287,6 +288,25 @@
 }
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM,	PCI_DEVICE_ID_IBM_CITRINE,	quirk_citrine);
 
+/* On IBM Crocodile ipr SAS adapters, expand BAR to system page size */
+static void quirk_extend_bar_to_page(struct pci_dev *dev)
+{
+	int i;
+
+	for (i = 0; i < PCI_STD_RESOURCE_END; i++) {
+		struct resource *r = &dev->resource[i];
+
+		if (r->flags & IORESOURCE_MEM && resource_size(r) < PAGE_SIZE) {
+			r->end = PAGE_SIZE - 1;
+			r->start = 0;
+			r->flags |= IORESOURCE_UNSET;
+			dev_info(&dev->dev, "expanded BAR %d to page size: %pR\n",
+				 i, r);
+		}
+	}
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_IBM, 0x034a, quirk_extend_bar_to_page);
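
After the quirk runs, an undersized BAR is turned into a brand-new, page-sized
request (the values below show the general shape, not a specific machine):

	r->start = 0;
	r->end   = PAGE_SIZE - 1;	/* resource_size(r) == PAGE_SIZE */
	r->flags |= IORESOURCE_UNSET;	/* force reassignment by the core */

Because memory BAR alignment is derived from the resource size, the allocator later
hands back a fresh, page-aligned region instead of the undersized window the
hardware advertised.
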
+
 /*
  *  S3 868 and 968 chips report region size equal to 32M, but they decode 64M.
  *  If it's needed, re-allocate the region.
@@ -2985,6 +3005,8 @@
  */
 DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_REALTEK, 0x8169,
 			 quirk_broken_intx_masking);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_MELLANOX, PCI_ANY_ID,
+			 quirk_broken_intx_masking);
 
 #ifdef CONFIG_ACPI
 /*
@@ -3512,57 +3534,6 @@
 /* Intel 82801, https://bugzilla.kernel.org/show_bug.cgi?id=44881#c49 */
 DECLARE_PCI_FIXUP_HEADER(0x8086, 0x244e, quirk_use_pcie_bridge_dma_alias);
 
-static struct pci_dev *pci_func_0_dma_source(struct pci_dev *dev)
-{
-	if (!PCI_FUNC(dev->devfn))
-		return pci_dev_get(dev);
-
-	return pci_get_slot(dev->bus, PCI_DEVFN(PCI_SLOT(dev->devfn), 0));
-}
-
-static const struct pci_dev_dma_source {
-	u16 vendor;
-	u16 device;
-	struct pci_dev *(*dma_source)(struct pci_dev *dev);
-} pci_dev_dma_source[] = {
-	/*
-	 * https://bugzilla.redhat.com/show_bug.cgi?id=605888
-	 *
-	 * Some Ricoh devices use the function 0 source ID for DMA on
-	 * other functions of a multifunction device.  The DMA devices
-	 * is therefore function 0, which will have implications of the
-	 * iommu grouping of these devices.
-	 */
-	{ PCI_VENDOR_ID_RICOH, 0xe822, pci_func_0_dma_source },
-	{ PCI_VENDOR_ID_RICOH, 0xe230, pci_func_0_dma_source },
-	{ PCI_VENDOR_ID_RICOH, 0xe832, pci_func_0_dma_source },
-	{ PCI_VENDOR_ID_RICOH, 0xe476, pci_func_0_dma_source },
-	{ 0 }
-};
-
-/*
- * IOMMUs with isolation capabilities need to be programmed with the
- * correct source ID of a device.  In most cases, the source ID matches
- * the device doing the DMA, but sometimes hardware is broken and will
- * tag the DMA as being sourced from a different device.  This function
- * allows that translation.  Note that the reference count of the
- * returned device is incremented on all paths.
- */
-struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
-{
-	const struct pci_dev_dma_source *i;
-
-	for (i = pci_dev_dma_source; i->dma_source; i++) {
-		if ((i->vendor == dev->vendor ||
-		     i->vendor == (u16)PCI_ANY_ID) &&
-		    (i->device == dev->device ||
-		     i->device == (u16)PCI_ANY_ID))
-			return i->dma_source(dev);
-	}
-
-	return pci_dev_get(dev);
-}
-
 /*
  * AMD has indicated that the devices below do not support peer-to-peer
  * in any system where they are found in the southbridge with an AMD
@@ -3664,6 +3635,23 @@
 	return acs_flags & ~flags ? 0 : 1;
 }
 
+static int pci_quirk_mf_endpoint_acs(struct pci_dev *dev, u16 acs_flags)
+{
+	/*
+	 * SV, TB, and UF are not relevant to multifunction endpoints.
+	 *
+	 * Multifunction devices are only required to implement RR, CR, and DT
+	 * in their ACS capability if they support peer-to-peer transactions.
+	 * Devices matching this quirk have been verified by the vendor to not
+	 * perform peer-to-peer with other functions, allowing us to mask out
+	 * these bits as if they were unimplemented in the ACS capability.
+	 */
+	acs_flags &= ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
+		       PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);
+
+	return acs_flags ? 0 : 1;
+}
+
 static const struct pci_dev_acs_enabled {
 	u16 vendor;
 	u16 device;
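
A tiny worked example of the masking above (hypothetical caller and values): if the
IOMMU code asks whether Request Redirect and Upstream Forwarding are enforced, every
requested bit is stripped, nothing is left over, and the quirk reports the check as
satisfied:

	#include <linux/pci_regs.h>	/* PCI_ACS_* flag definitions */

	u16 want = PCI_ACS_RR | PCI_ACS_UF;	/* flags the caller asks about */
	u16 left = want & ~(PCI_ACS_SV | PCI_ACS_TB | PCI_ACS_RR |
			    PCI_ACS_CR | PCI_ACS_UF | PCI_ACS_DT);

	/* left == 0, so pci_quirk_mf_endpoint_acs() returns 1 */
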
@@ -3675,6 +3663,28 @@
 	{ PCI_VENDOR_ID_ATI, 0x439d, pci_quirk_amd_sb_acs },
 	{ PCI_VENDOR_ID_ATI, 0x4384, pci_quirk_amd_sb_acs },
 	{ PCI_VENDOR_ID_ATI, 0x4399, pci_quirk_amd_sb_acs },
+	{ PCI_VENDOR_ID_SOLARFLARE, 0x0903, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_SOLARFLARE, 0x0923, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x10C6, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x10DB, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x10DD, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x10E1, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x10F1, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x10F7, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x10F8, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x10F9, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x10FA, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x10FB, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x10FC, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x1507, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x1514, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x151C, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x1529, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x152A, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x154D, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x154F, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x1551, pci_quirk_mf_endpoint_acs },
+	{ PCI_VENDOR_ID_INTEL, 0x1558, pci_quirk_mf_endpoint_acs },
 	{ PCI_VENDOR_ID_INTEL, PCI_ANY_ID, pci_quirk_intel_pch_acs },
 	{ 0 }
 };
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index 827ad83..a81f413 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -103,40 +103,6 @@
 	return ret;
 }
 
-/*
- * find the upstream PCIe-to-PCI bridge of a PCI device
- * if the device is PCIE, return NULL
- * if the device isn't connected to a PCIe bridge (that is its parent is a
- * legacy PCI bridge and the bridge is directly connected to bus 0), return its
- * parent
- */
-struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev)
-{
-	struct pci_dev *tmp = NULL;
-
-	if (pci_is_pcie(pdev))
-		return NULL;
-	while (1) {
-		if (pci_is_root_bus(pdev->bus))
-			break;
-		pdev = pdev->bus->self;
-		/* a p2p bridge */
-		if (!pci_is_pcie(pdev)) {
-			tmp = pdev;
-			continue;
-		}
-		/* PCI device should connect to a PCIe bridge */
-		if (pci_pcie_type(pdev) != PCI_EXP_TYPE_PCI_BRIDGE) {
-			/* Busted hardware? */
-			WARN_ON_ONCE(1);
-			return NULL;
-		}
-		return pdev;
-	}
-
-	return tmp;
-}
-
 static struct pci_bus *pci_do_find_bus(struct pci_bus *bus, unsigned char busnr)
 {
 	struct pci_bus *child;
diff --git a/drivers/scsi/vmw_pvscsi.h b/drivers/scsi/vmw_pvscsi.h
index ce45888..ee16f0c 100644
--- a/drivers/scsi/vmw_pvscsi.h
+++ b/drivers/scsi/vmw_pvscsi.h
@@ -32,7 +32,6 @@
 
 #define MASK(n)        ((1 << (n)) - 1)        /* make an n-bit mask */
 
-#define PCI_VENDOR_ID_VMWARE		0x15AD
 #define PCI_DEVICE_ID_VMWARE_PVSCSI	0x07C0
 
 /*
diff --git a/drivers/vfio/pci/vfio_pci_config.c b/drivers/vfio/pci/vfio_pci_config.c
index e50790e..1de3f94 100644
--- a/drivers/vfio/pci/vfio_pci_config.c
+++ b/drivers/vfio/pci/vfio_pci_config.c
@@ -727,7 +727,7 @@
 	p_setd(perm, 0, ALL_VIRT, NO_WRITE);
 
 	/* Writable bits mask */
-	mask =	PCI_ERR_UNC_TRAIN |		/* Training */
+	mask =	PCI_ERR_UNC_UND |		/* Undefined */
 		PCI_ERR_UNC_DLP |		/* Data Link Protocol */
 		PCI_ERR_UNC_SURPDN |		/* Surprise Down */
 		PCI_ERR_UNC_POISON_TLP |	/* Poisoned TLP */
diff --git a/drivers/xen/xen-pciback/pci_stub.c b/drivers/xen/xen-pciback/pci_stub.c
index 259ba26..017069a 100644
--- a/drivers/xen/xen-pciback/pci_stub.c
+++ b/drivers/xen/xen-pciback/pci_stub.c
@@ -133,7 +133,7 @@
 	xen_pcibk_config_free_dyn_fields(dev);
 	xen_pcibk_config_free_dev(dev);
 
-	dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+	pci_clear_dev_assigned(dev);
 	pci_dev_put(dev);
 
 	kfree(psdev);
@@ -413,7 +413,7 @@
 	dev_dbg(&dev->dev, "reset device\n");
 	xen_pcibk_reset_device(dev);
 
-	dev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
+	pci_set_dev_assigned(dev);
 	return 0;
 
 config_release:
diff --git a/include/linux/aer.h b/include/linux/aer.h
index c826d1c..4fef65e 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -7,6 +7,8 @@
 #ifndef _AER_H_
 #define _AER_H_
 
+#include <linux/types.h>
+
 #define AER_NONFATAL			0
 #define AER_FATAL			1
 #define AER_CORRECTABLE			2
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 142ec54..2c525022 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -215,6 +215,11 @@
 
 /* Wrappers for managed devices */
 struct device;
+
+extern int devm_request_resource(struct device *dev, struct resource *root,
+				 struct resource *new);
+extern void devm_release_resource(struct device *dev, struct resource *new);
+
 #define devm_request_region(dev,start,n,name) \
 	__devm_request_region(dev, &ioport_resource, (start), (n), (name))
 #define devm_request_mem_region(dev,start,n,name) \
diff --git a/include/linux/pci.h b/include/linux/pci.h
index b4995fd..9cd2721 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -45,7 +45,7 @@
  * In the interest of not exposing interfaces to user-space unnecessarily,
  * the following kernel-only defines are being added here.
  */
-#define PCI_DEVID(bus, devfn)  ((((u16)bus) << 8) | devfn)
+#define PCI_DEVID(bus, devfn)  ((((u16)(bus)) << 8) | (devfn))
 /* return bus from PCI devid = ((u16)bus_number) << 8) | devfn */
 #define PCI_BUS_NUM(x) (((x) >> 8) & 0xff)
 
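
The extra parentheses matter when an argument is itself an expression. A small
self-contained illustration (the macro copies and values are hypothetical, only for
demonstration):

	#include <assert.h>

	typedef unsigned short u16;

	#define PCI_DEVID_OLD(bus, devfn)  ((((u16)bus) << 8) | devfn)
	#define PCI_DEVID_NEW(bus, devfn)  ((((u16)(bus)) << 8) | (devfn))

	int main(void)
	{
		int bus = 0x02, use_fn1 = 1, fn0 = 0x08, fn1 = 0x09;

		/* Old body: '|' binds tighter than '?:', so the whole devid
		 * collapses to "fn1" instead of 0x0209. */
		assert(PCI_DEVID_OLD(bus, use_fn1 ? fn1 : fn0) == fn1);

		/* New body: the argument keeps its own parentheses. */
		assert(PCI_DEVID_NEW(bus, use_fn1 ? fn1 : fn0) == 0x0209);

		return 0;
	}

The same reasoning applies to the PCI_VPD_LRDT_ID() change further down.
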
@@ -1584,16 +1584,11 @@
 
 #ifdef CONFIG_PCI_QUIRKS
 void pci_fixup_device(enum pci_fixup_pass pass, struct pci_dev *dev);
-struct pci_dev *pci_get_dma_source(struct pci_dev *dev);
 int pci_dev_specific_acs_enabled(struct pci_dev *dev, u16 acs_flags);
 void pci_dev_specific_enable_acs(struct pci_dev *dev);
 #else
 static inline void pci_fixup_device(enum pci_fixup_pass pass,
 				    struct pci_dev *dev) { }
-static inline struct pci_dev *pci_get_dma_source(struct pci_dev *dev)
-{
-	return pci_dev_get(dev);
-}
 static inline int pci_dev_specific_acs_enabled(struct pci_dev *dev,
 					       u16 acs_flags)
 {
@@ -1728,7 +1723,7 @@
 			  struct pci_dev *end, u16 acs_flags);
 
 #define PCI_VPD_LRDT			0x80	/* Large Resource Data Type */
-#define PCI_VPD_LRDT_ID(x)		(x | PCI_VPD_LRDT)
+#define PCI_VPD_LRDT_ID(x)		((x) | PCI_VPD_LRDT)
 
 /* Large Resource Data Type Tag Item Names */
 #define PCI_VPD_LTIN_ID_STRING		0x02	/* Identifier String */
@@ -1855,15 +1850,17 @@
 			   int (*fn)(struct pci_dev *pdev,
 				     u16 alias, void *data), void *data);
 
-/**
- * pci_find_upstream_pcie_bridge - find upstream PCIe-to-PCI bridge of a device
- * @pdev: the PCI device
- *
- * if the device is PCIE, return NULL
- * if the device isn't connected to a PCIe bridge (that is its parent is a
- * legacy PCI bridge and the bridge is directly connected to bus 0), return its
- * parent
- */
-struct pci_dev *pci_find_upstream_pcie_bridge(struct pci_dev *pdev);
-
+/* Helpers for managing the PCI_DEV_FLAGS_ASSIGNED device flag */
+static inline void pci_set_dev_assigned(struct pci_dev *pdev)
+{
+	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
+}
+static inline void pci_clear_dev_assigned(struct pci_dev *pdev)
+{
+	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+}
+static inline bool pci_is_dev_assigned(struct pci_dev *pdev)
+{
+	return (pdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED) == PCI_DEV_FLAGS_ASSIGNED;
+}
 #endif /* LINUX_PCI_H */
diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
index 5f2e559..2706ee9 100644
--- a/include/linux/pci_hotplug.h
+++ b/include/linux/pci_hotplug.h
@@ -187,6 +187,4 @@
 	return -ENODEV;
 }
 #endif
-
-void pci_configure_slot(struct pci_dev *dev);
 #endif
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 6ed0bb7..da9e6f7 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2245,6 +2245,8 @@
 #define PCI_VENDOR_ID_MORETON		0x15aa
 #define PCI_DEVICE_ID_RASTEL_2PORT	0x2000
 
+#define PCI_VENDOR_ID_VMWARE		0x15ad
+
 #define PCI_VENDOR_ID_ZOLTRIX		0x15b0
 #define PCI_DEVICE_ID_ZOLTRIX_2BD0	0x2bd0
 
diff --git a/include/ras/ras_event.h b/include/ras/ras_event.h
index 47da53c..79abb9c 100644
--- a/include/ras/ras_event.h
+++ b/include/ras/ras_event.h
@@ -8,6 +8,7 @@
 #include <linux/tracepoint.h>
 #include <linux/edac.h>
 #include <linux/ktime.h>
+#include <linux/pci.h>
 #include <linux/aer.h>
 #include <linux/cper.h>
 
@@ -173,25 +174,34 @@
  * u8 severity -	error severity 0:NONFATAL 1:FATAL 2:CORRECTED
  */
 
-#define aer_correctable_errors		\
-	{BIT(0),	"Receiver Error"},		\
-	{BIT(6),	"Bad TLP"},			\
-	{BIT(7),	"Bad DLLP"},			\
-	{BIT(8),	"RELAY_NUM Rollover"},		\
-	{BIT(12),	"Replay Timer Timeout"},	\
-	{BIT(13),	"Advisory Non-Fatal"}
+#define aer_correctable_errors					\
+	{PCI_ERR_COR_RCVR,	"Receiver Error"},		\
+	{PCI_ERR_COR_BAD_TLP,	"Bad TLP"},			\
+	{PCI_ERR_COR_BAD_DLLP,	"Bad DLLP"},			\
+	{PCI_ERR_COR_REP_ROLL,	"RELAY_NUM Rollover"},		\
+	{PCI_ERR_COR_REP_TIMER,	"Replay Timer Timeout"},	\
+	{PCI_ERR_COR_ADV_NFAT,	"Advisory Non-Fatal Error"},	\
+	{PCI_ERR_COR_INTERNAL,	"Corrected Internal Error"},	\
+	{PCI_ERR_COR_LOG_OVER,	"Header Log Overflow"}
 
-#define aer_uncorrectable_errors		\
-	{BIT(4),	"Data Link Protocol"},		\
-	{BIT(12),	"Poisoned TLP"},		\
-	{BIT(13),	"Flow Control Protocol"},	\
-	{BIT(14),	"Completion Timeout"},		\
-	{BIT(15),	"Completer Abort"},		\
-	{BIT(16),	"Unexpected Completion"},	\
-	{BIT(17),	"Receiver Overflow"},		\
-	{BIT(18),	"Malformed TLP"},		\
-	{BIT(19),	"ECRC"},			\
-	{BIT(20),	"Unsupported Request"}
+#define aer_uncorrectable_errors				\
+	{PCI_ERR_UNC_UND,	"Undefined"},			\
+	{PCI_ERR_UNC_DLP,	"Data Link Protocol Error"},	\
+	{PCI_ERR_UNC_SURPDN,	"Surprise Down Error"},		\
+	{PCI_ERR_UNC_POISON_TLP,"Poisoned TLP"},		\
+	{PCI_ERR_UNC_FCP,	"Flow Control Protocol Error"},	\
+	{PCI_ERR_UNC_COMP_TIME,	"Completion Timeout"},		\
+	{PCI_ERR_UNC_COMP_ABORT,"Completer Abort"},		\
+	{PCI_ERR_UNC_UNX_COMP,	"Unexpected Completion"},	\
+	{PCI_ERR_UNC_RX_OVER,	"Receiver Overflow"},		\
+	{PCI_ERR_UNC_MALF_TLP,	"Malformed TLP"},		\
+	{PCI_ERR_UNC_ECRC,	"ECRC Error"},			\
+	{PCI_ERR_UNC_UNSUP,	"Unsupported Request Error"},	\
+	{PCI_ERR_UNC_ACSV,	"ACS Violation"},		\
+	{PCI_ERR_UNC_INTN,	"Uncorrectable Internal Error"},\
+	{PCI_ERR_UNC_MCBTLP,	"MC Blocked TLP"},		\
+	{PCI_ERR_UNC_ATOMEG,	"AtomicOp Egress Blocked"},	\
+	{PCI_ERR_UNC_TLPPRE,	"TLP Prefix Blocked Error"}
 
 TRACE_EVENT(aer_event,
 	TP_PROTO(const char *dev_name,
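
These tables feed __print_flags() in the tracepoint's output format, roughly like
this (simplified; the real TP_printk also selects the table based on severity and
prints the device name):

	__print_flags(__entry->status, "|", aer_correctable_errors)

so that a status word of, say, PCI_ERR_COR_BAD_TLP | PCI_ERR_COR_ADV_NFAT is
rendered as "Bad TLP|Advisory Non-Fatal Error" rather than as raw bits.
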
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 30db069..4a1d0cc 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -552,6 +552,7 @@
 #define  PCI_EXP_RTCTL_PMEIE	0x0008	/* PME Interrupt Enable */
 #define  PCI_EXP_RTCTL_CRSSVE	0x0010	/* CRS Software Visibility Enable */
 #define PCI_EXP_RTCAP		30	/* Root Capabilities */
+#define  PCI_EXP_RTCAP_CRSVIS	0x0001	/* CRS Software Visibility capability */
 #define PCI_EXP_RTSTA		32	/* Root Status */
 #define PCI_EXP_RTSTA_PME	0x00010000 /* PME status */
 #define PCI_EXP_RTSTA_PENDING	0x00020000 /* PME pending */
@@ -630,7 +631,7 @@
 
 /* Advanced Error Reporting */
 #define PCI_ERR_UNCOR_STATUS	4	/* Uncorrectable Error Status */
-#define  PCI_ERR_UNC_TRAIN	0x00000001	/* Training */
+#define  PCI_ERR_UNC_UND	0x00000001	/* Undefined */
 #define  PCI_ERR_UNC_DLP	0x00000010	/* Data Link Protocol */
 #define  PCI_ERR_UNC_SURPDN	0x00000020	/* Surprise Down */
 #define  PCI_ERR_UNC_POISON_TLP	0x00001000	/* Poisoned TLP */
diff --git a/kernel/resource.c b/kernel/resource.c
index da14b8d..ca24f19 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1248,6 +1248,76 @@
 /*
  * Managed region resource
  */
+static void devm_resource_release(struct device *dev, void *ptr)
+{
+	struct resource **r = ptr;
+
+	release_resource(*r);
+}
+
+/**
+ * devm_request_resource() - request and reserve an I/O or memory resource
+ * @dev: device for which to request the resource
+ * @root: root of the resource tree from which to request the resource
+ * @new: descriptor of the resource to request
+ *
+ * This is a device-managed version of request_resource(). There is usually
+ * no need to release resources requested by this function explicitly since
+ * that will be taken care of when the device is unbound from its driver.
+ * If for some reason the resource needs to be released explicitly, because
+ * of ordering issues for example, drivers must call devm_release_resource()
+ * rather than the regular release_resource().
+ *
+ * When a conflict is detected between any existing resources and the newly
+ * requested resource, an error message will be printed.
+ *
+ * Returns 0 on success or a negative error code on failure.
+ */
+int devm_request_resource(struct device *dev, struct resource *root,
+			  struct resource *new)
+{
+	struct resource *conflict, **ptr;
+
+	ptr = devres_alloc(devm_resource_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return -ENOMEM;
+
+	*ptr = new;
+
+	conflict = request_resource_conflict(root, new);
+	if (conflict) {
+		dev_err(dev, "resource collision: %pR conflicts with %s %pR\n",
+			new, conflict->name, conflict);
+		devres_free(ptr);
+		return -EBUSY;
+	}
+
+	devres_add(dev, ptr);
+	return 0;
+}
+EXPORT_SYMBOL(devm_request_resource);
+
+static int devm_resource_match(struct device *dev, void *res, void *data)
+{
+	struct resource **ptr = res;
+
+	return *ptr == data;
+}
+
+/**
+ * devm_release_resource() - release a previously requested resource
+ * @dev: device for which to release the resource
+ * @new: descriptor of the resource to release
+ *
+ * Releases a resource previously requested using devm_request_resource().
+ */
+void devm_release_resource(struct device *dev, struct resource *new)
+{
+	WARN_ON(devres_release(dev, devm_resource_release, devm_resource_match,
+			       new));
+}
+EXPORT_SYMBOL(devm_release_resource);
+
 struct region_devres {
 	struct resource *parent;
 	resource_size_t start;
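
As a usage sketch (hypothetical driver and field names): a driver that has built a
struct resource for a memory window, for example parsed from the device tree, can
tie the reservation's lifetime to the device like this:

	#include <linux/device.h>
	#include <linux/ioport.h>

	struct foo_priv {
		struct resource mem;	/* filled in elsewhere, e.g. from a DT range */
	};

	static int foo_claim_window(struct device *dev, struct foo_priv *priv)
	{
		int err;

		/* Reserve the window under the global MMIO tree; it is released
		 * automatically when the device is unbound from its driver. */
		err = devm_request_resource(dev, &iomem_resource, &priv->mem);
		if (err < 0)
			return err;

		return 0;
	}
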
diff --git a/virt/kvm/assigned-dev.c b/virt/kvm/assigned-dev.c
index 5819a27..e05000e 100644
--- a/virt/kvm/assigned-dev.c
+++ b/virt/kvm/assigned-dev.c
@@ -302,7 +302,7 @@
 	else
 		pci_restore_state(assigned_dev->dev);
 
-	assigned_dev->dev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+	pci_clear_dev_assigned(assigned_dev->dev);
 
 	pci_release_regions(assigned_dev->dev);
 	pci_disable_device(assigned_dev->dev);
diff --git a/virt/kvm/iommu.c b/virt/kvm/iommu.c
index 714b949..e723bb9 100644
--- a/virt/kvm/iommu.c
+++ b/virt/kvm/iommu.c
@@ -203,7 +203,7 @@
 			goto out_unmap;
 	}
 
-	pdev->dev_flags |= PCI_DEV_FLAGS_ASSIGNED;
+	pci_set_dev_assigned(pdev);
 
 	dev_info(&pdev->dev, "kvm assign device\n");
 
@@ -229,7 +229,7 @@
 
 	iommu_detach_device(domain, &pdev->dev);
 
-	pdev->dev_flags &= ~PCI_DEV_FLAGS_ASSIGNED;
+	pci_clear_dev_assigned(pdev);
 
 	dev_info(&pdev->dev, "kvm deassign device\n");