Merge branch 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull irq fixes from Thomas Gleixner:
 "Three fixes for the ARM GIC interrupt controller from Marc addressing
  various shortcomings versus boot initialization and suspend/resume"

* 'irq-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  irqchip/gic: Add save/restore of the active state
  irqchip/gic: Clear enable bits before restoring them
  irqchip/gic: Make sure all interrupts are deactivated at boot
diff --git a/Documentation/IPMI.txt b/Documentation/IPMI.txt
index 31d1d65..c0d8788 100644
--- a/Documentation/IPMI.txt
+++ b/Documentation/IPMI.txt
@@ -587,7 +587,7 @@
 
   modprobe ipmi_watchdog timeout=<t> pretimeout=<t> action=<action type>
       preaction=<preaction type> preop=<preop type> start_now=x
-      nowayout=x ifnum_to_use=n
+      nowayout=x ifnum_to_use=n panic_wdt_timeout=<t>
 
 ifnum_to_use specifies which interface the watchdog timer should use.
 The default is -1, which means to pick the first one registered.
@@ -597,7 +597,9 @@
 occur (if pretimeout is zero, then pretimeout will not be enabled).  Note
 that the pretimeout is the time before the final timeout.  So if the
 timeout is 50 seconds and the pretimeout is 10 seconds, then the pretimeout
-will occur in 40 second (10 seconds before the timeout).
+will occur in 40 seconds (10 seconds before the timeout). The panic_wdt_timeout
+is the timeout value set on a kernel panic, in order to let actions such
+as kdump occur during the panic.
 
 The action may be "reset", "power_cycle", or "power_off", and
 specifies what to do when the timer times out, and defaults to
@@ -634,6 +636,7 @@
 	ipmi_watchdog.preop=<preop type>
 	ipmi_watchdog.start_now=x
 	ipmi_watchdog.nowayout=x
+	ipmi_watchdog.panic_wdt_timeout=<t>
 
 The options are the same as the module parameter options.
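
To put the new parameter in context, a hypothetical invocation might look
like this (the values are invented for illustration; only the parameter
name comes from this patch):

  modprobe ipmi_watchdog timeout=60 pretimeout=10 action=reset \
      panic_wdt_timeout=120

On a panic the driver would then re-arm the watchdog to 120 seconds,
giving kdump time to write a crash dump before the BMC resets the board.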
 
diff --git a/Documentation/i2c/busses/i2c-i801 b/Documentation/i2c/busses/i2c-i801
index 6a4b1af..1bba38d 100644
--- a/Documentation/i2c/busses/i2c-i801
+++ b/Documentation/i2c/busses/i2c-i801
@@ -32,6 +32,7 @@
   * Intel Sunrise Point-LP (PCH)
   * Intel DNV (SOC)
   * Intel Broxton (SOC)
+  * Intel Lewisburg (PCH)
    Datasheets: Publicly available at the Intel website
 
 On Intel Patsburg and later chipsets, both the normal host SMBus controller
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f8aae63..742f69d 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1583,9 +1583,6 @@
 		hwp_only
 			Only load intel_pstate on systems which support
 			hardware P state control (HWP) if available.
-		no_acpi
-			Don't use ACPI processor performance control objects
-			_PSS and _PPC specified limits.
 
 	intremap=	[X86-64, Intel-IOMMU]
 			on	enable Interrupt Remapping (default)
diff --git a/MAINTAINERS b/MAINTAINERS
index e9caa4b..557a3ed 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -2449,7 +2449,9 @@
 
 BROADCOM STB NAND FLASH DRIVER
 M:	Brian Norris <computersforpeace@gmail.com>
+M:	Kamal Dasu <kdasu.kdev@gmail.com>
 L:	linux-mtd@lists.infradead.org
+L:	bcm-kernel-feedback-list@broadcom.com
 S:	Maintained
 F:	drivers/mtd/nand/brcmnand/
 
@@ -2546,7 +2548,7 @@
 
 CACHEFILES: FS-CACHE BACKEND FOR CACHING ON MOUNTED FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
-L:	linux-cachefs@redhat.com
+L:	linux-cachefs@redhat.com (moderated for non-subscribers)
 S:	Supported
 F:	Documentation/filesystems/caching/cachefiles.txt
 F:	fs/cachefiles/
@@ -4559,7 +4561,7 @@
 
 FS-CACHE: LOCAL CACHING FOR NETWORK FILESYSTEMS
 M:	David Howells <dhowells@redhat.com>
-L:	linux-cachefs@redhat.com
+L:	linux-cachefs@redhat.com (moderated for non-subscribers)
 S:	Supported
 F:	Documentation/filesystems/caching/
 F:	fs/fscache/
@@ -5711,13 +5713,6 @@
 S:	Maintained
 F:	net/ipv4/netfilter/ipt_MASQUERADE.c
 
-IP1000A 10/100/1000 GIGABIT ETHERNET DRIVER
-M:	Francois Romieu <romieu@fr.zoreil.com>
-M:	Sorbica Shieh <sorbica@icplus.com.tw>
-L:	netdev@vger.kernel.org
-S:	Maintained
-F:	drivers/net/ethernet/icplus/ipg.*
-
 IPATH DRIVER
 M:	Mike Marciniszyn <infinipath@intel.com>
 L:	linux-rdma@vger.kernel.org
@@ -6923,13 +6918,21 @@
 F:	drivers/scsi/megaraid/
 
 MELLANOX ETHERNET DRIVER (mlx4_en)
-M:	Amir Vadai <amirv@mellanox.com>
+M:	Eugenia Emantayev <eugenia@mellanox.com>
 L:	netdev@vger.kernel.org
 S:	Supported
 W:	http://www.mellanox.com
 Q:	http://patchwork.ozlabs.org/project/netdev/list/
 F:	drivers/net/ethernet/mellanox/mlx4/en_*
 
+MELLANOX ETHERNET DRIVER (mlx5e)
+M:	Saeed Mahameed <saeedm@mellanox.com>
+L:	netdev@vger.kernel.org
+S:	Supported
+W:	http://www.mellanox.com
+Q:	http://patchwork.ozlabs.org/project/netdev/list/
+F:	drivers/net/ethernet/mellanox/mlx5/core/en_*
+
 MELLANOX ETHERNET SWITCH DRIVERS
 M:	Jiri Pirko <jiri@mellanox.com>
 M:	Ido Schimmel <idosch@mellanox.com>
@@ -9314,7 +9317,6 @@
 F:	include/linux/platform_data/i2c-designware.h
 
 SYNOPSYS DESIGNWARE MMC/SD/SDIO DRIVER
-M:	Seungwon Jeon <tgih.jun@samsung.com>
 M:	Jaehoon Chung <jh80.chung@samsung.com>
 L:	linux-mmc@vger.kernel.org
 S:	Maintained
diff --git a/arch/arm/net/bpf_jit_32.c b/arch/arm/net/bpf_jit_32.c
index 2f4b14c..591f9db 100644
--- a/arch/arm/net/bpf_jit_32.c
+++ b/arch/arm/net/bpf_jit_32.c
@@ -1061,7 +1061,7 @@
 	}
 	build_epilogue(&ctx);
 
-	flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
+	flush_icache_range((u32)header, (u32)(ctx.target + ctx.idx));
 
 #if __LINUX_ARM_ARCH__ < 7
 	if (ctx.imm_count)
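
A note on the flush range: in this JIT the allocation (via
bpf_jit_binary_alloc()) starts at `header` (the bpf_binary_header), and
ctx.target points at the instruction image that follows it, so flushing
from `header` covers everything that was just written rather than only
the image:

	/*
	 * region layout (sketch):
	 *
	 *   header ──► [ bpf_binary_header | fill | ctx.target: image... ]
	 *
	 * old flush:                               [image ...... image+idx]
	 * new flush:  [header ............................... image+idx]
	 */

The same one-line fix is applied to the arm64 JIT further down.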
diff --git a/arch/arm64/crypto/aes-ce-cipher.c b/arch/arm64/crypto/aes-ce-cipher.c
index ce47792..f7bd9bf 100644
--- a/arch/arm64/crypto/aes-ce-cipher.c
+++ b/arch/arm64/crypto/aes-ce-cipher.c
@@ -237,7 +237,7 @@
 static struct crypto_alg aes_alg = {
 	.cra_name		= "aes",
 	.cra_driver_name	= "aes-ce",
-	.cra_priority		= 300,
+	.cra_priority		= 250,
 	.cra_flags		= CRYPTO_ALG_TYPE_CIPHER,
 	.cra_blocksize		= AES_BLOCK_SIZE,
 	.cra_ctxsize		= sizeof(struct crypto_aes_ctx),
diff --git a/arch/arm64/include/asm/barrier.h b/arch/arm64/include/asm/barrier.h
index 624f967..9622eb4 100644
--- a/arch/arm64/include/asm/barrier.h
+++ b/arch/arm64/include/asm/barrier.h
@@ -64,27 +64,31 @@
 
 #define smp_load_acquire(p)						\
 ({									\
-	typeof(*p) ___p1;						\
+	union { typeof(*p) __val; char __c[1]; } __u;			\
 	compiletime_assert_atomic_type(*p);				\
 	switch (sizeof(*p)) {						\
 	case 1:								\
 		asm volatile ("ldarb %w0, %1"				\
-			: "=r" (___p1) : "Q" (*p) : "memory");		\
+			: "=r" (*(__u8 *)__u.__c)			\
+			: "Q" (*p) : "memory");				\
 		break;							\
 	case 2:								\
 		asm volatile ("ldarh %w0, %1"				\
-			: "=r" (___p1) : "Q" (*p) : "memory");		\
+			: "=r" (*(__u16 *)__u.__c)			\
+			: "Q" (*p) : "memory");				\
 		break;							\
 	case 4:								\
 		asm volatile ("ldar %w0, %1"				\
-			: "=r" (___p1) : "Q" (*p) : "memory");		\
+			: "=r" (*(__u32 *)__u.__c)			\
+			: "Q" (*p) : "memory");				\
 		break;							\
 	case 8:								\
 		asm volatile ("ldar %0, %1"				\
-			: "=r" (___p1) : "Q" (*p) : "memory");		\
+			: "=r" (*(__u64 *)__u.__c)			\
+			: "Q" (*p) : "memory");				\
 		break;							\
 	}								\
-	___p1;								\
+	__u.__val;							\
 })
 
 #define read_barrier_depends()		do { } while(0)
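
The char-array union here mirrors the READ_ONCE() implementation, and the
most likely motivation is const-correctness: with the old code, if *p is
const-qualified then `typeof(*p) ___p1;` declares a const local, which
cannot legally be used as an asm output operand. A minimal user-space
sketch of the failure mode (illustrative, not kernel code):

	const int x = 1;
	const int *p = &x;
	typeof(*p) v;                    /* v has type "const int" */
	asm volatile ("mov %w0, %w1" : "=r" (v) : "r" (*p));
	/* error: read-only variable used as "asm" output */

Writing through `*(__u32 *)__u.__c` instead targets a plain char buffer,
and `__u.__val` then yields the result with its original type.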
diff --git a/arch/arm64/include/asm/compat.h b/arch/arm64/include/asm/compat.h
index 7fbed69..eb8432b 100644
--- a/arch/arm64/include/asm/compat.h
+++ b/arch/arm64/include/asm/compat.h
@@ -23,7 +23,6 @@
  */
 #include <linux/types.h>
 #include <linux/sched.h>
-#include <linux/ptrace.h>
 
 #define COMPAT_USER_HZ		100
 #ifdef __AARCH64EB__
@@ -234,7 +233,7 @@
 	return (u32)(unsigned long)uptr;
 }
 
-#define compat_user_stack_pointer() (user_stack_pointer(current_pt_regs()))
+#define compat_user_stack_pointer() (user_stack_pointer(task_pt_regs(current)))
 
 static inline void __user *arch_compat_alloc_user_space(long len)
 {
diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h
index 54d0ead..61e08f3 100644
--- a/arch/arm64/include/asm/dma-mapping.h
+++ b/arch/arm64/include/asm/dma-mapping.h
@@ -18,7 +18,6 @@
 
 #ifdef __KERNEL__
 
-#include <linux/acpi.h>
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 
@@ -26,22 +25,16 @@
 #include <asm/xen/hypervisor.h>
 
 #define DMA_ERROR_CODE	(~(dma_addr_t)0)
-extern struct dma_map_ops *dma_ops;
 extern struct dma_map_ops dummy_dma_ops;
 
 static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
 {
-	if (unlikely(!dev))
-		return dma_ops;
-	else if (dev->archdata.dma_ops)
+	if (dev && dev->archdata.dma_ops)
 		return dev->archdata.dma_ops;
-	else if (acpi_disabled)
-		return dma_ops;
 
 	/*
-	 * When ACPI is enabled, if arch_set_dma_ops is not called,
-	 * we will disable device DMA capability by setting it
-	 * to dummy_dma_ops.
+	 * We expect no ISA devices, and all other DMA masters are expected to
+	 * have someone call arch_setup_dma_ops at device creation time.
 	 */
 	return &dummy_dma_ops;
 }
diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h
index c0e8789..2416578 100644
--- a/arch/arm64/include/asm/mmu_context.h
+++ b/arch/arm64/include/asm/mmu_context.h
@@ -101,7 +101,7 @@
 #define destroy_context(mm)		do { } while(0)
 void check_and_switch_context(struct mm_struct *mm, unsigned int cpu);
 
-#define init_new_context(tsk,mm)	({ atomic64_set(&mm->context.id, 0); 0; })
+#define init_new_context(tsk,mm)	({ atomic64_set(&(mm)->context.id, 0); 0; })
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
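
The added parentheses matter as soon as the macro argument is an
expression rather than a plain identifier, which is exactly what the EFI
change below does with init_new_context(NULL, &efi_mm). A worked textual
expansion:

	/* with (mm) parenthesized: */
	atomic64_set(&(&efi_mm)->context.id, 0);  /* valid */

	/* without the parentheses: */
	atomic64_set(&&efi_mm->context.id, 0);    /* "&&" lexes as a single
						     token - does not compile */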
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9819a94..7e074f9 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -81,6 +81,7 @@
 
 #define PAGE_KERNEL		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_RO		__pgprot(_PAGE_DEFAULT | PTE_PXN | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
+#define PAGE_KERNEL_ROX	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY)
 #define PAGE_KERNEL_EXEC	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE)
 #define PAGE_KERNEL_EXEC_CONT	__pgprot(_PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_CONT)
 
diff --git a/arch/arm64/kernel/cpuinfo.c b/arch/arm64/kernel/cpuinfo.c
index 706679d..212ae63 100644
--- a/arch/arm64/kernel/cpuinfo.c
+++ b/arch/arm64/kernel/cpuinfo.c
@@ -30,6 +30,7 @@
 #include <linux/seq_file.h>
 #include <linux/sched.h>
 #include <linux/smp.h>
+#include <linux/delay.h>
 
 /*
  * In case the boot CPU is hotpluggable, we record its initial state and
@@ -112,6 +113,10 @@
 		 */
 		seq_printf(m, "processor\t: %d\n", i);
 
+		seq_printf(m, "BogoMIPS\t: %lu.%02lu\n",
+			   loops_per_jiffy / (500000UL/HZ),
+			   loops_per_jiffy / (5000UL/HZ) % 100);
+
 		/*
 		 * Dump out the common processor features in a single line.
 		 * Userspace should read the hwcaps with getauxval(AT_HWCAP)
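
As a worked example of the restored output (numbers invented): with
HZ=100 and loops_per_jiffy = 2,000,000, the integer part is
2000000 / (500000/100) = 400 and the fractional part is
(2000000 / (5000/100)) % 100 = 0, so the emitted line is:

  BogoMIPS	: 400.00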
diff --git a/arch/arm64/kernel/efi.c b/arch/arm64/kernel/efi.c
index de46b50..fc5508e 100644
--- a/arch/arm64/kernel/efi.c
+++ b/arch/arm64/kernel/efi.c
@@ -224,6 +224,8 @@
 {
 	efi_memory_desc_t *md;
 
+	init_new_context(NULL, &efi_mm);
+
 	for_each_efi_memory_desc(&memmap, md) {
 		u64 paddr, npages, size;
 		pgprot_t prot;
@@ -254,7 +256,8 @@
 		else
 			prot = PAGE_KERNEL;
 
-		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size, prot);
+		create_pgd_mapping(&efi_mm, paddr, md->virt_addr, size,
+				   __pgprot(pgprot_val(prot) | PTE_NG));
 	}
 	return true;
 }
@@ -329,14 +332,7 @@
 
 static void efi_set_pgd(struct mm_struct *mm)
 {
-	if (mm == &init_mm)
-		cpu_set_reserved_ttbr0();
-	else
-		cpu_switch_mm(mm->pgd, mm);
-
-	local_flush_tlb_all();
-	if (icache_is_aivivt())
-		__local_flush_icache_all();
+	switch_mm(NULL, mm, NULL);
 }
 
 void efi_virtmap_load(void)
diff --git a/arch/arm64/kernel/suspend.c b/arch/arm64/kernel/suspend.c
index fce95e1..1095aa4 100644
--- a/arch/arm64/kernel/suspend.c
+++ b/arch/arm64/kernel/suspend.c
@@ -1,3 +1,4 @@
+#include <linux/ftrace.h>
 #include <linux/percpu.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
@@ -71,6 +72,13 @@
 	local_dbg_save(flags);
 
 	/*
+	 * Function graph tracer state gets inconsistent when the kernel
+	 * calls functions that never return (aka suspend finishers), hence
+	 * disable graph tracing during their execution.
+	 */
+	pause_graph_tracing();
+
+	/*
 	 * mm context saved on the stack, it will be restored when
 	 * the cpu comes out of reset through the identity mapped
 	 * page tables, so that the thread address space is properly
@@ -111,6 +119,8 @@
 			hw_breakpoint_restore(NULL);
 	}
 
+	unpause_graph_tracing();
+
 	/*
 	 * Restore pstate flags. OS lock and mdscr have been already
 	 * restored, so from this point onwards, debugging is fully
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 131a199..7963aa4 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -18,6 +18,7 @@
  */
 
 #include <linux/gfp.h>
+#include <linux/acpi.h>
 #include <linux/export.h>
 #include <linux/slab.h>
 #include <linux/genalloc.h>
@@ -28,9 +29,6 @@
 
 #include <asm/cacheflush.h>
 
-struct dma_map_ops *dma_ops;
-EXPORT_SYMBOL(dma_ops);
-
 static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
 				 bool coherent)
 {
@@ -515,13 +513,7 @@
 
 static int __init arm64_dma_init(void)
 {
-	int ret;
-
-	dma_ops = &swiotlb_dma_ops;
-
-	ret = atomic_pool_init();
-
-	return ret;
+	return atomic_pool_init();
 }
 arch_initcall(arm64_dma_init);
 
@@ -552,10 +544,14 @@
 {
 	bool coherent = is_device_dma_coherent(dev);
 	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
+	size_t iosize = size;
 	void *addr;
 
 	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
 		return NULL;
+
+	size = PAGE_ALIGN(size);
+
 	/*
 	 * Some drivers rely on this, and we probably don't want the
 	 * possibility of stale kernel data being read by devices anyway.
@@ -566,7 +562,7 @@
 		struct page **pages;
 		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
 
-		pages = iommu_dma_alloc(dev, size, gfp, ioprot,	handle,
+		pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
 					flush_page);
 		if (!pages)
 			return NULL;
@@ -574,7 +570,7 @@
 		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
 					      __builtin_return_address(0));
 		if (!addr)
-			iommu_dma_free(dev, pages, size, handle);
+			iommu_dma_free(dev, pages, iosize, handle);
 	} else {
 		struct page *page;
 		/*
@@ -591,7 +587,7 @@
 		if (!addr)
 			return NULL;
 
-		*handle = iommu_dma_map_page(dev, page, 0, size, ioprot);
+		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
 		if (iommu_dma_mapping_error(dev, *handle)) {
 			if (coherent)
 				__free_pages(page, get_order(size));
@@ -606,6 +602,9 @@
 static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
 			       dma_addr_t handle, struct dma_attrs *attrs)
 {
+	size_t iosize = size;
+
+	size = PAGE_ALIGN(size);
 	/*
 	 * @cpu_addr will be one of 3 things depending on how it was allocated:
 	 * - A remapped array of pages from iommu_dma_alloc(), for all
@@ -617,17 +616,17 @@
 	 * Hence how dodgy the below logic looks...
 	 */
 	if (__in_atomic_pool(cpu_addr, size)) {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_from_pool(cpu_addr, size);
 	} else if (is_vmalloc_addr(cpu_addr)){
 		struct vm_struct *area = find_vm_area(cpu_addr);
 
 		if (WARN_ON(!area || !area->pages))
 			return;
-		iommu_dma_free(dev, area->pages, size, &handle);
+		iommu_dma_free(dev, area->pages, iosize, &handle);
 		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
 	} else {
-		iommu_dma_unmap_page(dev, handle, size, 0, NULL);
+		iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
 		__free_pages(virt_to_page(cpu_addr), get_order(size));
 	}
 }
@@ -984,8 +983,8 @@
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
 			struct iommu_ops *iommu, bool coherent)
 {
-	if (!acpi_disabled && !dev->archdata.dma_ops)
-		dev->archdata.dma_ops = dma_ops;
+	if (!dev->archdata.dma_ops)
+		dev->archdata.dma_ops = &swiotlb_dma_ops;
 
 	dev->archdata.dma_coherent = coherent;
 	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c
index e3f563c..abb66f8 100644
--- a/arch/arm64/mm/mmu.c
+++ b/arch/arm64/mm/mmu.c
@@ -362,8 +362,8 @@
 	 * for now. This will get more fine grained later once all memory
 	 * is mapped
 	 */
-	unsigned long kernel_x_start = round_down(__pa(_stext), SECTION_SIZE);
-	unsigned long kernel_x_end = round_up(__pa(__init_end), SECTION_SIZE);
+	unsigned long kernel_x_start = round_down(__pa(_stext), SWAPPER_BLOCK_SIZE);
+	unsigned long kernel_x_end = round_up(__pa(__init_end), SWAPPER_BLOCK_SIZE);
 
 	if (end < kernel_x_start) {
 		create_mapping(start, __phys_to_virt(start),
@@ -451,18 +451,18 @@
 {
 #ifdef CONFIG_DEBUG_RODATA
 	/* now that we are actually fully mapped, make the start/end more fine grained */
-	if (!IS_ALIGNED((unsigned long)_stext, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)_stext, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_start = round_down(__pa(_stext),
-							SECTION_SIZE);
+							 SWAPPER_BLOCK_SIZE);
 
 		create_mapping(aligned_start, __phys_to_virt(aligned_start),
 				__pa(_stext) - aligned_start,
 				PAGE_KERNEL);
 	}
 
-	if (!IS_ALIGNED((unsigned long)__init_end, SECTION_SIZE)) {
+	if (!IS_ALIGNED((unsigned long)__init_end, SWAPPER_BLOCK_SIZE)) {
 		unsigned long aligned_end = round_up(__pa(__init_end),
-							SECTION_SIZE);
+							  SWAPPER_BLOCK_SIZE);
 		create_mapping(__pa(__init_end), (unsigned long)__init_end,
 				aligned_end - __pa(__init_end),
 				PAGE_KERNEL);
@@ -475,7 +475,7 @@
 {
 	create_mapping_late(__pa(_stext), (unsigned long)_stext,
 				(unsigned long)_etext - (unsigned long)_stext,
-				PAGE_KERNEL_EXEC | PTE_RDONLY);
+				PAGE_KERNEL_ROX);
 
 }
 #endif
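
Using PAGE_KERNEL_ROX here appears to be more than cosmetic. Expanding
the definitions from pgtable.h above:

	PAGE_KERNEL_EXEC | PTE_RDONLY
	  = _PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_WRITE | PTE_RDONLY

	PAGE_KERNEL_ROX
	  = _PAGE_DEFAULT | PTE_UXN | PTE_DIRTY | PTE_RDONLY

The old expression carried both PTE_WRITE and PTE_RDONLY at once; the
new macro describes read-only executable kernel text directly.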
diff --git a/arch/arm64/net/bpf_jit_comp.c b/arch/arm64/net/bpf_jit_comp.c
index cf3c7d4..d6a53ef 100644
--- a/arch/arm64/net/bpf_jit_comp.c
+++ b/arch/arm64/net/bpf_jit_comp.c
@@ -50,7 +50,7 @@
 	[BPF_REG_8] = A64_R(21),
 	[BPF_REG_9] = A64_R(22),
 	/* read-only frame pointer to access stack */
-	[BPF_REG_FP] = A64_FP,
+	[BPF_REG_FP] = A64_R(25),
 	/* temporary register for internal BPF JIT */
 	[TMP_REG_1] = A64_R(23),
 	[TMP_REG_2] = A64_R(24),
@@ -155,18 +155,49 @@
 	stack_size += 4; /* extra for skb_copy_bits buffer */
 	stack_size = STACK_ALIGN(stack_size);
 
+	/*
+	 * BPF prog stack layout
+	 *
+	 *                         high
+	 * original A64_SP =>   0:+-----+ BPF prologue
+	 *                        |FP/LR|
+	 * current A64_FP =>  -16:+-----+
+	 *                        | ... | callee saved registers
+	 *                        +-----+
+	 *                        |     | x25/x26
+	 * BPF fp register => -80:+-----+
+	 *                        |     |
+	 *                        | ... | BPF prog stack
+	 *                        |     |
+	 *                        |     |
+	 * current A64_SP =>      +-----+
+	 *                        |     |
+	 *                        | ... | Function call stack
+	 *                        |     |
+	 *                        +-----+
+	 *                          low
+	 *
+	 */
+
+	/* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
+	emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
+	emit(A64_MOV(1, A64_FP, A64_SP), ctx);
+
 	/* Save callee-saved register */
 	emit(A64_PUSH(r6, r7, A64_SP), ctx);
 	emit(A64_PUSH(r8, r9, A64_SP), ctx);
 	if (ctx->tmp_used)
 		emit(A64_PUSH(tmp1, tmp2, A64_SP), ctx);
 
-	/* Set up BPF stack */
-	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
+	/* Save fp (x25) and x26. SP requires 16-byte alignment */
+	emit(A64_PUSH(fp, A64_R(26), A64_SP), ctx);
 
-	/* Set up frame pointer */
+	/* Set up BPF prog stack base register (x25) */
 	emit(A64_MOV(1, fp, A64_SP), ctx);
 
+	/* Set up function call stack */
+	emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
+
 	/* Clear registers A and X */
 	emit_a64_mov_i64(ra, 0, ctx);
 	emit_a64_mov_i64(rx, 0, ctx);
@@ -190,14 +221,17 @@
 	/* We're done with BPF stack */
 	emit(A64_ADD_I(1, A64_SP, A64_SP, stack_size), ctx);
 
+	/* Restore fp (x25) and x26 */
+	emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
+
 	/* Restore callee-saved register */
 	if (ctx->tmp_used)
 		emit(A64_POP(tmp1, tmp2, A64_SP), ctx);
 	emit(A64_POP(r8, r9, A64_SP), ctx);
 	emit(A64_POP(r6, r7, A64_SP), ctx);
 
-	/* Restore frame pointer */
-	emit(A64_MOV(1, fp, A64_SP), ctx);
+	/* Restore FP/LR registers */
+	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
 
 	/* Set return value */
 	emit(A64_MOV(1, A64_R(0), r0), ctx);
@@ -758,7 +792,7 @@
 	if (bpf_jit_enable > 1)
 		bpf_jit_dump(prog->len, image_size, 2, ctx.image);
 
-	bpf_flush_icache(ctx.image, ctx.image + ctx.idx);
+	bpf_flush_icache(header, ctx.image + ctx.idx);
 
 	set_memory_ro((unsigned long)header, header->pages);
 	prog->bpf_func = (void *)ctx.image;
diff --git a/arch/powerpc/include/asm/systbl.h b/arch/powerpc/include/asm/systbl.h
index c9e26cb..f2b0b1b 100644
--- a/arch/powerpc/include/asm/systbl.h
+++ b/arch/powerpc/include/asm/systbl.h
@@ -382,3 +382,4 @@
 SYSCALL(shmdt)
 SYSCALL(shmget)
 COMPAT_SYS(shmctl)
+SYSCALL(mlock2)
diff --git a/arch/powerpc/include/asm/unistd.h b/arch/powerpc/include/asm/unistd.h
index 6d8f802..4b6b8ac 100644
--- a/arch/powerpc/include/asm/unistd.h
+++ b/arch/powerpc/include/asm/unistd.h
@@ -12,7 +12,7 @@
 #include <uapi/asm/unistd.h>
 
 
-#define __NR_syscalls		378
+#define __NR_syscalls		379
 
 #define __NR__exit __NR_exit
 #define NR_syscalls	__NR_syscalls
diff --git a/arch/powerpc/include/uapi/asm/unistd.h b/arch/powerpc/include/uapi/asm/unistd.h
index 81579e9..1effea5 100644
--- a/arch/powerpc/include/uapi/asm/unistd.h
+++ b/arch/powerpc/include/uapi/asm/unistd.h
@@ -400,5 +400,6 @@
 #define __NR_shmdt		375
 #define __NR_shmget		376
 #define __NR_shmctl		377
+#define __NR_mlock2		378
 
 #endif /* _UAPI_ASM_POWERPC_UNISTD_H_ */
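
With the number wired up, the new syscall can be exercised from user
space. A minimal sketch, assuming glibc of this era ships no mlock2()
wrapper so the raw syscall is used (MLOCK_ONFAULT taken from the generic
mlock2 interface):

	#include <stdio.h>
	#include <stdlib.h>
	#include <unistd.h>
	#include <sys/syscall.h>

	#ifndef __NR_mlock2
	#define __NR_mlock2 378		/* powerpc, per this patch */
	#endif
	#define MLOCK_ONFAULT 0x01

	int main(void)
	{
		size_t len = sysconf(_SC_PAGESIZE);
		void *buf = malloc(len);

		/* lock the page backing buf, faulting it in on first use */
		if (syscall(__NR_mlock2, buf, len, MLOCK_ONFAULT))
			perror("mlock2");
		free(buf);
		return 0;
	}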
diff --git a/arch/s390/include/asm/cio.h b/arch/s390/include/asm/cio.h
index 0c5d8ee..d1e7b0a 100644
--- a/arch/s390/include/asm/cio.h
+++ b/arch/s390/include/asm/cio.h
@@ -312,6 +312,7 @@
 extern void reipl_ccw_dev(struct ccw_dev_id *id);
 
 struct cio_iplinfo {
+	u8 ssid;
 	u16 devno;
 	int is_qdio;
 };
diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
index 3ad48f2..bab6739 100644
--- a/arch/s390/include/asm/elf.h
+++ b/arch/s390/include/asm/elf.h
@@ -206,9 +206,16 @@
 } while (0)
 #endif /* CONFIG_COMPAT */
 
-extern unsigned long mmap_rnd_mask;
-
-#define STACK_RND_MASK	(test_thread_flag(TIF_31BIT) ? 0x7ff : mmap_rnd_mask)
+/*
+ * Cache aliasing on the latest machines calls for a mapping granularity
+ * of 512KB. For 64-bit processes use a 512KB alignment and a randomization
+ * of up to 1GB. For 31-bit processes the virtual address space is limited;
+ * use no alignment and limit the randomization to 8MB.
+ */
+#define BRK_RND_MASK	(is_32bit_task() ? 0x7ffUL : 0x3ffffUL)
+#define MMAP_RND_MASK	(is_32bit_task() ? 0x7ffUL : 0x3ff80UL)
+#define MMAP_ALIGN_MASK	(is_32bit_task() ? 0 : 0x7fUL)
+#define STACK_RND_MASK	MMAP_RND_MASK
 
 #define ARCH_DLINFO							    \
 do {									    \
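
The masks encode the comment's numbers directly (PAGE_SHIFT is 12): for
64-bit tasks MMAP_RND_MASK = 0x3ff80 has its low seven bits clear, so the
random offset is always a multiple of 0x80 pages = 512KB and ranges up to
0x3ff80 << 12, i.e. just under 1GB; for 31-bit tasks 0x7ff << 12 gives
just under 8MB, with no extra alignment.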
diff --git a/arch/s390/include/asm/ipl.h b/arch/s390/include/asm/ipl.h
index 39ae6a3..86634e7 100644
--- a/arch/s390/include/asm/ipl.h
+++ b/arch/s390/include/asm/ipl.h
@@ -64,7 +64,8 @@
 
 struct ipl_block_ccw {
 	u8  reserved1[84];
-	u8  reserved2[2];
+	u16 reserved2 : 13;
+	u8  ssid : 3;
 	u16 devno;
 	u8  vm_flags;
 	u8  reserved3[3];
diff --git a/arch/s390/include/asm/pci_dma.h b/arch/s390/include/asm/pci_dma.h
index 7a7abf1..1aac41e 100644
--- a/arch/s390/include/asm/pci_dma.h
+++ b/arch/s390/include/asm/pci_dma.h
@@ -195,5 +195,7 @@
 void dma_free_seg_table(unsigned long);
 unsigned long *dma_alloc_cpu_table(void);
 void dma_cleanup_tables(unsigned long *);
-void dma_update_cpu_trans(unsigned long *, void *, dma_addr_t, int);
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr);
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags);
+
 #endif
diff --git a/arch/s390/include/asm/trace/diag.h b/arch/s390/include/asm/trace/diag.h
index 776f307..cc6cfe7 100644
--- a/arch/s390/include/asm/trace/diag.h
+++ b/arch/s390/include/asm/trace/diag.h
@@ -19,7 +19,7 @@
 #define TRACE_INCLUDE_PATH asm/trace
 #define TRACE_INCLUDE_FILE diag
 
-TRACE_EVENT(diagnose,
+TRACE_EVENT(s390_diagnose,
 	TP_PROTO(unsigned short nr),
 	TP_ARGS(nr),
 	TP_STRUCT__entry(
@@ -32,9 +32,9 @@
 );
 
 #ifdef CONFIG_TRACEPOINTS
-void trace_diagnose_norecursion(int diag_nr);
+void trace_s390_diagnose_norecursion(int diag_nr);
 #else
-static inline void trace_diagnose_norecursion(int diag_nr) { }
+static inline void trace_s390_diagnose_norecursion(int diag_nr) { }
 #endif
 
 #endif /* _TRACE_S390_DIAG_H */
diff --git a/arch/s390/include/uapi/asm/unistd.h b/arch/s390/include/uapi/asm/unistd.h
index a848adb..34ec202 100644
--- a/arch/s390/include/uapi/asm/unistd.h
+++ b/arch/s390/include/uapi/asm/unistd.h
@@ -192,14 +192,14 @@
 #define __NR_set_tid_address	252
 #define __NR_fadvise64		253
 #define __NR_timer_create	254
-#define __NR_timer_settime	(__NR_timer_create+1)
-#define __NR_timer_gettime	(__NR_timer_create+2)
-#define __NR_timer_getoverrun	(__NR_timer_create+3)
-#define __NR_timer_delete	(__NR_timer_create+4)
-#define __NR_clock_settime	(__NR_timer_create+5)
-#define __NR_clock_gettime	(__NR_timer_create+6)
-#define __NR_clock_getres	(__NR_timer_create+7)
-#define __NR_clock_nanosleep	(__NR_timer_create+8)
+#define __NR_timer_settime	255
+#define __NR_timer_gettime	256
+#define __NR_timer_getoverrun	257
+#define __NR_timer_delete	258
+#define __NR_clock_settime	259
+#define __NR_clock_gettime	260
+#define __NR_clock_getres	261
+#define __NR_clock_nanosleep	262
 /* Number 263 is reserved for vserver */
 #define __NR_statfs64		265
 #define __NR_fstatfs64		266
@@ -309,7 +309,8 @@
 #define __NR_recvfrom		371
 #define __NR_recvmsg		372
 #define __NR_shutdown		373
-#define NR_syscalls 374
+#define __NR_mlock2		374
+#define NR_syscalls 375
 
 /* 
  * There are some system calls that are not present on 64 bit, some
diff --git a/arch/s390/kernel/compat_wrapper.c b/arch/s390/kernel/compat_wrapper.c
index 09f1940..fac4eed 100644
--- a/arch/s390/kernel/compat_wrapper.c
+++ b/arch/s390/kernel/compat_wrapper.c
@@ -176,3 +176,4 @@
 COMPAT_SYSCALL_WRAP3(getsockname, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
 COMPAT_SYSCALL_WRAP3(getpeername, int, fd, struct sockaddr __user *, usockaddr, int __user *, usockaddr_len);
 COMPAT_SYSCALL_WRAP6(sendto, int, fd, void __user *, buff, size_t, len, unsigned int, flags, struct sockaddr __user *, addr, int, addr_len);
+COMPAT_SYSCALL_WRAP3(mlock2, unsigned long, start, size_t, len, int, flags);
diff --git a/arch/s390/kernel/diag.c b/arch/s390/kernel/diag.c
index f98766e..48b37b8 100644
--- a/arch/s390/kernel/diag.c
+++ b/arch/s390/kernel/diag.c
@@ -121,14 +121,14 @@
 void diag_stat_inc(enum diag_stat_enum nr)
 {
 	this_cpu_inc(diag_stat.counter[nr]);
-	trace_diagnose(diag_map[nr].code);
+	trace_s390_diagnose(diag_map[nr].code);
 }
 EXPORT_SYMBOL(diag_stat_inc);
 
 void diag_stat_inc_norecursion(enum diag_stat_enum nr)
 {
 	this_cpu_inc(diag_stat.counter[nr]);
-	trace_diagnose_norecursion(diag_map[nr].code);
+	trace_s390_diagnose_norecursion(diag_map[nr].code);
 }
 EXPORT_SYMBOL(diag_stat_inc_norecursion);
 
diff --git a/arch/s390/kernel/head.S b/arch/s390/kernel/head.S
index 1255c6c..301ee9c 100644
--- a/arch/s390/kernel/head.S
+++ b/arch/s390/kernel/head.S
@@ -26,6 +26,7 @@
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/page.h>
+#include <asm/ptrace.h>
 
 #define ARCH_OFFSET	4
 
@@ -59,19 +60,6 @@
 	.long	0x020006e0,0x20000050
 
 	.org	0x200
-#
-# subroutine to set architecture mode
-#
-.Lsetmode:
-	mvi	__LC_AR_MODE_ID,1	# set esame flag
-	slr	%r0,%r0 		# set cpuid to zero
-	lhi	%r1,2			# mode 2 = esame (dump)
-	sigp	%r1,%r0,0x12		# switch to esame mode
-	bras	%r13,0f
-	.fill	16,4,0x0
-0:	lmh	%r0,%r15,0(%r13)	# clear high-order half of gprs
-	sam31				# switch to 31 bit addressing mode
-	br	%r14
 
 #
 # subroutine to wait for end I/O
@@ -159,7 +147,14 @@
 	.long	0x02200050,0x00000000
 
 iplstart:
-	bas	%r14,.Lsetmode		# Immediately switch to 64 bit mode
+	mvi	__LC_AR_MODE_ID,1	# set esame flag
+	slr	%r0,%r0			# set cpuid to zero
+	lhi	%r1,2			# mode 2 = esame (dump)
+	sigp	%r1,%r0,0x12		# switch to esame mode
+	bras	%r13,0f
+	.fill	16,4,0x0
+0:	lmh	%r0,%r15,0(%r13)	# clear high-order half of gprs
+	sam31				# switch to 31 bit addressing mode
 	lh	%r1,0xb8		# test if subchannel number
 	bct	%r1,.Lnoload		#  is valid
 	l	%r1,0xb8		# load ipl subchannel number
@@ -269,71 +264,6 @@
 .Lcpuid:.fill	8,1,0
 
 #
-# SALIPL loader support. Based on a patch by Rob van der Heij.
-# This entry point is called directly from the SALIPL loader and
-# doesn't need a builtin ipl record.
-#
-	.org	0x800
-ENTRY(start)
-	stm	%r0,%r15,0x07b0		# store registers
-	bas	%r14,.Lsetmode		# Immediately switch to 64 bit mode
-	basr	%r12,%r0
-.base:
-	l	%r11,.parm
-	l	%r8,.cmd		# pointer to command buffer
-
-	ltr	%r9,%r9			# do we have SALIPL parameters?
-	bp	.sk8x8
-
-	mvc	0(64,%r8),0x00b0	# copy saved registers
-	xc	64(240-64,%r8),0(%r8)	# remainder of buffer
-	tr	0(64,%r8),.lowcase
-	b	.gotr
-.sk8x8:
-	mvc	0(240,%r8),0(%r9)	# copy iplparms into buffer
-.gotr:
-	slr	%r0,%r0
-	st	%r0,INITRD_SIZE+ARCH_OFFSET-PARMAREA(%r11)
-	st	%r0,INITRD_START+ARCH_OFFSET-PARMAREA(%r11)
-	j	startup 		# continue with startup
-.cmd:	.long	COMMAND_LINE		# address of command line buffer
-.parm:	.long	PARMAREA
-.lowcase:
-	.byte 0x00,0x01,0x02,0x03,0x04,0x05,0x06,0x07
-	.byte 0x08,0x09,0x0a,0x0b,0x0c,0x0d,0x0e,0x0f
-	.byte 0x10,0x11,0x12,0x13,0x14,0x15,0x16,0x17
-	.byte 0x18,0x19,0x1a,0x1b,0x1c,0x1d,0x1e,0x1f
-	.byte 0x20,0x21,0x22,0x23,0x24,0x25,0x26,0x27
-	.byte 0x28,0x29,0x2a,0x2b,0x2c,0x2d,0x2e,0x2f
-	.byte 0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37
-	.byte 0x38,0x39,0x3a,0x3b,0x3c,0x3d,0x3e,0x3f
-	.byte 0x40,0x41,0x42,0x43,0x44,0x45,0x46,0x47
-	.byte 0x48,0x49,0x4a,0x4b,0x4c,0x4d,0x4e,0x4f
-	.byte 0x50,0x51,0x52,0x53,0x54,0x55,0x56,0x57
-	.byte 0x58,0x59,0x5a,0x5b,0x5c,0x5d,0x5e,0x5f
-	.byte 0x60,0x61,0x62,0x63,0x64,0x65,0x66,0x67
-	.byte 0x68,0x69,0x6a,0x6b,0x6c,0x6d,0x6e,0x6f
-	.byte 0x70,0x71,0x72,0x73,0x74,0x75,0x76,0x77
-	.byte 0x78,0x79,0x7a,0x7b,0x7c,0x7d,0x7e,0x7f
-
-	.byte 0x80,0x81,0x82,0x83,0x84,0x85,0x86,0x87
-	.byte 0x88,0x89,0x8a,0x8b,0x8c,0x8d,0x8e,0x8f
-	.byte 0x90,0x91,0x92,0x93,0x94,0x95,0x96,0x97
-	.byte 0x98,0x99,0x9a,0x9b,0x9c,0x9d,0x9e,0x9f
-	.byte 0xa0,0xa1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7
-	.byte 0xa8,0xa9,0xaa,0xab,0xac,0xad,0xae,0xaf
-	.byte 0xb0,0xb1,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7
-	.byte 0xb8,0xb9,0xba,0xbb,0xbc,0xbd,0xbe,0xbf
-	.byte 0xc0,0x81,0x82,0x83,0x84,0x85,0x86,0x87	# .abcdefg
-	.byte 0x88,0x89,0xca,0xcb,0xcc,0xcd,0xce,0xcf	# hi
-	.byte 0xd0,0x91,0x92,0x93,0x94,0x95,0x96,0x97	# .jklmnop
-	.byte 0x98,0x99,0xda,0xdb,0xdc,0xdd,0xde,0xdf	# qr
-	.byte 0xe0,0xe1,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7	# ..stuvwx
-	.byte 0xa8,0xa9,0xea,0xeb,0xec,0xed,0xee,0xef	# yz
-	.byte 0xf0,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7
-	.byte 0xf8,0xf9,0xfa,0xfb,0xfc,0xfd,0xfe,0xff
-
-#
 # startup-code at 0x10000, running in absolute addressing mode
 # this is called either by the ipl loader or directly by PSW restart
 # or linload or SALIPL
@@ -364,7 +294,7 @@
 	bras	%r13,0f
 	.fill	16,4,0x0
 0:	lmh	%r0,%r15,0(%r13)	# clear high-order half of gprs
-	sam31				# switch to 31 bit addressing mode
+	sam64				# switch to 64 bit addressing mode
 	basr	%r13,0			# get base
 .LPG0:
 	xc	0x200(256),0x200	# partially clear lowcore
@@ -395,7 +325,7 @@
 	jnz	1b
 	j	4f
 2:	l	%r15,.Lstack-.LPG0(%r13)
-	ahi	%r15,-96
+	ahi	%r15,-STACK_FRAME_OVERHEAD
 	la	%r2,.Lals_string-.LPG0(%r13)
 	l	%r3,.Lsclp_print-.LPG0(%r13)
 	basr	%r14,%r3
@@ -429,8 +359,7 @@
 	.long 1, 0xc0000000
 #endif
 4:
-	/* Continue with 64bit startup code in head64.S */
-	sam64				# switch to 64 bit mode
+	/* Continue with startup code in head64.S */
 	jg	startup_continue
 
 	.align	8
diff --git a/arch/s390/kernel/ipl.c b/arch/s390/kernel/ipl.c
index f6d8acd..b1f0a90 100644
--- a/arch/s390/kernel/ipl.c
+++ b/arch/s390/kernel/ipl.c
@@ -121,6 +121,7 @@
  * Must be in data section since the bss section
  * is not cleared when these are accessed.
  */
+static u8 ipl_ssid __attribute__((__section__(".data"))) = 0;
 static u16 ipl_devno __attribute__((__section__(".data"))) = 0;
 u32 ipl_flags __attribute__((__section__(".data"))) = 0;
 
@@ -197,6 +198,33 @@
 	return snprintf(page, PAGE_SIZE, _format, ##args);		\
 }
 
+#define IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk)			\
+static ssize_t sys_##_prefix##_##_name##_store(struct kobject *kobj,	\
+		struct kobj_attribute *attr,				\
+		const char *buf, size_t len)				\
+{									\
+	unsigned long long ssid, devno;					\
+									\
+	if (sscanf(buf, "0.%llx.%llx\n", &ssid, &devno) != 2)		\
+		return -EINVAL;						\
+									\
+	if (ssid > __MAX_SSID || devno > __MAX_SUBCHANNEL)		\
+		return -EINVAL;						\
+									\
+	_ipl_blk.ssid = ssid;						\
+	_ipl_blk.devno = devno;						\
+	return len;							\
+}
+
+#define DEFINE_IPL_CCW_ATTR_RW(_prefix, _name, _ipl_blk)		\
+IPL_ATTR_SHOW_FN(_prefix, _name, "0.%x.%04x\n",				\
+		 _ipl_blk.ssid, _ipl_blk.devno);			\
+IPL_ATTR_CCW_STORE_FN(_prefix, _name, _ipl_blk);			\
+static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
+	__ATTR(_name, (S_IRUGO | S_IWUSR),				\
+	       sys_##_prefix##_##_name##_show,				\
+	       sys_##_prefix##_##_name##_store)				\
+
 #define DEFINE_IPL_ATTR_RO(_prefix, _name, _format, _value)		\
 IPL_ATTR_SHOW_FN(_prefix, _name, _format, _value)			\
 static struct kobj_attribute sys_##_prefix##_##_name##_attr =		\
@@ -395,7 +423,7 @@
 
 	switch (ipl_info.type) {
 	case IPL_TYPE_CCW:
-		return sprintf(page, "0.0.%04x\n", ipl_devno);
+		return sprintf(page, "0.%x.%04x\n", ipl_ssid, ipl_devno);
 	case IPL_TYPE_FCP:
 	case IPL_TYPE_FCP_DUMP:
 		return sprintf(page, "0.0.%04x\n", ipl->ipl_info.fcp.devno);
@@ -687,21 +715,14 @@
 				       struct bin_attribute *attr,
 				       char *buf, loff_t off, size_t count)
 {
+	size_t scpdata_len = count;
 	size_t padding;
-	size_t scpdata_len;
 
-	if (off < 0)
+
+	if (off)
 		return -EINVAL;
 
-	if (off >= DIAG308_SCPDATA_SIZE)
-		return -ENOSPC;
-
-	if (count > DIAG308_SCPDATA_SIZE - off)
-		count = DIAG308_SCPDATA_SIZE - off;
-
-	memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf + off, count);
-	scpdata_len = off + count;
-
+	memcpy(reipl_block_fcp->ipl_info.fcp.scp_data, buf, count);
 	if (scpdata_len % 8) {
 		padding = 8 - (scpdata_len % 8);
 		memset(reipl_block_fcp->ipl_info.fcp.scp_data + scpdata_len,
@@ -717,7 +738,7 @@
 }
 static struct bin_attribute sys_reipl_fcp_scp_data_attr =
 	__BIN_ATTR(scp_data, (S_IRUGO | S_IWUSR), reipl_fcp_scpdata_read,
-		   reipl_fcp_scpdata_write, PAGE_SIZE);
+		   reipl_fcp_scpdata_write, DIAG308_SCPDATA_SIZE);
 
 static struct bin_attribute *reipl_fcp_bin_attrs[] = {
 	&sys_reipl_fcp_scp_data_attr,
@@ -814,9 +835,7 @@
 };
 
 /* CCW reipl device attributes */
-
-DEFINE_IPL_ATTR_RW(reipl_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
-	reipl_block_ccw->ipl_info.ccw.devno);
+DEFINE_IPL_CCW_ATTR_RW(reipl_ccw, device, reipl_block_ccw->ipl_info.ccw);
 
 /* NSS wrapper */
 static ssize_t reipl_nss_loadparm_show(struct kobject *kobj,
@@ -1056,8 +1075,8 @@
 
 	switch (reipl_method) {
 	case REIPL_METHOD_CCW_CIO:
+		devid.ssid  = reipl_block_ccw->ipl_info.ccw.ssid;
 		devid.devno = reipl_block_ccw->ipl_info.ccw.devno;
-		devid.ssid  = 0;
 		reipl_ccw_dev(&devid);
 		break;
 	case REIPL_METHOD_CCW_VM:
@@ -1192,6 +1211,7 @@
 
 	reipl_block_ccw_init(reipl_block_ccw);
 	if (ipl_info.type == IPL_TYPE_CCW) {
+		reipl_block_ccw->ipl_info.ccw.ssid = ipl_ssid;
 		reipl_block_ccw->ipl_info.ccw.devno = ipl_devno;
 		reipl_block_ccw_fill_parms(reipl_block_ccw);
 	}
@@ -1336,9 +1356,7 @@
 };
 
 /* CCW dump device attributes */
-
-DEFINE_IPL_ATTR_RW(dump_ccw, device, "0.0.%04llx\n", "0.0.%llx\n",
-		   dump_block_ccw->ipl_info.ccw.devno);
+DEFINE_IPL_CCW_ATTR_RW(dump_ccw, device, dump_block_ccw->ipl_info.ccw);
 
 static struct attribute *dump_ccw_attrs[] = {
 	&sys_dump_ccw_device_attr.attr,
@@ -1418,8 +1436,8 @@
 
 	switch (dump_method) {
 	case DUMP_METHOD_CCW_CIO:
+		devid.ssid  = dump_block_ccw->ipl_info.ccw.ssid;
 		devid.devno = dump_block_ccw->ipl_info.ccw.devno;
-		devid.ssid  = 0;
 		reipl_ccw_dev(&devid);
 		break;
 	case DUMP_METHOD_CCW_VM:
@@ -1939,14 +1957,14 @@
 	ipl_info.type = get_ipl_type();
 	switch (ipl_info.type) {
 	case IPL_TYPE_CCW:
+		ipl_info.data.ccw.dev_id.ssid = ipl_ssid;
 		ipl_info.data.ccw.dev_id.devno = ipl_devno;
-		ipl_info.data.ccw.dev_id.ssid = 0;
 		break;
 	case IPL_TYPE_FCP:
 	case IPL_TYPE_FCP_DUMP:
+		ipl_info.data.fcp.dev_id.ssid = 0;
 		ipl_info.data.fcp.dev_id.devno =
 			IPL_PARMBLOCK_START->ipl_info.fcp.devno;
-		ipl_info.data.fcp.dev_id.ssid = 0;
 		ipl_info.data.fcp.wwpn = IPL_PARMBLOCK_START->ipl_info.fcp.wwpn;
 		ipl_info.data.fcp.lun = IPL_PARMBLOCK_START->ipl_info.fcp.lun;
 		break;
@@ -1978,6 +1996,7 @@
 	if (cio_get_iplinfo(&iplinfo))
 		return;
 
+	ipl_ssid = iplinfo.ssid;
 	ipl_devno = iplinfo.devno;
 	ipl_flags |= IPL_DEVNO_VALID;
 	if (!iplinfo.is_qdio)
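
With the new store function, the re-IPL device can now be specified
together with its subchannel set ID. For example (device number invented;
the sysfs path follows from the reipl_ccw attribute group):

  # echo 0.1.4711 > /sys/firmware/reipl/ccw/device
  # cat /sys/firmware/reipl/ccw/device
  0.1.4711

Previously the store format was "0.0.%llx", so the ssid was always forced
to zero.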
diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
index 688a3aa..114ee8b 100644
--- a/arch/s390/kernel/process.c
+++ b/arch/s390/kernel/process.c
@@ -243,11 +243,7 @@
 
 static inline unsigned long brk_rnd(void)
 {
-	/* 8MB for 32bit, 1GB for 64bit */
-	if (is_32bit_task())
-		return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
-	else
-		return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
+	return (get_random_int() & BRK_RND_MASK) << PAGE_SHIFT;
 }
 
 unsigned long arch_randomize_brk(struct mm_struct *mm)
diff --git a/arch/s390/kernel/sclp.c b/arch/s390/kernel/sclp.c
index fa0bdff..9fe7781 100644
--- a/arch/s390/kernel/sclp.c
+++ b/arch/s390/kernel/sclp.c
@@ -21,7 +21,7 @@
 	__ctl_load(cr0_new, 0, 0);
 
 	psw_ext_save = S390_lowcore.external_new_psw;
-	psw_mask = __extract_psw() & (PSW_MASK_EA | PSW_MASK_BA);
+	psw_mask = __extract_psw();
 	S390_lowcore.external_new_psw.mask = psw_mask;
 	psw_wait.mask = psw_mask | PSW_MASK_EXT | PSW_MASK_WAIT;
 	S390_lowcore.ext_int_code = 0;
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index ce0cbd6..c837bca 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -764,9 +764,6 @@
 	get_cpu_id(&cpu_id);
 	add_device_randomness(&cpu_id, sizeof(cpu_id));
 	switch (cpu_id.machine) {
-	case 0x9672:
-		strcpy(elf_platform, "g5");
-		break;
 	case 0x2064:
 	case 0x2066:
 	default:	/* Use "z900" as default for 64 bit kernels. */
diff --git a/arch/s390/kernel/syscalls.S b/arch/s390/kernel/syscalls.S
index 8c56929..5378c3e 100644
--- a/arch/s390/kernel/syscalls.S
+++ b/arch/s390/kernel/syscalls.S
@@ -382,3 +382,4 @@
 SYSCALL(sys_recvfrom,compat_sys_recvfrom)
 SYSCALL(sys_recvmsg,compat_sys_recvmsg)
 SYSCALL(sys_shutdown,sys_shutdown)
+SYSCALL(sys_mlock2,compat_sys_mlock2)
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 73239bb..21a5df9 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -9,11 +9,11 @@
 #define CREATE_TRACE_POINTS
 #include <asm/trace/diag.h>
 
-EXPORT_TRACEPOINT_SYMBOL(diagnose);
+EXPORT_TRACEPOINT_SYMBOL(s390_diagnose);
 
 static DEFINE_PER_CPU(unsigned int, diagnose_trace_depth);
 
-void trace_diagnose_norecursion(int diag_nr)
+void trace_s390_diagnose_norecursion(int diag_nr)
 {
 	unsigned long flags;
 	unsigned int *depth;
@@ -22,7 +22,7 @@
 	depth = this_cpu_ptr(&diagnose_trace_depth);
 	if (*depth == 0) {
 		(*depth)++;
-		trace_diagnose(diag_nr);
+		trace_s390_diagnose(diag_nr);
 		(*depth)--;
 	}
 	local_irq_restore(flags);
diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c
index c3c07d3..c722400 100644
--- a/arch/s390/mm/init.c
+++ b/arch/s390/mm/init.c
@@ -48,37 +48,13 @@
 
 static void __init setup_zero_pages(void)
 {
-	struct cpuid cpu_id;
 	unsigned int order;
 	struct page *page;
 	int i;
 
-	get_cpu_id(&cpu_id);
-	switch (cpu_id.machine) {
-	case 0x9672:	/* g5 */
-	case 0x2064:	/* z900 */
-	case 0x2066:	/* z900 */
-	case 0x2084:	/* z990 */
-	case 0x2086:	/* z990 */
-	case 0x2094:	/* z9-109 */
-	case 0x2096:	/* z9-109 */
-		order = 0;
-		break;
-	case 0x2097:	/* z10 */
-	case 0x2098:	/* z10 */
-	case 0x2817:	/* z196 */
-	case 0x2818:	/* z196 */
-		order = 2;
-		break;
-	case 0x2827:	/* zEC12 */
-	case 0x2828:	/* zEC12 */
-		order = 5;
-		break;
-	case 0x2964:	/* z13 */
-	default:
-		order = 7;
-		break;
-	}
+	/* Latest machines require a mapping granularity of 512KB */
+	order = 7;
+
 	/* Limit number of empty zero pages for small memory sizes */
 	while (order > 2 && (totalram_pages >> 10) < (1UL << order))
 		order--;
diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
index 6e552af..ea01477 100644
--- a/arch/s390/mm/mmap.c
+++ b/arch/s390/mm/mmap.c
@@ -31,9 +31,6 @@
 #include <linux/security.h>
 #include <asm/pgalloc.h>
 
-unsigned long mmap_rnd_mask;
-static unsigned long mmap_align_mask;
-
 static unsigned long stack_maxrandom_size(void)
 {
 	if (!(current->flags & PF_RANDOMIZE))
@@ -62,10 +59,7 @@
 
 unsigned long arch_mmap_rnd(void)
 {
-	if (is_32bit_task())
-		return (get_random_int() & 0x7ff) << PAGE_SHIFT;
-	else
-		return (get_random_int() & mmap_rnd_mask) << PAGE_SHIFT;
+	return (get_random_int() & MMAP_RND_MASK) << PAGE_SHIFT;
 }
 
 static unsigned long mmap_base_legacy(unsigned long rnd)
@@ -92,7 +86,6 @@
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	struct vm_unmapped_area_info info;
-	int do_color_align;
 
 	if (len > TASK_SIZE - mmap_min_addr)
 		return -ENOMEM;
@@ -108,15 +101,14 @@
 			return addr;
 	}
 
-	do_color_align = 0;
-	if (filp || (flags & MAP_SHARED))
-		do_color_align = !is_32bit_task();
-
 	info.flags = 0;
 	info.length = len;
 	info.low_limit = mm->mmap_base;
 	info.high_limit = TASK_SIZE;
-	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	if (filp || (flags & MAP_SHARED))
+		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
+	else
+		info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
 	return vm_unmapped_area(&info);
 }
@@ -130,7 +122,6 @@
 	struct mm_struct *mm = current->mm;
 	unsigned long addr = addr0;
 	struct vm_unmapped_area_info info;
-	int do_color_align;
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE - mmap_min_addr)
@@ -148,15 +139,14 @@
 			return addr;
 	}
 
-	do_color_align = 0;
-	if (filp || (flags & MAP_SHARED))
-		do_color_align = !is_32bit_task();
-
 	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
 	info.length = len;
 	info.low_limit = max(PAGE_SIZE, mmap_min_addr);
 	info.high_limit = mm->mmap_base;
-	info.align_mask = do_color_align ? (mmap_align_mask << PAGE_SHIFT) : 0;
+	if (filp || (flags & MAP_SHARED))
+		info.align_mask = MMAP_ALIGN_MASK << PAGE_SHIFT;
+	else
+		info.align_mask = 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
 	addr = vm_unmapped_area(&info);
 
@@ -254,35 +244,3 @@
 		mm->get_unmapped_area = s390_get_unmapped_area_topdown;
 	}
 }
-
-static int __init setup_mmap_rnd(void)
-{
-	struct cpuid cpu_id;
-
-	get_cpu_id(&cpu_id);
-	switch (cpu_id.machine) {
-	case 0x9672:
-	case 0x2064:
-	case 0x2066:
-	case 0x2084:
-	case 0x2086:
-	case 0x2094:
-	case 0x2096:
-	case 0x2097:
-	case 0x2098:
-	case 0x2817:
-	case 0x2818:
-	case 0x2827:
-	case 0x2828:
-		mmap_rnd_mask = 0x7ffUL;
-		mmap_align_mask = 0UL;
-		break;
-	case 0x2964:	/* z13 */
-	default:
-		mmap_rnd_mask = 0x3ff80UL;
-		mmap_align_mask = 0x7fUL;
-		break;
-	}
-	return 0;
-}
-early_initcall(setup_mmap_rnd);
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
index 37d10f7..d348f2c 100644
--- a/arch/s390/pci/pci_dma.c
+++ b/arch/s390/pci/pci_dma.c
@@ -33,7 +33,7 @@
 		return NULL;
 
 	for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
-		*entry = ZPCI_TABLE_INVALID | ZPCI_TABLE_PROTECTED;
+		*entry = ZPCI_TABLE_INVALID;
 	return table;
 }
 
@@ -51,7 +51,7 @@
 		return NULL;
 
 	for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
-		*entry = ZPCI_PTE_INVALID | ZPCI_TABLE_PROTECTED;
+		*entry = ZPCI_PTE_INVALID;
 	return table;
 }
 
@@ -95,7 +95,7 @@
 	return pto;
 }
 
-static unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
 {
 	unsigned long *sto, *pto;
 	unsigned int rtx, sx, px;
@@ -114,20 +114,10 @@
 	return &pto[px];
 }
 
-void dma_update_cpu_trans(unsigned long *dma_table, void *page_addr,
-			  dma_addr_t dma_addr, int flags)
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
 {
-	unsigned long *entry;
-
-	entry = dma_walk_cpu_trans(dma_table, dma_addr);
-	if (!entry) {
-		WARN_ON_ONCE(1);
-		return;
-	}
-
 	if (flags & ZPCI_PTE_INVALID) {
 		invalidate_pt_entry(entry);
-		return;
 	} else {
 		set_pt_pfaa(entry, page_addr);
 		validate_pt_entry(entry);
@@ -146,18 +136,25 @@
 	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
 	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags;
+	unsigned long *entry;
 	int i, rc = 0;
 
 	if (!nr_pages)
 		return -EINVAL;
 
 	spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
-	if (!zdev->dma_table)
+	if (!zdev->dma_table) {
+		rc = -EINVAL;
 		goto no_refresh;
+	}
 
 	for (i = 0; i < nr_pages; i++) {
-		dma_update_cpu_trans(zdev->dma_table, page_addr, dma_addr,
-				     flags);
+		entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+		if (!entry) {
+			rc = -ENOMEM;
+			goto undo_cpu_trans;
+		}
+		dma_update_cpu_trans(entry, page_addr, flags);
 		page_addr += PAGE_SIZE;
 		dma_addr += PAGE_SIZE;
 	}
@@ -176,6 +173,18 @@
 
 	rc = zpci_refresh_trans((u64) zdev->fh << 32, start_dma_addr,
 				nr_pages * PAGE_SIZE);
+undo_cpu_trans:
+	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+		flags = ZPCI_PTE_INVALID;
+		while (i-- > 0) {
+			page_addr -= PAGE_SIZE;
+			dma_addr -= PAGE_SIZE;
+			entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+			if (!entry)
+				break;
+			dma_update_cpu_trans(entry, page_addr, flags);
+		}
+	}
 
 no_refresh:
 	spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
@@ -260,6 +269,16 @@
 	spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
 }
 
+static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
+{
+	struct {
+		unsigned long rc;
+		unsigned long addr;
+	} __packed data = {rc, addr};
+
+	zpci_err_hex(&data, sizeof(data));
+}
+
 static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
 				     unsigned long offset, size_t size,
 				     enum dma_data_direction direction,
@@ -270,33 +289,40 @@
 	unsigned long pa = page_to_phys(page) + offset;
 	int flags = ZPCI_PTE_VALID;
 	dma_addr_t dma_addr;
+	int ret;
 
 	/* This rounds up number of pages based on size and offset */
 	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
 	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
-	if (iommu_page_index == -1)
+	if (iommu_page_index == -1) {
+		ret = -ENOSPC;
 		goto out_err;
+	}
 
 	/* Use rounded up size */
 	size = nr_pages * PAGE_SIZE;
 
 	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
-	if (dma_addr + size > zdev->end_dma)
+	if (dma_addr + size > zdev->end_dma) {
+		ret = -ERANGE;
 		goto out_free;
+	}
 
 	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
 		flags |= ZPCI_TABLE_PROTECTED;
 
-	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
-		atomic64_add(nr_pages, &zdev->mapped_pages);
-		return dma_addr + (offset & ~PAGE_MASK);
-	}
+	ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
+	if (ret)
+		goto out_free;
+
+	atomic64_add(nr_pages, &zdev->mapped_pages);
+	return dma_addr + (offset & ~PAGE_MASK);
 
 out_free:
 	dma_free_iommu(zdev, iommu_page_index, nr_pages);
 out_err:
 	zpci_err("map error:\n");
-	zpci_err_hex(&pa, sizeof(pa));
+	zpci_err_dma(ret, pa);
 	return DMA_ERROR_CODE;
 }
 
@@ -306,14 +332,16 @@
 {
 	struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
 	unsigned long iommu_page_index;
-	int npages;
+	int npages, ret;
 
 	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
 	dma_addr = dma_addr & PAGE_MASK;
-	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
-			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID)) {
+	ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
+			       ZPCI_PTE_INVALID);
+	if (ret) {
 		zpci_err("unmap error:\n");
-		zpci_err_hex(&dma_addr, sizeof(dma_addr));
+		zpci_err_dma(ret, dma_addr);
+		return;
 	}
 
 	atomic64_add(npages, &zdev->unmapped_pages);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 9f39056..690b402 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -35,7 +35,7 @@
 #define MSR_IA32_PERFCTR0		0x000000c1
 #define MSR_IA32_PERFCTR1		0x000000c2
 #define MSR_FSB_FREQ			0x000000cd
-#define MSR_NHM_PLATFORM_INFO		0x000000ce
+#define MSR_PLATFORM_INFO		0x000000ce
 
 #define MSR_NHM_SNB_PKG_CST_CFG_CTL	0x000000e2
 #define NHM_C3_AUTO_DEMOTE		(1UL << 25)
@@ -44,7 +44,6 @@
 #define SNB_C1_AUTO_UNDEMOTE		(1UL << 27)
 #define SNB_C3_AUTO_UNDEMOTE		(1UL << 28)
 
-#define MSR_PLATFORM_INFO		0x000000ce
 #define MSR_MTRRcap			0x000000fe
 #define MSR_IA32_BBL_CR_CTL		0x00000119
 #define MSR_IA32_BBL_CR_CTL3		0x0000011e
diff --git a/block/blk.h b/block/blk.h
index da722eb..c43926d 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -72,8 +72,6 @@
 void __blk_queue_free_tags(struct request_queue *q);
 bool __blk_end_bidi_request(struct request *rq, int error,
 			    unsigned int nr_bytes, unsigned int bidi_bytes);
-int blk_queue_enter(struct request_queue *q, gfp_t gfp);
-void blk_queue_exit(struct request_queue *q);
 void blk_freeze_queue(struct request_queue *q);
 
 static inline void blk_queue_enter_live(struct request_queue *q)
diff --git a/drivers/acpi/cppc_acpi.c b/drivers/acpi/cppc_acpi.c
index 3c083d2..6730f96 100644
--- a/drivers/acpi/cppc_acpi.c
+++ b/drivers/acpi/cppc_acpi.c
@@ -304,7 +304,7 @@
 
 static int register_pcc_channel(int pcc_subspace_idx)
 {
-	struct acpi_pcct_subspace *cppc_ss;
+	struct acpi_pcct_hw_reduced *cppc_ss;
 	unsigned int len;
 
 	if (pcc_subspace_idx >= 0) {
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f61a7c8..b420fb4 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -1103,7 +1103,7 @@
 	}
 
 err_exit:
-	if (result && q)
+	if (result)
 		acpi_ec_delete_query(q);
 	if (data)
 		*data = value;
diff --git a/drivers/acpi/sbshc.c b/drivers/acpi/sbshc.c
index bf034f8..2fa8304 100644
--- a/drivers/acpi/sbshc.c
+++ b/drivers/acpi/sbshc.c
@@ -14,7 +14,6 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
-#include <linux/dmi.h>
 #include "sbshc.h"
 
 #define PREFIX "ACPI: "
@@ -30,6 +29,7 @@
 	u8 query_bit;
 	smbus_alarm_callback callback;
 	void *context;
+	bool done;
 };
 
 static int acpi_smbus_hc_add(struct acpi_device *device);
@@ -88,8 +88,6 @@
 	ACPI_SMB_ALARM_DATA = 0x26,	/* 2 bytes alarm data */
 };
 
-static bool macbook;
-
 static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
 {
 	return ec_read(hc->offset + address, data);
@@ -100,27 +98,11 @@
 	return ec_write(hc->offset + address, data);
 }
 
-static inline int smb_check_done(struct acpi_smb_hc *hc)
-{
-	union acpi_smb_status status = {.raw = 0};
-	smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw);
-	return status.fields.done && (status.fields.status == SMBUS_OK);
-}
-
 static int wait_transaction_complete(struct acpi_smb_hc *hc, int timeout)
 {
-	if (wait_event_timeout(hc->wait, smb_check_done(hc),
-			       msecs_to_jiffies(timeout)))
+	if (wait_event_timeout(hc->wait, hc->done, msecs_to_jiffies(timeout)))
 		return 0;
-	/*
-	 * After the timeout happens, OS will try to check the status of SMbus.
-	 * If the status is what OS expected, it will be regarded as the bogus
-	 * timeout.
-	 */
-	if (smb_check_done(hc))
-		return 0;
-	else
-		return -ETIME;
+	return -ETIME;
 }
 
 static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
@@ -135,8 +117,7 @@
 	}
 
 	mutex_lock(&hc->lock);
-	if (macbook)
-		udelay(5);
+	hc->done = false;
 	if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
 		goto end;
 	if (temp) {
@@ -235,8 +216,10 @@
 	if (smb_hc_read(hc, ACPI_SMB_STATUS, &status.raw))
 		return 0;
 	/* Check if it is only a completion notify */
-	if (status.fields.done)
+	if (status.fields.done && status.fields.status == SMBUS_OK) {
+		hc->done = true;
 		wake_up(&hc->wait);
+	}
 	if (!status.fields.alarm)
 		return 0;
 	mutex_lock(&hc->lock);
@@ -262,29 +245,12 @@
 			      acpi_handle handle, acpi_ec_query_func func,
 			      void *data);
 
-static int macbook_dmi_match(const struct dmi_system_id *d)
-{
-	pr_debug("Detected MacBook, enabling workaround\n");
-	macbook = true;
-	return 0;
-}
-
-static struct dmi_system_id acpi_smbus_dmi_table[] = {
-	{ macbook_dmi_match, "Apple MacBook", {
-	  DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
-	  DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
-	},
-	{ },
-};
-
 static int acpi_smbus_hc_add(struct acpi_device *device)
 {
 	int status;
 	unsigned long long val;
 	struct acpi_smb_hc *hc;
 
-	dmi_check_system(acpi_smbus_dmi_table);
-
 	if (!device)
 		return -EINVAL;
 
diff --git a/drivers/base/power/wakeirq.c b/drivers/base/power/wakeirq.c
index eb6e674..0d77cd6 100644
--- a/drivers/base/power/wakeirq.c
+++ b/drivers/base/power/wakeirq.c
@@ -68,6 +68,9 @@
 	struct wake_irq *wirq;
 	int err;
 
+	if (irq < 0)
+		return -EINVAL;
+
 	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
 	if (!wirq)
 		return -ENOMEM;
@@ -167,6 +170,9 @@
 	struct wake_irq *wirq;
 	int err;
 
+	if (irq < 0)
+		return -EINVAL;
+
 	wirq = kzalloc(sizeof(*wirq), GFP_KERNEL);
 	if (!wirq)
 		return -ENOMEM;
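
The guard matters because the usual IRQ lookup helpers return a negative
errno on failure, which would previously have been registered as if it
were a valid interrupt number. A caller-side sketch (assuming the
dev_pm_set_wake_irq()/dev_pm_set_dedicated_wake_irq() entry points this
file provides):

	int irq = platform_get_irq(pdev, 0);	/* may be -ENXIO etc. */

	/* now fails cleanly with -EINVAL instead of storing a bogus IRQ */
	err = dev_pm_set_dedicated_wake_irq(dev, irq);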
diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
index 654f6f3..55fe902 100644
--- a/drivers/char/ipmi/ipmi_si_intf.c
+++ b/drivers/char/ipmi/ipmi_si_intf.c
@@ -412,18 +412,42 @@
 	return rv;
 }
 
-static void start_check_enables(struct smi_info *smi_info)
+static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
+{
+	smi_info->last_timeout_jiffies = jiffies;
+	mod_timer(&smi_info->si_timer, new_val);
+	smi_info->timer_running = true;
+}
+
+/*
+ * Start a new message and (re)start the timer and thread.
+ */
+static void start_new_msg(struct smi_info *smi_info, unsigned char *msg,
+			  unsigned int size)
+{
+	smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+	if (smi_info->thread)
+		wake_up_process(smi_info->thread);
+
+	smi_info->handlers->start_transaction(smi_info->si_sm, msg, size);
+}
+
+static void start_check_enables(struct smi_info *smi_info, bool start_timer)
 {
 	unsigned char msg[2];
 
 	msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 	msg[1] = IPMI_GET_BMC_GLOBAL_ENABLES_CMD;
 
-	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
+	if (start_timer)
+		start_new_msg(smi_info, msg, 2);
+	else
+		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 2);
 	smi_info->si_state = SI_CHECKING_ENABLES;
 }
 
-static void start_clear_flags(struct smi_info *smi_info)
+static void start_clear_flags(struct smi_info *smi_info, bool start_timer)
 {
 	unsigned char msg[3];
 
@@ -432,7 +456,10 @@
 	msg[1] = IPMI_CLEAR_MSG_FLAGS_CMD;
 	msg[2] = WDT_PRE_TIMEOUT_INT;
 
-	smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
+	if (start_timer)
+		start_new_msg(smi_info, msg, 3);
+	else
+		smi_info->handlers->start_transaction(smi_info->si_sm, msg, 3);
 	smi_info->si_state = SI_CLEARING_FLAGS;
 }
 
@@ -442,10 +469,8 @@
 	smi_info->curr_msg->data[1] = IPMI_GET_MSG_CMD;
 	smi_info->curr_msg->data_size = 2;
 
-	smi_info->handlers->start_transaction(
-		smi_info->si_sm,
-		smi_info->curr_msg->data,
-		smi_info->curr_msg->data_size);
+	start_new_msg(smi_info, smi_info->curr_msg->data,
+		      smi_info->curr_msg->data_size);
 	smi_info->si_state = SI_GETTING_MESSAGES;
 }
 
@@ -455,20 +480,11 @@
 	smi_info->curr_msg->data[1] = IPMI_READ_EVENT_MSG_BUFFER_CMD;
 	smi_info->curr_msg->data_size = 2;
 
-	smi_info->handlers->start_transaction(
-		smi_info->si_sm,
-		smi_info->curr_msg->data,
-		smi_info->curr_msg->data_size);
+	start_new_msg(smi_info, smi_info->curr_msg->data,
+		      smi_info->curr_msg->data_size);
 	smi_info->si_state = SI_GETTING_EVENTS;
 }
 
-static void smi_mod_timer(struct smi_info *smi_info, unsigned long new_val)
-{
-	smi_info->last_timeout_jiffies = jiffies;
-	mod_timer(&smi_info->si_timer, new_val);
-	smi_info->timer_running = true;
-}
-
 /*
  * When we have a situation where we run out of memory and cannot
  * allocate messages, we just leave them in the BMC and run the system
@@ -478,11 +494,11 @@
  * Note that we cannot just use disable_irq(), since the interrupt may
  * be shared.
  */
-static inline bool disable_si_irq(struct smi_info *smi_info)
+static inline bool disable_si_irq(struct smi_info *smi_info, bool start_timer)
 {
 	if ((smi_info->irq) && (!smi_info->interrupt_disabled)) {
 		smi_info->interrupt_disabled = true;
-		start_check_enables(smi_info);
+		start_check_enables(smi_info, start_timer);
 		return true;
 	}
 	return false;
@@ -492,7 +508,7 @@
 {
 	if ((smi_info->irq) && (smi_info->interrupt_disabled)) {
 		smi_info->interrupt_disabled = false;
-		start_check_enables(smi_info);
+		start_check_enables(smi_info, true);
 		return true;
 	}
 	return false;
@@ -510,7 +526,7 @@
 
 	msg = ipmi_alloc_smi_msg();
 	if (!msg) {
-		if (!disable_si_irq(smi_info))
+		if (!disable_si_irq(smi_info, true))
 			smi_info->si_state = SI_NORMAL;
 	} else if (enable_si_irq(smi_info)) {
 		ipmi_free_smi_msg(msg);
@@ -526,7 +542,7 @@
 		/* Watchdog pre-timeout */
 		smi_inc_stat(smi_info, watchdog_pretimeouts);
 
-		start_clear_flags(smi_info);
+		start_clear_flags(smi_info, true);
 		smi_info->msg_flags &= ~WDT_PRE_TIMEOUT_INT;
 		if (smi_info->intf)
 			ipmi_smi_watchdog_pretimeout(smi_info->intf);
@@ -879,8 +895,7 @@
 			msg[0] = (IPMI_NETFN_APP_REQUEST << 2);
 			msg[1] = IPMI_GET_MSG_FLAGS_CMD;
 
-			smi_info->handlers->start_transaction(
-				smi_info->si_sm, msg, 2);
+			start_new_msg(smi_info, msg, 2);
 			smi_info->si_state = SI_GETTING_FLAGS;
 			goto restart;
 		}
@@ -910,7 +925,7 @@
 		 * disable and messages disabled.
 		 */
 		if (smi_info->supports_event_msg_buff || smi_info->irq) {
-			start_check_enables(smi_info);
+			start_check_enables(smi_info, true);
 		} else {
 			smi_info->curr_msg = alloc_msg_handle_irq(smi_info);
 			if (!smi_info->curr_msg)
@@ -920,6 +935,13 @@
 		}
 		goto restart;
 	}
+
+	if (si_sm_result == SI_SM_IDLE && smi_info->timer_running) {
+	/* Ok, if it fails, the timer will just go off. */
+		if (del_timer(&smi_info->si_timer))
+			smi_info->timer_running = false;
+	}
+
  out:
 	return si_sm_result;
 }
@@ -2560,6 +2582,7 @@
 	  .data = (void *)(unsigned long) SI_BT },
 	{},
 };
+MODULE_DEVICE_TABLE(of, of_ipmi_match);
 
 static int of_ipmi_probe(struct platform_device *dev)
 {
@@ -2646,7 +2669,6 @@
 	}
 	return 0;
 }
-MODULE_DEVICE_TABLE(of, of_ipmi_match);
 #else
 #define of_ipmi_match NULL
 static int of_ipmi_probe(struct platform_device *dev)
@@ -3613,7 +3635,7 @@
 	 * Start clearing the flags before we enable interrupts or the
 	 * timer to avoid racing with the timer.
 	 */
-	start_clear_flags(new_smi);
+	start_clear_flags(new_smi, false);
 
 	/*
 	 * IRQ is defined to be set when non-zero.  req_events will
@@ -3908,7 +3930,7 @@
 		poll(to_clean);
 		schedule_timeout_uninterruptible(1);
 	}
-	disable_si_irq(to_clean);
+	disable_si_irq(to_clean, false);
 	while (to_clean->curr_msg || (to_clean->si_state != SI_NORMAL)) {
 		poll(to_clean);
 		schedule_timeout_uninterruptible(1);
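
Aside: the net effect of the ipmi_si hunks above is that the SI timer is
armed whenever a transaction starts and torn down once the state machine
goes idle. A minimal, hedged sketch of that "arm on start, disarm on
idle" pattern with this kernel generation's timer API; all my_* names
are hypothetical:

    #include <linux/timer.h>
    #include <linux/jiffies.h>

    struct my_dev {
    	struct timer_list poll_timer;
    	bool timer_running;
    };

    static void my_start_work(struct my_dev *d)
    {
    	/* (re)arm the fallback timer every time new work starts */
    	mod_timer(&d->poll_timer, jiffies + msecs_to_jiffies(100));
    	d->timer_running = true;
    }

    static void my_on_idle(struct my_dev *d)
    {
    	/* if del_timer() loses the race, the handler just fires once
    	 * more and finds nothing to do, as the comment above notes */
    	if (d->timer_running && del_timer(&d->poll_timer))
    		d->timer_running = false;
    }
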
diff --git a/drivers/char/ipmi/ipmi_watchdog.c b/drivers/char/ipmi/ipmi_watchdog.c
index 0ac3bd1..096f0cef 100644
--- a/drivers/char/ipmi/ipmi_watchdog.c
+++ b/drivers/char/ipmi/ipmi_watchdog.c
@@ -153,6 +153,9 @@
 /* The pre-timeout is disabled by default. */
 static int pretimeout;
 
+/* Default timeout to set on panic */
+static int panic_wdt_timeout = 255;
+
 /* Default action is to reset the board on a timeout. */
 static unsigned char action_val = WDOG_TIMEOUT_RESET;
 
@@ -293,6 +296,9 @@
 module_param(pretimeout, timeout, 0644);
 MODULE_PARM_DESC(pretimeout, "Pretimeout value in seconds.");
 
+module_param(panic_wdt_timeout, timeout, 0644);
+MODULE_PARM_DESC(panic_wdt_timeout, "Timeout value on kernel panic in seconds.");
+
 module_param_cb(action, &param_ops_str, action_op, 0644);
 MODULE_PARM_DESC(action, "Timeout action. One of: "
 		 "reset, none, power_cycle, power_off.");
@@ -1189,7 +1195,7 @@
 		/* Make sure we do this only once. */
 		panic_event_handled = 1;
 
-		timeout = 255;
+		timeout = panic_wdt_timeout;
 		pretimeout = 0;
 		panic_halt_ipmi_set_timeout();
 	}
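
Aside: for readers tracing the new panic_wdt_timeout plumbing, exposing
such a knob takes only a declaration, a module_param() registration and
a description. A hedged, generic sketch (plain int here, whereas the
watchdog uses its own custom "timeout" param type; my_* names are
hypothetical):

    #include <linux/module.h>
    #include <linux/moduleparam.h>

    /* hypothetical knob mirroring panic_wdt_timeout's plumbing */
    static int my_panic_timeout = 255;
    module_param(my_panic_timeout, int, 0644);
    MODULE_PARM_DESC(my_panic_timeout,
    		 "Timeout value set on kernel panic, in seconds.");

    MODULE_LICENSE("GPL");

It can then be set at load time (modprobe mymod my_panic_timeout=120)
or at runtime through /sys/module/<mod>/parameters/, matching the 0644
permissions.
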
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index 1582c1c..8014c23 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -84,6 +84,7 @@
 config ARM_MT8173_CPUFREQ
 	bool "Mediatek MT8173 CPUFreq support"
 	depends on ARCH_MEDIATEK && REGULATOR
+	depends on ARM64 || (ARM_CPU_TOPOLOGY && COMPILE_TEST)
 	depends on !CPU_THERMAL || THERMAL=y
 	select PM_OPP
 	help
diff --git a/drivers/cpufreq/Kconfig.x86 b/drivers/cpufreq/Kconfig.x86
index adbd1de..c59bdcb 100644
--- a/drivers/cpufreq/Kconfig.x86
+++ b/drivers/cpufreq/Kconfig.x86
@@ -5,7 +5,6 @@
 config X86_INTEL_PSTATE
        bool "Intel P state control"
        depends on X86
-       select ACPI_PROCESSOR if ACPI
        help
           This driver provides a P state for Intel core processors.
 	  The driver implements an internal governor and will become
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 2e31d09..001a532 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -34,14 +34,10 @@
 #include <asm/cpu_device_id.h>
 #include <asm/cpufeature.h>
 
-#if IS_ENABLED(CONFIG_ACPI)
-#include <acpi/processor.h>
-#endif
-
-#define BYT_RATIOS		0x66a
-#define BYT_VIDS		0x66b
-#define BYT_TURBO_RATIOS	0x66c
-#define BYT_TURBO_VIDS		0x66d
+#define ATOM_RATIOS		0x66a
+#define ATOM_VIDS		0x66b
+#define ATOM_TURBO_RATIOS	0x66c
+#define ATOM_TURBO_VIDS		0x66d
 
 #define FRAC_BITS 8
 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
@@ -117,9 +113,6 @@
 	u64	prev_mperf;
 	u64	prev_tsc;
 	struct sample sample;
-#if IS_ENABLED(CONFIG_ACPI)
-	struct acpi_processor_performance acpi_perf_data;
-#endif
 };
 
 static struct cpudata **all_cpu_data;
@@ -150,7 +143,6 @@
 static struct pstate_adjust_policy pid_params;
 static struct pstate_funcs pstate_funcs;
 static int hwp_active;
-static int no_acpi_perf;
 
 struct perf_limits {
 	int no_turbo;
@@ -163,8 +155,6 @@
 	int max_sysfs_pct;
 	int min_policy_pct;
 	int min_sysfs_pct;
-	int max_perf_ctl;
-	int min_perf_ctl;
 };
 
 static struct perf_limits performance_limits = {
@@ -191,8 +181,6 @@
 	.max_sysfs_pct = 100,
 	.min_policy_pct = 0,
 	.min_sysfs_pct = 0,
-	.max_perf_ctl = 0,
-	.min_perf_ctl = 0,
 };
 
 #ifdef CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE
@@ -201,153 +189,6 @@
 static struct perf_limits *limits = &powersave_limits;
 #endif
 
-#if IS_ENABLED(CONFIG_ACPI)
-/*
- * The max target pstate ratio is a 8 bit value in both PLATFORM_INFO MSR and
- * in TURBO_RATIO_LIMIT MSR, which pstate driver stores in max_pstate and
- * max_turbo_pstate fields. The PERF_CTL MSR contains 16 bit value for P state
- * ratio, out of it only high 8 bits are used. For example 0x1700 is setting
- * target ratio 0x17. The _PSS control value stores in a format which can be
- * directly written to PERF_CTL MSR. But in intel_pstate driver this shift
- * occurs during write to PERF_CTL (E.g. for cores core_set_pstate()).
- * This function converts the _PSS control value to intel pstate driver format
- * for comparison and assignment.
- */
-static int convert_to_native_pstate_format(struct cpudata *cpu, int index)
-{
-	return cpu->acpi_perf_data.states[index].control >> 8;
-}
-
-static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
-{
-	struct cpudata *cpu;
-	int ret;
-	bool turbo_absent = false;
-	int max_pstate_index;
-	int min_pss_ctl, max_pss_ctl, turbo_pss_ctl;
-	int i;
-
-	cpu = all_cpu_data[policy->cpu];
-
-	pr_debug("intel_pstate: default limits 0x%x 0x%x 0x%x\n",
-		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
-		 cpu->pstate.turbo_pstate);
-
-	if (!cpu->acpi_perf_data.shared_cpu_map &&
-	    zalloc_cpumask_var_node(&cpu->acpi_perf_data.shared_cpu_map,
-				    GFP_KERNEL, cpu_to_node(policy->cpu))) {
-		return -ENOMEM;
-	}
-
-	ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
-						  policy->cpu);
-	if (ret)
-		return ret;
-
-	/*
-	 * Check if the control value in _PSS is for PERF_CTL MSR, which should
-	 * guarantee that the states returned by it map to the states in our
-	 * list directly.
-	 */
-	if (cpu->acpi_perf_data.control_register.space_id !=
-						ACPI_ADR_SPACE_FIXED_HARDWARE)
-		return -EIO;
-
-	pr_debug("intel_pstate: CPU%u - ACPI _PSS perf data\n", policy->cpu);
-	for (i = 0; i < cpu->acpi_perf_data.state_count; i++)
-		pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
-			 (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
-			 (u32) cpu->acpi_perf_data.states[i].core_frequency,
-			 (u32) cpu->acpi_perf_data.states[i].power,
-			 (u32) cpu->acpi_perf_data.states[i].control);
-
-	/*
-	 * If there is only one entry _PSS, simply ignore _PSS and continue as
-	 * usual without taking _PSS into account
-	 */
-	if (cpu->acpi_perf_data.state_count < 2)
-		return 0;
-
-	turbo_pss_ctl = convert_to_native_pstate_format(cpu, 0);
-	min_pss_ctl = convert_to_native_pstate_format(cpu,
-					cpu->acpi_perf_data.state_count - 1);
-	/* Check if there is a turbo freq in _PSS */
-	if (turbo_pss_ctl <= cpu->pstate.max_pstate &&
-	    turbo_pss_ctl > cpu->pstate.min_pstate) {
-		pr_debug("intel_pstate: no turbo range exists in _PSS\n");
-		limits->no_turbo = limits->turbo_disabled = 1;
-		cpu->pstate.turbo_pstate = cpu->pstate.max_pstate;
-		turbo_absent = true;
-	}
-
-	/* Check if the max non turbo p state < Intel P state max */
-	max_pstate_index = turbo_absent ? 0 : 1;
-	max_pss_ctl = convert_to_native_pstate_format(cpu, max_pstate_index);
-	if (max_pss_ctl < cpu->pstate.max_pstate &&
-	    max_pss_ctl > cpu->pstate.min_pstate)
-		cpu->pstate.max_pstate = max_pss_ctl;
-
-	/* check If min perf > Intel P State min */
-	if (min_pss_ctl > cpu->pstate.min_pstate &&
-	    min_pss_ctl < cpu->pstate.max_pstate) {
-		cpu->pstate.min_pstate = min_pss_ctl;
-		policy->cpuinfo.min_freq = min_pss_ctl * cpu->pstate.scaling;
-	}
-
-	if (turbo_absent)
-		policy->cpuinfo.max_freq = cpu->pstate.max_pstate *
-						cpu->pstate.scaling;
-	else {
-		policy->cpuinfo.max_freq = cpu->pstate.turbo_pstate *
-						cpu->pstate.scaling;
-		/*
-		 * The _PSS table doesn't contain whole turbo frequency range.
-		 * This just contains +1 MHZ above the max non turbo frequency,
-		 * with control value corresponding to max turbo ratio. But
-		 * when cpufreq set policy is called, it will call with this
-		 * max frequency, which will cause a reduced performance as
-		 * this driver uses real max turbo frequency as the max
-		 * frequeny. So correct this frequency in _PSS table to
-		 * correct max turbo frequency based on the turbo ratio.
-		 * Also need to convert to MHz as _PSS freq is in MHz.
-		 */
-		cpu->acpi_perf_data.states[0].core_frequency =
-						turbo_pss_ctl * 100;
-	}
-
-	pr_debug("intel_pstate: Updated limits using _PSS 0x%x 0x%x 0x%x\n",
-		 cpu->pstate.min_pstate, cpu->pstate.max_pstate,
-		 cpu->pstate.turbo_pstate);
-	pr_debug("intel_pstate: policy max_freq=%d Khz min_freq = %d KHz\n",
-		 policy->cpuinfo.max_freq, policy->cpuinfo.min_freq);
-
-	return 0;
-}
-
-static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
-{
-	struct cpudata *cpu;
-
-	if (!no_acpi_perf)
-		return 0;
-
-	cpu = all_cpu_data[policy->cpu];
-	acpi_processor_unregister_performance(policy->cpu);
-	return 0;
-}
-
-#else
-static int intel_pstate_init_perf_limits(struct cpufreq_policy *policy)
-{
-	return 0;
-}
-
-static int intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
-{
-	return 0;
-}
-#endif
-
 static inline void pid_reset(struct _pid *pid, int setpoint, int busy,
 			     int deadband, int integral) {
 	pid->setpoint = setpoint;
@@ -687,31 +528,31 @@
 	wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
 }
 
-static int byt_get_min_pstate(void)
+static int atom_get_min_pstate(void)
 {
 	u64 value;
 
-	rdmsrl(BYT_RATIOS, value);
+	rdmsrl(ATOM_RATIOS, value);
 	return (value >> 8) & 0x7F;
 }
 
-static int byt_get_max_pstate(void)
+static int atom_get_max_pstate(void)
 {
 	u64 value;
 
-	rdmsrl(BYT_RATIOS, value);
+	rdmsrl(ATOM_RATIOS, value);
 	return (value >> 16) & 0x7F;
 }
 
-static int byt_get_turbo_pstate(void)
+static int atom_get_turbo_pstate(void)
 {
 	u64 value;
 
-	rdmsrl(BYT_TURBO_RATIOS, value);
+	rdmsrl(ATOM_TURBO_RATIOS, value);
 	return value & 0x7F;
 }
 
-static void byt_set_pstate(struct cpudata *cpudata, int pstate)
+static void atom_set_pstate(struct cpudata *cpudata, int pstate)
 {
 	u64 val;
 	int32_t vid_fp;
@@ -736,27 +577,42 @@
 	wrmsrl_on_cpu(cpudata->cpu, MSR_IA32_PERF_CTL, val);
 }
 
-#define BYT_BCLK_FREQS 5
-static int byt_freq_table[BYT_BCLK_FREQS] = { 833, 1000, 1333, 1167, 800};
-
-static int byt_get_scaling(void)
+static int silvermont_get_scaling(void)
 {
 	u64 value;
 	int i;
+	/* Defined in Table 35-6 from SDM (Sept 2015) */
+	static int silvermont_freq_table[] = {
+		83300, 100000, 133300, 116700, 80000};
 
 	rdmsrl(MSR_FSB_FREQ, value);
-	i = value & 0x3;
+	i = value & 0x7;
+	WARN_ON(i > 4);
 
-	BUG_ON(i > BYT_BCLK_FREQS);
-
-	return byt_freq_table[i] * 100;
+	return silvermont_freq_table[i];
 }
 
-static void byt_get_vid(struct cpudata *cpudata)
+static int airmont_get_scaling(void)
+{
+	u64 value;
+	int i;
+	/* Defined in Table 35-10 from SDM (Sept 2015) */
+	static int airmont_freq_table[] = {
+		83300, 100000, 133300, 116700, 80000,
+		93300, 90000, 88900, 87500};
+
+	rdmsrl(MSR_FSB_FREQ, value);
+	i = value & 0xF;
+	WARN_ON(i > 8);
+
+	return airmont_freq_table[i];
+}
+
+static void atom_get_vid(struct cpudata *cpudata)
 {
 	u64 value;
 
-	rdmsrl(BYT_VIDS, value);
+	rdmsrl(ATOM_VIDS, value);
 	cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
 	cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
 	cpudata->vid.ratio = div_fp(
@@ -764,7 +620,7 @@
 		int_tofp(cpudata->pstate.max_pstate -
 			cpudata->pstate.min_pstate));
 
-	rdmsrl(BYT_TURBO_VIDS, value);
+	rdmsrl(ATOM_TURBO_VIDS, value);
 	cpudata->vid.turbo = value & 0x7f;
 }
 
@@ -885,7 +741,7 @@
 	},
 };
 
-static struct cpu_defaults byt_params = {
+static struct cpu_defaults silvermont_params = {
 	.pid_policy = {
 		.sample_rate_ms = 10,
 		.deadband = 0,
@@ -895,13 +751,33 @@
 		.i_gain_pct = 4,
 	},
 	.funcs = {
-		.get_max = byt_get_max_pstate,
-		.get_max_physical = byt_get_max_pstate,
-		.get_min = byt_get_min_pstate,
-		.get_turbo = byt_get_turbo_pstate,
-		.set = byt_set_pstate,
-		.get_scaling = byt_get_scaling,
-		.get_vid = byt_get_vid,
+		.get_max = atom_get_max_pstate,
+		.get_max_physical = atom_get_max_pstate,
+		.get_min = atom_get_min_pstate,
+		.get_turbo = atom_get_turbo_pstate,
+		.set = atom_set_pstate,
+		.get_scaling = silvermont_get_scaling,
+		.get_vid = atom_get_vid,
+	},
+};
+
+static struct cpu_defaults airmont_params = {
+	.pid_policy = {
+		.sample_rate_ms = 10,
+		.deadband = 0,
+		.setpoint = 60,
+		.p_gain_pct = 14,
+		.d_gain_pct = 0,
+		.i_gain_pct = 4,
+	},
+	.funcs = {
+		.get_max = atom_get_max_pstate,
+		.get_max_physical = atom_get_max_pstate,
+		.get_min = atom_get_min_pstate,
+		.get_turbo = atom_get_turbo_pstate,
+		.set = atom_set_pstate,
+		.get_scaling = airmont_get_scaling,
+		.get_vid = atom_get_vid,
 	},
 };
 
@@ -938,23 +814,12 @@
 	 * policy, or by cpu specific default values determined through
 	 * experimentation.
 	 */
-	if (limits->max_perf_ctl && limits->max_sysfs_pct >=
-						limits->max_policy_pct) {
-		*max = limits->max_perf_ctl;
-	} else {
-		max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf),
-					limits->max_perf));
-		*max = clamp_t(int, max_perf_adj, cpu->pstate.min_pstate,
-			       cpu->pstate.turbo_pstate);
-	}
+	max_perf_adj = fp_toint(mul_fp(int_tofp(max_perf), limits->max_perf));
+	*max = clamp_t(int, max_perf_adj,
+			cpu->pstate.min_pstate, cpu->pstate.turbo_pstate);
 
-	if (limits->min_perf_ctl) {
-		*min = limits->min_perf_ctl;
-	} else {
-		min_perf = fp_toint(mul_fp(int_tofp(max_perf),
-				    limits->min_perf));
-		*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
-	}
+	min_perf = fp_toint(mul_fp(int_tofp(max_perf), limits->min_perf));
+	*min = clamp_t(int, min_perf, cpu->pstate.min_pstate, max_perf);
 }
 
 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate, bool force)
@@ -1153,7 +1018,7 @@
 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
 	ICPU(0x2a, core_params),
 	ICPU(0x2d, core_params),
-	ICPU(0x37, byt_params),
+	ICPU(0x37, silvermont_params),
 	ICPU(0x3a, core_params),
 	ICPU(0x3c, core_params),
 	ICPU(0x3d, core_params),
@@ -1162,7 +1027,7 @@
 	ICPU(0x45, core_params),
 	ICPU(0x46, core_params),
 	ICPU(0x47, core_params),
-	ICPU(0x4c, byt_params),
+	ICPU(0x4c, airmont_params),
 	ICPU(0x4e, core_params),
 	ICPU(0x4f, core_params),
 	ICPU(0x5e, core_params),
@@ -1229,12 +1094,6 @@
 
 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
 {
-#if IS_ENABLED(CONFIG_ACPI)
-	struct cpudata *cpu;
-	int i;
-#endif
-	pr_debug("intel_pstate: %s max %u policy->max %u\n", __func__,
-		 policy->cpuinfo.max_freq, policy->max);
 	if (!policy->cpuinfo.max_freq)
 		return -ENODEV;
 
@@ -1270,23 +1129,6 @@
 	limits->max_perf = div_fp(int_tofp(limits->max_perf_pct),
 				  int_tofp(100));
 
-#if IS_ENABLED(CONFIG_ACPI)
-	cpu = all_cpu_data[policy->cpu];
-	for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
-		int control;
-
-		control = convert_to_native_pstate_format(cpu, i);
-		if (control * cpu->pstate.scaling == policy->max)
-			limits->max_perf_ctl = control;
-		if (control * cpu->pstate.scaling == policy->min)
-			limits->min_perf_ctl = control;
-	}
-
-	pr_debug("intel_pstate: max %u policy_max %u perf_ctl [0x%x-0x%x]\n",
-		 policy->cpuinfo.max_freq, policy->max, limits->min_perf_ctl,
-		 limits->max_perf_ctl);
-#endif
-
 	if (hwp_active)
 		intel_pstate_hwp_set();
 
@@ -1341,30 +1183,18 @@
 	policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
 	policy->cpuinfo.max_freq =
 		cpu->pstate.turbo_pstate * cpu->pstate.scaling;
-	if (!no_acpi_perf)
-		intel_pstate_init_perf_limits(policy);
-	/*
-	 * If there is no acpi perf data or error, we ignore and use Intel P
-	 * state calculated limits, So this is not fatal error.
-	 */
 	policy->cpuinfo.transition_latency = CPUFREQ_ETERNAL;
 	cpumask_set_cpu(policy->cpu, policy->cpus);
 
 	return 0;
 }
 
-static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
-{
-	return intel_pstate_exit_perf_limits(policy);
-}
-
 static struct cpufreq_driver intel_pstate_driver = {
 	.flags		= CPUFREQ_CONST_LOOPS,
 	.verify		= intel_pstate_verify_policy,
 	.setpolicy	= intel_pstate_set_policy,
 	.get		= intel_pstate_get,
 	.init		= intel_pstate_cpu_init,
-	.exit		= intel_pstate_cpu_exit,
 	.stop_cpu	= intel_pstate_stop_cpu,
 	.name		= "intel_pstate",
 };
@@ -1406,6 +1236,7 @@
 }
 
 #if IS_ENABLED(CONFIG_ACPI)
+#include <acpi/processor.h>
 
 static bool intel_pstate_no_acpi_pss(void)
 {
@@ -1601,9 +1432,6 @@
 		force_load = 1;
 	if (!strcmp(str, "hwp_only"))
 		hwp_only = 1;
-	if (!strcmp(str, "no_acpi"))
-		no_acpi_perf = 1;
-
 	return 0;
 }
 early_param("intel_pstate", intel_pstate_setup);
diff --git a/drivers/crypto/qat/qat_common/adf_ctl_drv.c b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
index 03856ad..473d36d 100644
--- a/drivers/crypto/qat/qat_common/adf_ctl_drv.c
+++ b/drivers/crypto/qat/qat_common/adf_ctl_drv.c
@@ -198,7 +198,7 @@
 			goto out_err;
 		}
 
-		params_head = section_head->params;
+		params_head = section.params;
 
 		while (params_head) {
 			if (copy_from_user(&key_val, (void __user *)params_head,
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index 4e55239..53d22eb 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -729,8 +729,8 @@
 		return NULL;
 
 	dev_info(chan2dev(chan),
-		 "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
-		__func__, xt->src_start, xt->dst_start, xt->numf,
+		 "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
+		__func__, &xt->src_start, &xt->dst_start, xt->numf,
 		xt->frame_size, flags);
 
 	/*
@@ -824,8 +824,8 @@
 	u32			ctrla;
 	u32			ctrlb;
 
-	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d0x%x s0x%x l0x%zx f0x%lx\n",
-			dest, src, len, flags);
+	dev_vdbg(chan2dev(chan), "prep_dma_memcpy: d%pad s%pad l0x%zx f0x%lx\n",
+			&dest, &src, len, flags);
 
 	if (unlikely(!len)) {
 		dev_dbg(chan2dev(chan), "prep_dma_memcpy: length is zero!\n");
@@ -938,8 +938,8 @@
 	void __iomem		*vaddr;
 	dma_addr_t		paddr;
 
-	dev_vdbg(chan2dev(chan), "%s: d0x%x v0x%x l0x%zx f0x%lx\n", __func__,
-		dest, value, len, flags);
+	dev_vdbg(chan2dev(chan), "%s: d%pad v0x%x l0x%zx f0x%lx\n", __func__,
+		&dest, value, len, flags);
 
 	if (unlikely(!len)) {
 		dev_dbg(chan2dev(chan), "%s: length is zero!\n", __func__);
@@ -1022,8 +1022,8 @@
 		dma_addr_t dest = sg_dma_address(sg);
 		size_t len = sg_dma_len(sg);
 
-		dev_vdbg(chan2dev(chan), "%s: d0x%08x, l0x%zx\n",
-			 __func__, dest, len);
+		dev_vdbg(chan2dev(chan), "%s: d%pad, l0x%zx\n",
+			 __func__, &dest, len);
 
 		if (!is_dma_fill_aligned(chan->device, dest, 0, len)) {
 			dev_err(chan2dev(chan), "%s: buffer is not aligned\n",
@@ -1439,9 +1439,9 @@
 	unsigned int		periods = buf_len / period_len;
 	unsigned int		i;
 
-	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@0x%08x - %d (%d/%d)\n",
+	dev_vdbg(chan2dev(chan), "prep_dma_cyclic: %s buf@%pad - %d (%d/%d)\n",
 			direction == DMA_MEM_TO_DEV ? "TO DEVICE" : "FROM DEVICE",
-			buf_addr,
+			&buf_addr,
 			periods, buf_len, period_len);
 
 	if (unlikely(!atslave || !buf_len || !period_len)) {
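
Aside: every hunk in this file (and in at_hdmac_regs.h and at_xdmac.c
below) is the same fix: dma_addr_t can be wider than unsigned int, e.g.
with LPAE, so printing it through 0x%08x truncates the value and trips
format checking. The kernel's %pad specifier takes a pointer to the
dma_addr_t and prints it at the correct width; a hedged sketch:

    #include <linux/device.h>
    #include <linux/types.h>

    static void show_dest(struct device *dev, dma_addr_t dest)
    {
    	/* wrong on wide dma_addr_t: truncates the upper bits */
    	/* dev_dbg(dev, "d0x%08x\n", dest); */

    	/* right: pass the variable's address and let %pad size it */
    	dev_dbg(dev, "d%pad\n", &dest);
    }

Note that %pad already emits a 0x prefix of its own, which is why the
atc_dump_lli format below uses plain l%pad rather than l0x%pad.
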
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index d1cfc8c..7f58f06 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -385,9 +385,9 @@
 static void atc_dump_lli(struct at_dma_chan *atchan, struct at_lli *lli)
 {
 	dev_crit(chan2dev(&atchan->chan_common),
-		 "  desc: s0x%x d0x%x ctrl0x%x:0x%x l0x%x\n",
-		 lli->saddr, lli->daddr,
-		 lli->ctrla, lli->ctrlb, lli->dscr);
+		 "  desc: s%pad d%pad ctrl0x%x:0x%x l0x%pad\n",
+		 &lli->saddr, &lli->daddr,
+		 lli->ctrla, lli->ctrlb, &lli->dscr);
 }
 
 
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index b5e132d..7f039de 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -920,8 +920,8 @@
 	desc->lld.mbr_cfg = chan_cc;
 
 	dev_dbg(chan2dev(chan),
-		"%s: lld: mbr_sa=0x%08x, mbr_da=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
-		__func__, desc->lld.mbr_sa, desc->lld.mbr_da,
+		"%s: lld: mbr_sa=%pad, mbr_da=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+		__func__, &desc->lld.mbr_sa, &desc->lld.mbr_da,
 		desc->lld.mbr_ubc, desc->lld.mbr_cfg);
 
 	/* Chain lld. */
@@ -953,8 +953,8 @@
 	if ((xt->numf > 1) && (xt->frame_size > 1))
 		return NULL;
 
-	dev_dbg(chan2dev(chan), "%s: src=0x%08x, dest=0x%08x, numf=%d, frame_size=%d, flags=0x%lx\n",
-		__func__, xt->src_start, xt->dst_start,	xt->numf,
+	dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, numf=%d, frame_size=%d, flags=0x%lx\n",
+		__func__, &xt->src_start, &xt->dst_start,	xt->numf,
 		xt->frame_size, flags);
 
 	src_addr = xt->src_start;
@@ -1179,8 +1179,8 @@
 	desc->lld.mbr_cfg = chan_cc;
 
 	dev_dbg(chan2dev(chan),
-		"%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
-		__func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
+		"%s: lld: mbr_da=%pad, mbr_ds=%pad, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
+		__func__, &desc->lld.mbr_da, &desc->lld.mbr_ds, desc->lld.mbr_ubc,
 		desc->lld.mbr_cfg);
 
 	return desc;
@@ -1193,8 +1193,8 @@
 	struct at_xdmac_chan	*atchan = to_at_xdmac_chan(chan);
 	struct at_xdmac_desc	*desc;
 
-	dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
-		__func__, dest, len, value, flags);
+	dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
+		__func__, &dest, len, value, flags);
 
 	if (unlikely(!len))
 		return NULL;
@@ -1229,8 +1229,8 @@
 
 	/* Prepare descriptors. */
 	for_each_sg(sgl, sg, sg_len, i) {
-		dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
-			__func__, sg_dma_address(sg), sg_dma_len(sg),
+		dev_dbg(chan2dev(chan), "%s: dest=%pad, len=%d, pattern=0x%x, flags=0x%lx\n",
+			__func__, &sg_dma_address(sg), sg_dma_len(sg),
 			value, flags);
 		desc = at_xdmac_memset_create_desc(chan, atchan,
 						   sg_dma_address(sg),
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index 6b03e4e..0675e26 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -107,7 +107,7 @@
 
 /* CCCFG register */
 #define GET_NUM_DMACH(x)	(x & 0x7) /* bits 0-2 */
-#define GET_NUM_QDMACH(x)	(x & 0x70 >> 4) /* bits 4-6 */
+#define GET_NUM_QDMACH(x)	((x & 0x70) >> 4) /* bits 4-6 */
 #define GET_NUM_PAENTRY(x)	((x & 0x7000) >> 12) /* bits 12-14 */
 #define GET_NUM_EVQUE(x)	((x & 0x70000) >> 16) /* bits 16-18 */
 #define GET_NUM_REGN(x)		((x & 0x300000) >> 20) /* bits 20-21 */
@@ -1565,7 +1565,7 @@
 	struct platform_device *tc_pdev;
 	int ret;
 
-	if (!tc)
+	if (!IS_ENABLED(CONFIG_OF) || !tc)
 		return;
 
 	tc_pdev = of_find_device_by_node(tc->node);
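
Aside: the GET_NUM_QDMACH change is a classic precedence fix: >> binds
tighter than &, so `x & 0x70 >> 4` parses as `x & (0x70 >> 4)`, i.e.
`x & 0x7`, reading bits 0-2 instead of bits 4-6. A runnable
demonstration:

    #include <stdio.h>

    #define BROKEN(x)	((x) & 0x70 >> 4)	/* parses as (x) & 0x7 */
    #define FIXED(x)	(((x) & 0x70) >> 4)	/* bits 4-6, as intended */

    int main(void)
    {
    	unsigned int cccfg = 0x25;	/* bits 4-6 = 2, bits 0-2 = 5 */

    	printf("broken: %u\n", BROKEN(cccfg));	/* prints 5 */
    	printf("fixed:  %u\n", FIXED(cccfg));	/* prints 2 */
    	return 0;
    }
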
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index 7058d58..0f6fd42 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -1462,7 +1462,7 @@
 
 #define EVENT_REMAP_CELLS 3
 
-static int __init sdma_event_remap(struct sdma_engine *sdma)
+static int sdma_event_remap(struct sdma_engine *sdma)
 {
 	struct device_node *np = sdma->dev->of_node;
 	struct device_node *gpr_np = of_parse_phandle(np, "gpr", 0);
diff --git a/drivers/dma/sh/usb-dmac.c b/drivers/dma/sh/usb-dmac.c
index ebd8a5f..f1bcc2a 100644
--- a/drivers/dma/sh/usb-dmac.c
+++ b/drivers/dma/sh/usb-dmac.c
@@ -679,8 +679,11 @@
 	struct usb_dmac *dmac = dev_get_drvdata(dev);
 	int i;
 
-	for (i = 0; i < dmac->n_channels; ++i)
+	for (i = 0; i < dmac->n_channels; ++i) {
+		if (!dmac->channels[i].iomem)
+			break;
 		usb_dmac_chan_halt(&dmac->channels[i]);
+	}
 
 	return 0;
 }
@@ -799,11 +802,10 @@
 	ret = pm_runtime_get_sync(&pdev->dev);
 	if (ret < 0) {
 		dev_err(&pdev->dev, "runtime PM get sync failed (%d)\n", ret);
-		return ret;
+		goto error_pm;
 	}
 
 	ret = usb_dmac_init(dmac);
-	pm_runtime_put(&pdev->dev);
 
 	if (ret) {
 		dev_err(&pdev->dev, "failed to reset device\n");
@@ -851,10 +853,13 @@
 	if (ret < 0)
 		goto error;
 
+	pm_runtime_put(&pdev->dev);
 	return 0;
 
 error:
 	of_dma_controller_free(pdev->dev.of_node);
+	pm_runtime_put(&pdev->dev);
+error_pm:
 	pm_runtime_disable(&pdev->dev);
 	return ret;
 }
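
Aside: the usb-dmac rework keeps the device powered across the whole of
probe and funnels every failure through unwind labels in reverse order
of acquisition, the idiomatic shape for this. A hedged sketch mirroring
the patch (my_hw_init is a hypothetical stand-in for the init steps):

    #include <linux/platform_device.h>
    #include <linux/pm_runtime.h>

    static int my_hw_init(struct platform_device *pdev);

    static int my_probe(struct platform_device *pdev)
    {
    	int ret;

    	pm_runtime_enable(&pdev->dev);
    	ret = pm_runtime_get_sync(&pdev->dev);
    	if (ret < 0)
    		goto error_pm;

    	ret = my_hw_init(pdev);
    	if (ret)
    		goto error;

    	pm_runtime_put(&pdev->dev);	/* drop only after full success */
    	return 0;

    error:
    	pm_runtime_put(&pdev->dev);
    error_pm:
    	pm_runtime_disable(&pdev->dev);
    	return ret;
    }
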
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 615ce6d..306f757 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -389,7 +389,6 @@
  * Fences.
  */
 struct amdgpu_fence_driver {
-	struct amdgpu_ring		*ring;
 	uint64_t			gpu_addr;
 	volatile uint32_t		*cpu_addr;
 	/* sync_seq is protected by ring emission lock */
@@ -398,7 +397,7 @@
 	bool				initialized;
 	struct amdgpu_irq_src		*irq_src;
 	unsigned			irq_type;
-	struct delayed_work             lockup_work;
+	struct timer_list		fallback_timer;
 	wait_queue_head_t		fence_queue;
 };
 
@@ -917,8 +916,8 @@
 #define AMDGPU_VM_FAULT_STOP_ALWAYS	2
 
 struct amdgpu_vm_pt {
-	struct amdgpu_bo		*bo;
-	uint64_t			addr;
+	struct amdgpu_bo	*bo;
+	uint64_t		addr;
 };
 
 struct amdgpu_vm_id {
@@ -926,8 +925,6 @@
 	uint64_t		pd_gpu_addr;
 	/* last flushed PD/PT update */
 	struct fence	        *flushed_updates;
-	/* last use of vmid */
-	struct fence		*last_id_use;
 };
 
 struct amdgpu_vm {
@@ -957,24 +954,70 @@
 
 	/* for id and flush management per ring */
 	struct amdgpu_vm_id	ids[AMDGPU_MAX_RINGS];
+	/* for interval tree */
+	spinlock_t		it_lock;
 };
 
 struct amdgpu_vm_manager {
-	struct fence			*active[AMDGPU_NUM_VM];
-	uint32_t			max_pfn;
+	struct {
+		struct fence	*active;
+		atomic_long_t	owner;
+	} ids[AMDGPU_NUM_VM];
+
+	uint32_t				max_pfn;
 	/* number of VMIDs */
-	unsigned			nvm;
+	unsigned				nvm;
 	/* vram base address for page table entry  */
-	u64				vram_base_offset;
+	u64					vram_base_offset;
 	/* is vm enabled? */
-	bool				enabled;
-	/* for hw to save the PD addr on suspend/resume */
-	uint32_t			saved_table_addr[AMDGPU_NUM_VM];
+	bool					enabled;
 	/* vm pte handling */
 	const struct amdgpu_vm_pte_funcs        *vm_pte_funcs;
 	struct amdgpu_ring                      *vm_pte_funcs_ring;
 };
 
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev);
+int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
+struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
+					       struct amdgpu_vm *vm,
+					       struct list_head *head);
+int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
+		      struct amdgpu_sync *sync);
+void amdgpu_vm_flush(struct amdgpu_ring *ring,
+		     struct amdgpu_vm *vm,
+		     struct fence *updates);
+void amdgpu_vm_fence(struct amdgpu_device *adev,
+		     struct amdgpu_vm *vm,
+		     struct fence *fence);
+uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
+int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
+				    struct amdgpu_vm *vm);
+int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
+			  struct amdgpu_vm *vm);
+int amdgpu_vm_clear_invalids(struct amdgpu_device *adev, struct amdgpu_vm *vm,
+			     struct amdgpu_sync *sync);
+int amdgpu_vm_bo_update(struct amdgpu_device *adev,
+			struct amdgpu_bo_va *bo_va,
+			struct ttm_mem_reg *mem);
+void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
+			     struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
+				       struct amdgpu_bo *bo);
+struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
+				      struct amdgpu_vm *vm,
+				      struct amdgpu_bo *bo);
+int amdgpu_vm_bo_map(struct amdgpu_device *adev,
+		     struct amdgpu_bo_va *bo_va,
+		     uint64_t addr, uint64_t offset,
+		     uint64_t size, uint32_t flags);
+int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
+		       struct amdgpu_bo_va *bo_va,
+		       uint64_t addr);
+void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
+		      struct amdgpu_bo_va *bo_va);
+int amdgpu_vm_free_job(struct amdgpu_job *job);
+
 /*
  * context related structures
  */
@@ -1211,6 +1254,7 @@
 	/* relocations */
 	struct amdgpu_bo_list_entry	*vm_bos;
 	struct list_head	validated;
+	struct fence		*fence;
 
 	struct amdgpu_ib	*ibs;
 	uint32_t		num_ibs;
@@ -1226,7 +1270,7 @@
 	struct amdgpu_device	*adev;
 	struct amdgpu_ib	*ibs;
 	uint32_t		num_ibs;
-	struct mutex            job_lock;
+	void			*owner;
 	struct amdgpu_user_fence uf;
 	int (*free_job)(struct amdgpu_job *job);
 };
@@ -2257,11 +2301,6 @@
 bool amdgpu_card_posted(struct amdgpu_device *adev);
 void amdgpu_update_display_priority(struct amdgpu_device *adev);
 bool amdgpu_boot_test_post_card(struct amdgpu_device *adev);
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
-						 struct drm_file *filp,
-						 struct amdgpu_ctx *ctx,
-						 struct amdgpu_ib *ibs,
-						 uint32_t num_ibs);
 
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data);
 int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
@@ -2319,49 +2358,6 @@
 			     unsigned long arg);
 
 /*
- * vm
- */
-int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm);
-struct amdgpu_bo_list_entry *amdgpu_vm_get_bos(struct amdgpu_device *adev,
-					  struct amdgpu_vm *vm,
-					  struct list_head *head);
-int amdgpu_vm_grab_id(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
-		      struct amdgpu_sync *sync);
-void amdgpu_vm_flush(struct amdgpu_ring *ring,
-		     struct amdgpu_vm *vm,
-		     struct fence *updates);
-void amdgpu_vm_fence(struct amdgpu_device *adev,
-		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *fence);
-uint64_t amdgpu_vm_map_gart(struct amdgpu_device *adev, uint64_t addr);
-int amdgpu_vm_update_page_directory(struct amdgpu_device *adev,
-				    struct amdgpu_vm *vm);
-int amdgpu_vm_clear_freed(struct amdgpu_device *adev,
-				struct amdgpu_vm *vm);
-int amdgpu_vm_clear_invalids(struct amdgpu_device *adev,
-				struct amdgpu_vm *vm, struct amdgpu_sync *sync);
-int amdgpu_vm_bo_update(struct amdgpu_device *adev,
-			struct amdgpu_bo_va *bo_va,
-			struct ttm_mem_reg *mem);
-void amdgpu_vm_bo_invalidate(struct amdgpu_device *adev,
-			     struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_find(struct amdgpu_vm *vm,
-				       struct amdgpu_bo *bo);
-struct amdgpu_bo_va *amdgpu_vm_bo_add(struct amdgpu_device *adev,
-				      struct amdgpu_vm *vm,
-				      struct amdgpu_bo *bo);
-int amdgpu_vm_bo_map(struct amdgpu_device *adev,
-		     struct amdgpu_bo_va *bo_va,
-		     uint64_t addr, uint64_t offset,
-		     uint64_t size, uint32_t flags);
-int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
-		       struct amdgpu_bo_va *bo_va,
-		       uint64_t addr);
-void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
-		      struct amdgpu_bo_va *bo_va);
-int amdgpu_vm_free_job(struct amdgpu_job *job);
-/*
  * functions used by amdgpu_encoder.c
  */
 struct amdgpu_afmt_acr {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index dfc4d02..3afcf02 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -127,30 +127,6 @@
 	return 0;
 }
 
-struct amdgpu_cs_parser *amdgpu_cs_parser_create(struct amdgpu_device *adev,
-                                               struct drm_file *filp,
-                                               struct amdgpu_ctx *ctx,
-                                               struct amdgpu_ib *ibs,
-                                               uint32_t num_ibs)
-{
-	struct amdgpu_cs_parser *parser;
-	int i;
-
-	parser = kzalloc(sizeof(struct amdgpu_cs_parser), GFP_KERNEL);
-	if (!parser)
-		return NULL;
-
-	parser->adev = adev;
-	parser->filp = filp;
-	parser->ctx = ctx;
-	parser->ibs = ibs;
-	parser->num_ibs = num_ibs;
-	for (i = 0; i < num_ibs; i++)
-		ibs[i].ctx = ctx;
-
-	return parser;
-}
-
 int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p, void *data)
 {
 	union drm_amdgpu_cs *cs = data;
@@ -463,8 +439,18 @@
 	return (int)la->robj->tbo.num_pages - (int)lb->robj->tbo.num_pages;
 }
 
-static void amdgpu_cs_parser_fini_early(struct amdgpu_cs_parser *parser, int error, bool backoff)
+/**
+ * amdgpu_cs_parser_fini() - clean parser states
+ * @parser:	parser structure holding parsing context.
+ * @error:	error number
+ *
+ * If an error is set, unvalidate the buffers; otherwise just free the
+ * memory used by the parsing context.
+ **/
+static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
 {
+	unsigned i;
+
 	if (!error) {
 		/* Sort the buffer list from the smallest to largest buffer,
 		 * which affects the order of buffers in the LRU list.
@@ -479,17 +465,14 @@
 		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
 
 		ttm_eu_fence_buffer_objects(&parser->ticket,
-				&parser->validated,
-				&parser->ibs[parser->num_ibs-1].fence->base);
+					    &parser->validated,
+					    parser->fence);
 	} else if (backoff) {
 		ttm_eu_backoff_reservation(&parser->ticket,
 					   &parser->validated);
 	}
-}
+	fence_put(parser->fence);
 
-static void amdgpu_cs_parser_fini_late(struct amdgpu_cs_parser *parser)
-{
-	unsigned i;
 	if (parser->ctx)
 		amdgpu_ctx_put(parser->ctx);
 	if (parser->bo_list)
@@ -499,31 +482,12 @@
 	for (i = 0; i < parser->nchunks; i++)
 		drm_free_large(parser->chunks[i].kdata);
 	kfree(parser->chunks);
-	if (!amdgpu_enable_scheduler)
-	{
-		if (parser->ibs)
-			for (i = 0; i < parser->num_ibs; i++)
-				amdgpu_ib_free(parser->adev, &parser->ibs[i]);
-		kfree(parser->ibs);
-		if (parser->uf.bo)
-			drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
-	}
-
-	kfree(parser);
-}
-
-/**
- * cs_parser_fini() - clean parser states
- * @parser:	parser structure holding parsing context.
- * @error:	error number
- *
- * If error is set than unvalidate buffer, otherwise just free memory
- * used by parsing context.
- **/
-static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error, bool backoff)
-{
-       amdgpu_cs_parser_fini_early(parser, error, backoff);
-       amdgpu_cs_parser_fini_late(parser);
+	if (parser->ibs)
+		for (i = 0; i < parser->num_ibs; i++)
+			amdgpu_ib_free(parser->adev, &parser->ibs[i]);
+	kfree(parser->ibs);
+	if (parser->uf.bo)
+		drm_gem_object_unreference_unlocked(&parser->uf.bo->gem_base);
 }
 
 static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p,
@@ -610,15 +574,9 @@
 	}
 
 	r = amdgpu_bo_vm_update_pte(parser, vm);
-	if (r) {
-		goto out;
-	}
-	amdgpu_cs_sync_rings(parser);
-	if (!amdgpu_enable_scheduler)
-		r = amdgpu_ib_schedule(adev, parser->num_ibs, parser->ibs,
-				       parser->filp);
+	if (!r)
+		amdgpu_cs_sync_rings(parser);
 
-out:
 	return r;
 }
 
@@ -828,36 +786,36 @@
 	union drm_amdgpu_cs *cs = data;
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_vm *vm = &fpriv->vm;
-	struct amdgpu_cs_parser *parser;
+	struct amdgpu_cs_parser parser = {};
 	bool reserved_buffers = false;
 	int i, r;
 
 	if (!adev->accel_working)
 		return -EBUSY;
 
-	parser = amdgpu_cs_parser_create(adev, filp, NULL, NULL, 0);
-	if (!parser)
-		return -ENOMEM;
-	r = amdgpu_cs_parser_init(parser, data);
+	parser.adev = adev;
+	parser.filp = filp;
+
+	r = amdgpu_cs_parser_init(&parser, data);
 	if (r) {
 		DRM_ERROR("Failed to initialize parser !\n");
-		amdgpu_cs_parser_fini(parser, r, false);
+		amdgpu_cs_parser_fini(&parser, r, false);
 		r = amdgpu_cs_handle_lockup(adev, r);
 		return r;
 	}
 	mutex_lock(&vm->mutex);
-	r = amdgpu_cs_parser_relocs(parser);
+	r = amdgpu_cs_parser_relocs(&parser);
 	if (r == -ENOMEM)
 		DRM_ERROR("Not enough memory for command submission!\n");
 	else if (r && r != -ERESTARTSYS)
 		DRM_ERROR("Failed to process the buffer list %d!\n", r);
 	else if (!r) {
 		reserved_buffers = true;
-		r = amdgpu_cs_ib_fill(adev, parser);
+		r = amdgpu_cs_ib_fill(adev, &parser);
 	}
 
 	if (!r) {
-		r = amdgpu_cs_dependencies(adev, parser);
+		r = amdgpu_cs_dependencies(adev, &parser);
 		if (r)
 			DRM_ERROR("Failed in the dependencies handling %d!\n", r);
 	}
@@ -865,62 +823,71 @@
 	if (r)
 		goto out;
 
-	for (i = 0; i < parser->num_ibs; i++)
-		trace_amdgpu_cs(parser, i);
+	for (i = 0; i < parser.num_ibs; i++)
+		trace_amdgpu_cs(&parser, i);
 
-	r = amdgpu_cs_ib_vm_chunk(adev, parser);
+	r = amdgpu_cs_ib_vm_chunk(adev, &parser);
 	if (r)
 		goto out;
 
-	if (amdgpu_enable_scheduler && parser->num_ibs) {
+	if (amdgpu_enable_scheduler && parser.num_ibs) {
+		struct amdgpu_ring * ring = parser.ibs->ring;
+		struct amd_sched_fence *fence;
 		struct amdgpu_job *job;
-		struct amdgpu_ring * ring =  parser->ibs->ring;
+
 		job = kzalloc(sizeof(struct amdgpu_job), GFP_KERNEL);
 		if (!job) {
 			r = -ENOMEM;
 			goto out;
 		}
+
 		job->base.sched = &ring->sched;
-		job->base.s_entity = &parser->ctx->rings[ring->idx].entity;
-		job->adev = parser->adev;
-		job->ibs = parser->ibs;
-		job->num_ibs = parser->num_ibs;
-		job->base.owner = parser->filp;
-		mutex_init(&job->job_lock);
+		job->base.s_entity = &parser.ctx->rings[ring->idx].entity;
+		job->adev = parser.adev;
+		job->owner = parser.filp;
+		job->free_job = amdgpu_cs_free_job;
+
+		job->ibs = parser.ibs;
+		job->num_ibs = parser.num_ibs;
+		parser.ibs = NULL;
+		parser.num_ibs = 0;
+
 		if (job->ibs[job->num_ibs - 1].user) {
-			memcpy(&job->uf,  &parser->uf,
-			       sizeof(struct amdgpu_user_fence));
+			job->uf = parser.uf;
 			job->ibs[job->num_ibs - 1].user = &job->uf;
+			parser.uf.bo = NULL;
 		}
 
-		job->free_job = amdgpu_cs_free_job;
-		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job(&job->base);
-		if (r) {
-			mutex_unlock(&job->job_lock);
+		fence = amd_sched_fence_create(job->base.s_entity,
+					       parser.filp);
+		if (!fence) {
+			r = -ENOMEM;
 			amdgpu_cs_free_job(job);
 			kfree(job);
 			goto out;
 		}
-		cs->out.handle =
-			amdgpu_ctx_add_fence(parser->ctx, ring,
-					     &job->base.s_fence->base);
-		parser->ibs[parser->num_ibs - 1].sequence = cs->out.handle;
+		job->base.s_fence = fence;
+		parser.fence = fence_get(&fence->base);
 
-		list_sort(NULL, &parser->validated, cmp_size_smaller_first);
-		ttm_eu_fence_buffer_objects(&parser->ticket,
-				&parser->validated,
-				&job->base.s_fence->base);
+		cs->out.handle = amdgpu_ctx_add_fence(parser.ctx, ring,
+						      &fence->base);
+		job->ibs[job->num_ibs - 1].sequence = cs->out.handle;
 
-		mutex_unlock(&job->job_lock);
-		amdgpu_cs_parser_fini_late(parser);
-		mutex_unlock(&vm->mutex);
-		return 0;
+		trace_amdgpu_cs_ioctl(job);
+		amd_sched_entity_push_job(&job->base);
+
+	} else {
+		struct amdgpu_fence *fence;
+
+		r = amdgpu_ib_schedule(adev, parser.num_ibs, parser.ibs,
+				       parser.filp);
+		fence = parser.ibs[parser.num_ibs - 1].fence;
+		parser.fence = fence_get(&fence->base);
+		cs->out.handle = parser.ibs[parser.num_ibs - 1].sequence;
 	}
 
-	cs->out.handle = parser->ibs[parser->num_ibs - 1].sequence;
 out:
-	amdgpu_cs_parser_fini(parser, r, reserved_buffers);
+	amdgpu_cs_parser_fini(&parser, r, reserved_buffers);
 	mutex_unlock(&vm->mutex);
 	r = amdgpu_cs_handle_lockup(adev, r);
 	return r;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
index 257d722..3671f9f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fence.c
@@ -47,6 +47,9 @@
  * that the relevant GPU caches have been flushed.
  */
 
+static struct kmem_cache *amdgpu_fence_slab;
+static atomic_t amdgpu_fence_slab_ref = ATOMIC_INIT(0);
+
 /**
  * amdgpu_fence_write - write a fence value
  *
@@ -85,24 +88,6 @@
 }
 
 /**
- * amdgpu_fence_schedule_check - schedule lockup check
- *
- * @ring: pointer to struct amdgpu_ring
- *
- * Queues a delayed work item to check for lockups.
- */
-static void amdgpu_fence_schedule_check(struct amdgpu_ring *ring)
-{
-	/*
-	 * Do not reset the timer here with mod_delayed_work,
-	 * this can livelock in an interaction with TTM delayed destroy.
-	 */
-	queue_delayed_work(system_power_efficient_wq,
-		&ring->fence_drv.lockup_work,
-		AMDGPU_FENCE_JIFFIES_TIMEOUT);
-}
-
-/**
  * amdgpu_fence_emit - emit a fence on the requested ring
  *
  * @ring: ring the fence is associated with
@@ -118,7 +103,7 @@
 	struct amdgpu_device *adev = ring->adev;
 
 	/* we are protected by the ring emission mutex */
-	*fence = kmalloc(sizeof(struct amdgpu_fence), GFP_KERNEL);
+	*fence = kmem_cache_alloc(amdgpu_fence_slab, GFP_KERNEL);
 	if ((*fence) == NULL) {
 		return -ENOMEM;
 	}
@@ -132,11 +117,23 @@
 	amdgpu_ring_emit_fence(ring, ring->fence_drv.gpu_addr,
 			       (*fence)->seq,
 			       AMDGPU_FENCE_FLAG_INT);
-	trace_amdgpu_fence_emit(ring->adev->ddev, ring->idx, (*fence)->seq);
 	return 0;
 }
 
 /**
+ * amdgpu_fence_schedule_fallback - schedule fallback check
+ *
+ * @ring: pointer to struct amdgpu_ring
+ *
+ * Start a timer as fallback to our interrupts.
+ */
+static void amdgpu_fence_schedule_fallback(struct amdgpu_ring *ring)
+{
+	mod_timer(&ring->fence_drv.fallback_timer,
+		  jiffies + AMDGPU_FENCE_JIFFIES_TIMEOUT);
+}
+
+/**
  * amdgpu_fence_activity - check for fence activity
  *
  * @ring: pointer to struct amdgpu_ring
@@ -202,33 +199,12 @@
 	} while (atomic64_xchg(&ring->fence_drv.last_seq, seq) > seq);
 
 	if (seq < last_emitted)
-		amdgpu_fence_schedule_check(ring);
+		amdgpu_fence_schedule_fallback(ring);
 
 	return wake;
 }
 
 /**
- * amdgpu_fence_check_lockup - check for hardware lockup
- *
- * @work: delayed work item
- *
- * Checks for fence activity and if there is none probe
- * the hardware if a lockup occured.
- */
-static void amdgpu_fence_check_lockup(struct work_struct *work)
-{
-	struct amdgpu_fence_driver *fence_drv;
-	struct amdgpu_ring *ring;
-
-	fence_drv = container_of(work, struct amdgpu_fence_driver,
-				lockup_work.work);
-	ring = fence_drv->ring;
-
-	if (amdgpu_fence_activity(ring))
-		wake_up_all(&ring->fence_drv.fence_queue);
-}
-
-/**
  * amdgpu_fence_process - process a fence
  *
  * @adev: amdgpu_device pointer
@@ -244,6 +220,20 @@
 }
 
 /**
+ * amdgpu_fence_fallback - fallback timer for missed hardware interrupts
+ *
+ * @arg: pointer to the ring, cast to unsigned long
+ *
+ * Checks for fence activity.
+ */
+static void amdgpu_fence_fallback(unsigned long arg)
+{
+	struct amdgpu_ring *ring = (void *)arg;
+
+	amdgpu_fence_process(ring);
+}
+
+/**
  * amdgpu_fence_seq_signaled - check if a fence sequence number has signaled
  *
  * @ring: ring the fence is associated with
@@ -290,7 +280,7 @@
 	if (atomic64_read(&ring->fence_drv.last_seq) >= seq)
 		return 0;
 
-	amdgpu_fence_schedule_check(ring);
+	amdgpu_fence_schedule_fallback(ring);
 	wait_event(ring->fence_drv.fence_queue, (
 		   (signaled = amdgpu_fence_seq_signaled(ring, seq))));
 
@@ -491,9 +481,8 @@
 	atomic64_set(&ring->fence_drv.last_seq, 0);
 	ring->fence_drv.initialized = false;
 
-	INIT_DELAYED_WORK(&ring->fence_drv.lockup_work,
-			amdgpu_fence_check_lockup);
-	ring->fence_drv.ring = ring;
+	setup_timer(&ring->fence_drv.fallback_timer, amdgpu_fence_fallback,
+		    (unsigned long)ring);
 
 	init_waitqueue_head(&ring->fence_drv.fence_queue);
 
@@ -536,6 +525,13 @@
  */
 int amdgpu_fence_driver_init(struct amdgpu_device *adev)
 {
+	if (atomic_inc_return(&amdgpu_fence_slab_ref) == 1) {
+		amdgpu_fence_slab = kmem_cache_create(
+			"amdgpu_fence", sizeof(struct amdgpu_fence), 0,
+			SLAB_HWCACHE_ALIGN, NULL);
+		if (!amdgpu_fence_slab)
+			return -ENOMEM;
+	}
 	if (amdgpu_debugfs_fence_init(adev))
 		dev_err(adev->dev, "fence debugfs file creation failed\n");
 
@@ -554,9 +550,12 @@
 {
 	int i, r;
 
+	if (atomic_dec_and_test(&amdgpu_fence_slab_ref))
+		kmem_cache_destroy(amdgpu_fence_slab);
 	mutex_lock(&adev->ring_lock);
 	for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
 		struct amdgpu_ring *ring = adev->rings[i];
+
 		if (!ring || !ring->fence_drv.initialized)
 			continue;
 		r = amdgpu_fence_wait_empty(ring);
@@ -568,6 +567,7 @@
 		amdgpu_irq_put(adev, ring->fence_drv.irq_src,
 			       ring->fence_drv.irq_type);
 		amd_sched_fini(&ring->sched);
+		del_timer_sync(&ring->fence_drv.fallback_timer);
 		ring->fence_drv.initialized = false;
 	}
 	mutex_unlock(&adev->ring_lock);
@@ -751,18 +751,25 @@
 	fence->fence_wake.func = amdgpu_fence_check_signaled;
 	__add_wait_queue(&ring->fence_drv.fence_queue, &fence->fence_wake);
 	fence_get(f);
-	amdgpu_fence_schedule_check(ring);
+	if (!timer_pending(&ring->fence_drv.fallback_timer))
+		amdgpu_fence_schedule_fallback(ring);
 	FENCE_TRACE(&fence->base, "armed on ring %i!\n", ring->idx);
 	return true;
 }
 
+static void amdgpu_fence_release(struct fence *f)
+{
+	struct amdgpu_fence *fence = to_amdgpu_fence(f);
+	kmem_cache_free(amdgpu_fence_slab, fence);
+}
+
 const struct fence_ops amdgpu_fence_ops = {
 	.get_driver_name = amdgpu_fence_get_driver_name,
 	.get_timeline_name = amdgpu_fence_get_timeline_name,
 	.enable_signaling = amdgpu_fence_enable_signaling,
 	.signaled = amdgpu_fence_is_signaled,
 	.wait = fence_default_wait,
-	.release = NULL,
+	.release = amdgpu_fence_release,
 };
 
 /*
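
Aside: two independent changes land in this file: the per-ring delayed
work becomes a plain timer used purely as an interrupt fallback, and
fence objects move from kmalloc() to a shared kmem_cache whose lifetime
is refcounted across devices. A hedged sketch of the refcounted-slab
half (my_* names and the object size are hypothetical):

    #include <linux/slab.h>
    #include <linux/atomic.h>

    static struct kmem_cache *my_fence_slab;
    static atomic_t my_fence_slab_ref = ATOMIC_INIT(0);

    static int my_fence_init(void)
    {
    	/* the first device creates the shared slab, later ones reuse it */
    	if (atomic_inc_return(&my_fence_slab_ref) == 1) {
    		my_fence_slab = kmem_cache_create("my_fence",
    				64 /* object size */, 0,
    				SLAB_HWCACHE_ALIGN, NULL);
    		if (!my_fence_slab)
    			return -ENOMEM;
    	}
    	return 0;
    }

    static void my_fence_fini(void)
    {
    	/* the last device tears the slab down */
    	if (atomic_dec_and_test(&my_fence_slab_ref))
    		kmem_cache_destroy(my_fence_slab);
    }

The matching piece is a fence .release callback returning objects via
kmem_cache_free(), which the diff adds as amdgpu_fence_release().
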
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 0873328..00c5b58 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -483,6 +483,9 @@
 		if (domain == AMDGPU_GEM_DOMAIN_CPU)
 			goto error_unreserve;
 	}
+	r = amdgpu_vm_update_page_directory(adev, bo_va->vm);
+	if (r)
+		goto error_unreserve;
 
 	r = amdgpu_vm_clear_freed(adev, bo_va->vm);
 	if (r)
@@ -512,6 +515,9 @@
 	struct amdgpu_fpriv *fpriv = filp->driver_priv;
 	struct amdgpu_bo *rbo;
 	struct amdgpu_bo_va *bo_va;
+	struct ttm_validate_buffer tv, tv_pd;
+	struct ww_acquire_ctx ticket;
+	struct list_head list, duplicates;
 	uint32_t invalid_flags, va_flags = 0;
 	int r = 0;
 
@@ -549,7 +555,18 @@
 		return -ENOENT;
 	mutex_lock(&fpriv->vm.mutex);
 	rbo = gem_to_amdgpu_bo(gobj);
-	r = amdgpu_bo_reserve(rbo, false);
+	INIT_LIST_HEAD(&list);
+	INIT_LIST_HEAD(&duplicates);
+	tv.bo = &rbo->tbo;
+	tv.shared = true;
+	list_add(&tv.head, &list);
+
+	if (args->operation == AMDGPU_VA_OP_MAP) {
+		tv_pd.bo = &fpriv->vm.page_directory->tbo;
+		tv_pd.shared = true;
+		list_add(&tv_pd.head, &list);
+	}
+	r = ttm_eu_reserve_buffers(&ticket, &list, true, &duplicates);
 	if (r) {
 		mutex_unlock(&fpriv->vm.mutex);
 		drm_gem_object_unreference_unlocked(gobj);
@@ -558,7 +575,8 @@
 
 	bo_va = amdgpu_vm_bo_find(&fpriv->vm, rbo);
 	if (!bo_va) {
-		amdgpu_bo_unreserve(rbo);
+		ttm_eu_backoff_reservation(&ticket, &list);
+		drm_gem_object_unreference_unlocked(gobj);
 		mutex_unlock(&fpriv->vm.mutex);
 		return -ENOENT;
 	}
@@ -581,7 +599,7 @@
 	default:
 		break;
 	}
-
+	ttm_eu_backoff_reservation(&ticket, &list);
 	if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
 		amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
 	mutex_unlock(&fpriv->vm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index e659877..9e25eda 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -62,7 +62,7 @@
 	int r;
 
 	if (size) {
-		r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
+		r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
 				      &ib->sa_bo, size, 256);
 		if (r) {
 			dev_err(adev->dev, "failed to get a new IB (%d)\n", r);
@@ -216,7 +216,7 @@
 	}
 
 	if (ib->vm)
-		amdgpu_vm_fence(adev, ib->vm, ib->fence);
+		amdgpu_vm_fence(adev, ib->vm, &ib->fence->base);
 
 	amdgpu_ring_unlock_commit(ring);
 	return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 3c2ff45..ea756e7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -189,10 +189,9 @@
 				      struct amdgpu_sa_manager *sa_manager);
 int amdgpu_sa_bo_manager_suspend(struct amdgpu_device *adev,
 					struct amdgpu_sa_manager *sa_manager);
-int amdgpu_sa_bo_new(struct amdgpu_device *adev,
-			    struct amdgpu_sa_manager *sa_manager,
-			    struct amdgpu_sa_bo **sa_bo,
-			    unsigned size, unsigned align);
+int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
+		     struct amdgpu_sa_bo **sa_bo,
+		     unsigned size, unsigned align);
 void amdgpu_sa_bo_free(struct amdgpu_device *adev,
 			      struct amdgpu_sa_bo **sa_bo,
 			      struct fence *fence);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 0212b31..8b88edb 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -311,8 +311,7 @@
 	return false;
 }
 
-int amdgpu_sa_bo_new(struct amdgpu_device *adev,
-		     struct amdgpu_sa_manager *sa_manager,
+int amdgpu_sa_bo_new(struct amdgpu_sa_manager *sa_manager,
 		     struct amdgpu_sa_bo **sa_bo,
 		     unsigned size, unsigned align)
 {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index dcf4a8a..438c052 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -26,6 +26,7 @@
 #include <linux/sched.h>
 #include <drm/drmP.h>
 #include "amdgpu.h"
+#include "amdgpu_trace.h"
 
 static struct fence *amdgpu_sched_dependency(struct amd_sched_job *sched_job)
 {
@@ -44,11 +45,8 @@
 		return NULL;
 	}
 	job = to_amdgpu_job(sched_job);
-	mutex_lock(&job->job_lock);
-	r = amdgpu_ib_schedule(job->adev,
-			       job->num_ibs,
-			       job->ibs,
-			       job->base.owner);
+	trace_amdgpu_sched_run_job(job);
+	r = amdgpu_ib_schedule(job->adev, job->num_ibs, job->ibs, job->owner);
 	if (r) {
 		DRM_ERROR("Error scheduling IBs (%d)\n", r);
 		goto err;
@@ -61,8 +59,6 @@
 	if (job->free_job)
 		job->free_job(job);
 
-	mutex_unlock(&job->job_lock);
-	fence_put(&job->base.s_fence->base);
 	kfree(job);
 	return fence ? &fence->base : NULL;
 }
@@ -88,21 +84,19 @@
 			return -ENOMEM;
 		job->base.sched = &ring->sched;
 		job->base.s_entity = &adev->kernel_ctx.rings[ring->idx].entity;
+		job->base.s_fence = amd_sched_fence_create(job->base.s_entity, owner);
+		if (!job->base.s_fence) {
+			kfree(job);
+			return -ENOMEM;
+		}
+		*f = fence_get(&job->base.s_fence->base);
+
 		job->adev = adev;
 		job->ibs = ibs;
 		job->num_ibs = num_ibs;
-		job->base.owner = owner;
-		mutex_init(&job->job_lock);
+		job->owner = owner;
 		job->free_job = free_job;
-		mutex_lock(&job->job_lock);
-		r = amd_sched_entity_push_job(&job->base);
-		if (r) {
-			mutex_unlock(&job->job_lock);
-			kfree(job);
-			return r;
-		}
-		*f = fence_get(&job->base.s_fence->base);
-		mutex_unlock(&job->job_lock);
+		amd_sched_entity_push_job(&job->base);
 	} else {
 		r = amdgpu_ib_schedule(adev, num_ibs, ibs, owner);
 		if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
index ff3ca52..1caaf20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_semaphore.c
@@ -40,7 +40,7 @@
 	if (*semaphore == NULL) {
 		return -ENOMEM;
 	}
-	r = amdgpu_sa_bo_new(adev, &adev->ring_tmp_bo,
+	r = amdgpu_sa_bo_new(&adev->ring_tmp_bo,
 			     &(*semaphore)->sa_bo, 8, 8);
 	if (r) {
 		kfree(*semaphore);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
index a6697fd..dd005c3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sync.c
@@ -302,8 +302,14 @@
 			return -EINVAL;
 		}
 
-		if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores ||
-		    (count >= AMDGPU_NUM_SYNCS)) {
+		if (amdgpu_enable_scheduler || !amdgpu_enable_semaphores) {
+			r = fence_wait(&fence->base, true);
+			if (r)
+				return r;
+			continue;
+		}
+
+		if (count >= AMDGPU_NUM_SYNCS) {
 			/* not enough room, wait manually */
 			r = fence_wait(&fence->base, false);
 			if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 76ecbaf..8f9834ab 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -48,6 +48,57 @@
 		      __entry->fences)
 );
 
+TRACE_EVENT(amdgpu_cs_ioctl,
+	    TP_PROTO(struct amdgpu_job *job),
+	    TP_ARGS(job),
+	    TP_STRUCT__entry(
+			     __field(struct amdgpu_device *, adev)
+			     __field(struct amd_sched_job *, sched_job)
+			     __field(struct amdgpu_ib *, ib)
+			     __field(struct fence *, fence)
+			     __field(char *, ring_name)
+			     __field(u32, num_ibs)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->adev = job->adev;
+			   __entry->sched_job = &job->base;
+			   __entry->ib = job->ibs;
+			   __entry->fence = &job->base.s_fence->base;
+			   __entry->ring_name = job->ibs[0].ring->name;
+			   __entry->num_ibs = job->num_ibs;
+			   ),
+	    TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
+		      __entry->adev, __entry->sched_job, __entry->ib,
+		      __entry->fence, __entry->ring_name, __entry->num_ibs)
+);
+
+TRACE_EVENT(amdgpu_sched_run_job,
+	    TP_PROTO(struct amdgpu_job *job),
+	    TP_ARGS(job),
+	    TP_STRUCT__entry(
+			     __field(struct amdgpu_device *, adev)
+			     __field(struct amd_sched_job *, sched_job)
+			     __field(struct amdgpu_ib *, ib)
+			     __field(struct fence *, fence)
+			     __field(char *, ring_name)
+			     __field(u32, num_ibs)
+			     ),
+
+	    TP_fast_assign(
+			   __entry->adev = job->adev;
+			   __entry->sched_job = &job->base;
+			   __entry->ib = job->ibs;
+			   __entry->fence = &job->base.s_fence->base;
+			   __entry->ring_name = job->ibs[0].ring->name;
+			   __entry->num_ibs = job->num_ibs;
+			   ),
+	    TP_printk("adev=%p, sched_job=%p, first ib=%p, sched fence=%p, ring name:%s, num_ibs:%u",
+		      __entry->adev, __entry->sched_job, __entry->ib,
+		      __entry->fence, __entry->ring_name, __entry->num_ibs)
+);
+
+
 TRACE_EVENT(amdgpu_vm_grab_id,
 	    TP_PROTO(unsigned vmid, int ring),
 	    TP_ARGS(vmid, ring),
@@ -196,49 +247,6 @@
 	    TP_printk("list=%p, bo=%p", __entry->list, __entry->bo)
 );
 
-DECLARE_EVENT_CLASS(amdgpu_fence_request,
-
-	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
-	    TP_ARGS(dev, ring, seqno),
-
-	    TP_STRUCT__entry(
-			     __field(u32, dev)
-			     __field(int, ring)
-			     __field(u32, seqno)
-			     ),
-
-	    TP_fast_assign(
-			   __entry->dev = dev->primary->index;
-			   __entry->ring = ring;
-			   __entry->seqno = seqno;
-			   ),
-
-	    TP_printk("dev=%u, ring=%d, seqno=%u",
-		      __entry->dev, __entry->ring, __entry->seqno)
-);
-
-DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_emit,
-
-	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
-	    TP_ARGS(dev, ring, seqno)
-);
-
-DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_begin,
-
-	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
-	    TP_ARGS(dev, ring, seqno)
-);
-
-DEFINE_EVENT(amdgpu_fence_request, amdgpu_fence_wait_end,
-
-	    TP_PROTO(struct drm_device *dev, int ring, u32 seqno),
-
-	    TP_ARGS(dev, ring, seqno)
-);
-
 DECLARE_EVENT_CLASS(amdgpu_semaphore_request,
 
 	    TP_PROTO(int ring, struct amdgpu_semaphore *sem),
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 81bb8e9..d4bac5f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -1073,10 +1073,10 @@
 	ret = drm_mm_dump_table(m, mm);
 	spin_unlock(&glob->lru_lock);
 	if (ttm_pl == TTM_PL_VRAM)
-		seq_printf(m, "man size:%llu pages, ram usage:%luMB, vis usage:%luMB\n",
+		seq_printf(m, "man size:%llu pages, ram usage:%lluMB, vis usage:%lluMB\n",
 			   adev->mman.bdev.man[ttm_pl].size,
-			   atomic64_read(&adev->vram_usage) >> 20,
-			   atomic64_read(&adev->vram_vis_usage) >> 20);
+			   (u64)atomic64_read(&adev->vram_usage) >> 20,
+			   (u64)atomic64_read(&adev->vram_vis_usage) >> 20);
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 633a32a..159ce54 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -143,10 +143,15 @@
 	unsigned i;
 
 	/* check if the id is still valid */
-	if (vm_id->id && vm_id->last_id_use &&
-	    vm_id->last_id_use == adev->vm_manager.active[vm_id->id]) {
-		trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
-		return 0;
+	if (vm_id->id) {
+		unsigned id = vm_id->id;
+		long owner;
+
+		owner = atomic_long_read(&adev->vm_manager.ids[id].owner);
+		if (owner == (long)vm) {
+			trace_amdgpu_vm_grab_id(vm_id->id, ring->idx);
+			return 0;
+		}
 	}
 
 	/* we definitely need to flush */
@@ -154,7 +159,7 @@
 
 	/* skip over VMID 0, since it is the system VM */
 	for (i = 1; i < adev->vm_manager.nvm; ++i) {
-		struct fence *fence = adev->vm_manager.active[i];
+		struct fence *fence = adev->vm_manager.ids[i].active;
 		struct amdgpu_ring *fring;
 
 		if (fence == NULL) {
@@ -176,7 +181,7 @@
 		if (choices[i]) {
 			struct fence *fence;
 
-			fence  = adev->vm_manager.active[choices[i]];
+			fence  = adev->vm_manager.ids[choices[i]].active;
 			vm_id->id = choices[i];
 
 			trace_amdgpu_vm_grab_id(choices[i], ring->idx);
@@ -207,24 +212,21 @@
 	uint64_t pd_addr = amdgpu_bo_gpu_offset(vm->page_directory);
 	struct amdgpu_vm_id *vm_id = &vm->ids[ring->idx];
 	struct fence *flushed_updates = vm_id->flushed_updates;
-	bool is_earlier = false;
+	bool is_later;
 
-	if (flushed_updates && updates) {
-		BUG_ON(flushed_updates->context != updates->context);
-		is_earlier = (updates->seqno - flushed_updates->seqno <=
-			      INT_MAX) ? true : false;
-	}
+	if (!flushed_updates)
+		is_later = true;
+	else if (!updates)
+		is_later = false;
+	else
+		is_later = fence_is_later(updates, flushed_updates);
 
-	if (pd_addr != vm_id->pd_gpu_addr || !flushed_updates ||
-	    is_earlier) {
-
+	if (pd_addr != vm_id->pd_gpu_addr || is_later) {
 		trace_amdgpu_vm_flush(pd_addr, ring->idx, vm_id->id);
-		if (is_earlier) {
+		if (is_later) {
 			vm_id->flushed_updates = fence_get(updates);
 			fence_put(flushed_updates);
 		}
-		if (!flushed_updates)
-			vm_id->flushed_updates = fence_get(updates);
 		vm_id->pd_gpu_addr = pd_addr;
 		amdgpu_ring_emit_vm_flush(ring, vm_id->id, vm_id->pd_gpu_addr);
 	}
@@ -244,16 +246,14 @@
  */
 void amdgpu_vm_fence(struct amdgpu_device *adev,
 		     struct amdgpu_vm *vm,
-		     struct amdgpu_fence *fence)
+		     struct fence *fence)
 {
-	unsigned ridx = fence->ring->idx;
-	unsigned vm_id = vm->ids[ridx].id;
+	struct amdgpu_ring *ring = amdgpu_ring_from_fence(fence);
+	unsigned vm_id = vm->ids[ring->idx].id;
 
-	fence_put(adev->vm_manager.active[vm_id]);
-	adev->vm_manager.active[vm_id] = fence_get(&fence->base);
-
-	fence_put(vm->ids[ridx].last_id_use);
-	vm->ids[ridx].last_id_use = fence_get(&fence->base);
+	fence_put(adev->vm_manager.ids[vm_id].active);
+	adev->vm_manager.ids[vm_id].active = fence_get(fence);
+	atomic_long_set(&adev->vm_manager.ids[vm_id].owner, (long)vm);
 }
 
 /**
@@ -332,6 +332,8 @@
  *
  * @adev: amdgpu_device pointer
  * @bo: bo to clear
+ *
+ * The caller must reserve the BO before calling this function.
  */
 static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
 			      struct amdgpu_bo *bo)
@@ -343,24 +345,20 @@
 	uint64_t addr;
 	int r;
 
-	r = amdgpu_bo_reserve(bo, false);
-	if (r)
-		return r;
-
 	r = reservation_object_reserve_shared(bo->tbo.resv);
 	if (r)
 		return r;
 
 	r = ttm_bo_validate(&bo->tbo, &bo->placement, true, false);
 	if (r)
-		goto error_unreserve;
+		goto error;
 
 	addr = amdgpu_bo_gpu_offset(bo);
 	entries = amdgpu_bo_size(bo) / 8;
 
 	ib = kzalloc(sizeof(struct amdgpu_ib), GFP_KERNEL);
 	if (!ib)
-		goto error_unreserve;
+		goto error;
 
 	r = amdgpu_ib_get(ring, NULL, entries * 2 + 64, ib);
 	if (r)
@@ -378,16 +376,14 @@
 	if (!r)
 		amdgpu_bo_fence(bo, fence, true);
 	fence_put(fence);
-	if (amdgpu_enable_scheduler) {
-		amdgpu_bo_unreserve(bo);
+	if (amdgpu_enable_scheduler)
 		return 0;
-	}
+
 error_free:
 	amdgpu_ib_free(adev, ib);
 	kfree(ib);
 
-error_unreserve:
-	amdgpu_bo_unreserve(bo);
+error:
 	return r;
 }
 
@@ -989,7 +985,7 @@
  * Add a mapping of the BO at the specified addr into the VM.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
  */
 int amdgpu_vm_bo_map(struct amdgpu_device *adev,
 		     struct amdgpu_bo_va *bo_va,
@@ -1005,30 +1001,27 @@
 
 	/* validate the parameters */
 	if (saddr & AMDGPU_GPU_PAGE_MASK || offset & AMDGPU_GPU_PAGE_MASK ||
-	    size == 0 || size & AMDGPU_GPU_PAGE_MASK) {
-		amdgpu_bo_unreserve(bo_va->bo);
+	    size == 0 || size & AMDGPU_GPU_PAGE_MASK)
 		return -EINVAL;
-	}
 
 	/* make sure object fit at this offset */
 	eaddr = saddr + size;
-	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo))) {
-		amdgpu_bo_unreserve(bo_va->bo);
+	if ((saddr >= eaddr) || (offset + size > amdgpu_bo_size(bo_va->bo)))
 		return -EINVAL;
-	}
 
 	last_pfn = eaddr / AMDGPU_GPU_PAGE_SIZE;
 	if (last_pfn > adev->vm_manager.max_pfn) {
 		dev_err(adev->dev, "va above limit (0x%08X > 0x%08X)\n",
 			last_pfn, adev->vm_manager.max_pfn);
-		amdgpu_bo_unreserve(bo_va->bo);
 		return -EINVAL;
 	}
 
 	saddr /= AMDGPU_GPU_PAGE_SIZE;
 	eaddr /= AMDGPU_GPU_PAGE_SIZE;
 
+	spin_lock(&vm->it_lock);
 	it = interval_tree_iter_first(&vm->va, saddr, eaddr - 1);
+	spin_unlock(&vm->it_lock);
 	if (it) {
 		struct amdgpu_bo_va_mapping *tmp;
 		tmp = container_of(it, struct amdgpu_bo_va_mapping, it);
@@ -1036,14 +1029,12 @@
 		dev_err(adev->dev, "bo %p va 0x%010Lx-0x%010Lx conflict with "
 			"0x%010lx-0x%010lx\n", bo_va->bo, saddr, eaddr,
 			tmp->it.start, tmp->it.last + 1);
-		amdgpu_bo_unreserve(bo_va->bo);
 		r = -EINVAL;
 		goto error;
 	}
 
 	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
 	if (!mapping) {
-		amdgpu_bo_unreserve(bo_va->bo);
 		r = -ENOMEM;
 		goto error;
 	}
@@ -1055,7 +1046,9 @@
 	mapping->flags = flags;
 
 	list_add(&mapping->list, &bo_va->invalids);
+	spin_lock(&vm->it_lock);
 	interval_tree_insert(&mapping->it, &vm->va);
+	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_map(bo_va, mapping);
 
 	/* Make sure the page tables are allocated */
@@ -1067,8 +1060,6 @@
 	if (eaddr > vm->max_pde_used)
 		vm->max_pde_used = eaddr;
 
-	amdgpu_bo_unreserve(bo_va->bo);
-
 	/* walk over the address space and allocate the page tables */
 	for (pt_idx = saddr; pt_idx <= eaddr; ++pt_idx) {
 		struct reservation_object *resv = vm->page_directory->tbo.resv;
@@ -1077,13 +1068,11 @@
 		if (vm->page_tables[pt_idx].bo)
 			continue;
 
-		ww_mutex_lock(&resv->lock, NULL);
 		r = amdgpu_bo_create(adev, AMDGPU_VM_PTE_COUNT * 8,
 				     AMDGPU_GPU_PAGE_SIZE, true,
 				     AMDGPU_GEM_DOMAIN_VRAM,
 				     AMDGPU_GEM_CREATE_NO_CPU_ACCESS,
 				     NULL, resv, &pt);
-		ww_mutex_unlock(&resv->lock);
 		if (r)
 			goto error_free;
 
@@ -1101,7 +1090,9 @@
 
 error_free:
 	list_del(&mapping->list);
+	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
+	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 	kfree(mapping);
 
@@ -1119,7 +1110,7 @@
  * Remove a mapping of the BO at the specified addr from the VM.
  * Returns 0 for success, error for failure.
  *
- * Object has to be reserved and gets unreserved by this function!
+ * Object has to be reserved and unreserved outside!
  */
 int amdgpu_vm_bo_unmap(struct amdgpu_device *adev,
 		       struct amdgpu_bo_va *bo_va,
@@ -1144,21 +1135,20 @@
 				break;
 		}
 
-		if (&mapping->list == &bo_va->invalids) {
-			amdgpu_bo_unreserve(bo_va->bo);
+		if (&mapping->list == &bo_va->invalids)
 			return -ENOENT;
-		}
 	}
 
 	list_del(&mapping->list);
+	spin_lock(&vm->it_lock);
 	interval_tree_remove(&mapping->it, &vm->va);
+	spin_unlock(&vm->it_lock);
 	trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 
 	if (valid)
 		list_add(&mapping->list, &vm->freed);
 	else
 		kfree(mapping);
-	amdgpu_bo_unreserve(bo_va->bo);
 
 	return 0;
 }
@@ -1187,13 +1177,17 @@
 
 	list_for_each_entry_safe(mapping, next, &bo_va->valids, list) {
 		list_del(&mapping->list);
+		spin_lock(&vm->it_lock);
 		interval_tree_remove(&mapping->it, &vm->va);
+		spin_unlock(&vm->it_lock);
 		trace_amdgpu_vm_bo_unmap(bo_va, mapping);
 		list_add(&mapping->list, &vm->freed);
 	}
 	list_for_each_entry_safe(mapping, next, &bo_va->invalids, list) {
 		list_del(&mapping->list);
+		spin_lock(&vm->it_lock);
 		interval_tree_remove(&mapping->it, &vm->va);
+		spin_unlock(&vm->it_lock);
 		kfree(mapping);
 	}
 
@@ -1241,7 +1235,6 @@
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 		vm->ids[i].id = 0;
 		vm->ids[i].flushed_updates = NULL;
-		vm->ids[i].last_id_use = NULL;
 	}
 	mutex_init(&vm->mutex);
 	vm->va = RB_ROOT;
@@ -1249,7 +1242,7 @@
 	INIT_LIST_HEAD(&vm->invalidated);
 	INIT_LIST_HEAD(&vm->cleared);
 	INIT_LIST_HEAD(&vm->freed);
-
+	spin_lock_init(&vm->it_lock);
 	pd_size = amdgpu_vm_directory_size(adev);
 	pd_entries = amdgpu_vm_num_pdes(adev);
 
@@ -1269,8 +1262,14 @@
 			     NULL, NULL, &vm->page_directory);
 	if (r)
 		return r;
-
+	r = amdgpu_bo_reserve(vm->page_directory, false);
+	if (r) {
+		amdgpu_bo_unref(&vm->page_directory);
+		vm->page_directory = NULL;
+		return r;
+	}
 	r = amdgpu_vm_clear_bo(adev, vm->page_directory);
+	amdgpu_bo_unreserve(vm->page_directory);
 	if (r) {
 		amdgpu_bo_unref(&vm->page_directory);
 		vm->page_directory = NULL;
@@ -1313,11 +1312,28 @@
 
 	amdgpu_bo_unref(&vm->page_directory);
 	fence_put(vm->page_directory_fence);
-
 	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
+		unsigned id = vm->ids[i].id;
+
+		atomic_long_cmpxchg(&adev->vm_manager.ids[id].owner,
+				    (long)vm, 0);
 		fence_put(vm->ids[i].flushed_updates);
-		fence_put(vm->ids[i].last_id_use);
 	}
 
 	mutex_destroy(&vm->mutex);
 }
+
+/**
+ * amdgpu_vm_manager_fini - cleanup VM manager
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Cleanup the VM manager and free resources.
+ */
+void amdgpu_vm_manager_fini(struct amdgpu_device *adev)
+{
+	unsigned i;
+
+	for (i = 0; i < AMDGPU_NUM_VM; ++i)
+		fence_put(adev->vm_manager.ids[i].active);
+}
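
Taken together, the amdgpu_vm.c changes replace the per-ring last_id_use
fences with a lock-free owner field per hardware VMID: grab checks it with
a plain atomic read, fence records the new owner, and teardown releases the
slot only if this VM still holds it. The pattern in isolation (a sketch
with shortened names; the real fields live in adev->vm_manager.ids[]):

	/* grab: the VMID can be reused iff this vm still owns it */
	if (atomic_long_read(&ids[id].owner) == (long)vm)
		return 0;	/* no flush needed */

	/* fence: after a flush, record the new owner */
	atomic_long_set(&ids[id].owner, (long)vm);

	/* fini: release the slot, but only if we still own it */
	atomic_long_cmpxchg(&ids[id].owner, (long)vm, 0);
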
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index a1a35a5..57a2e34 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -6569,12 +6569,12 @@
 		switch (state) {
 		case AMDGPU_IRQ_STATE_DISABLE:
 			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
-			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
 			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
 			break;
 		case AMDGPU_IRQ_STATE_ENABLE:
 			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
-			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
+			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTH_MASK_MASK;
 			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
 			break;
 		default:
@@ -6586,12 +6586,12 @@
 		switch (state) {
 		case AMDGPU_IRQ_STATE_DISABLE:
 			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
-			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
 			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
 			break;
 		case AMDGPU_IRQ_STATE_ENABLE:
 			cg_thermal_int = RREG32_SMC(ixCG_THERMAL_INT);
-			cg_thermal_int |= CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
+			cg_thermal_int &= ~CG_THERMAL_INT_CTRL__THERM_INTL_MASK_MASK;
 			WREG32_SMC(ixCG_THERMAL_INT, cg_thermal_int);
 			break;
 		default:
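
Note the polarity being fixed here: THERM_INTH/INTL are mask bits, so a set
bit blocks the interrupt. The old code had it inverted; the corrected
mapping is simply (MASK standing in for the CG_THERMAL_INT_CTRL__*_MASK_MASK
constants above):

	case AMDGPU_IRQ_STATE_DISABLE:
		cg_thermal_int |= MASK;		/* mask (block) the interrupt */
		break;
	case AMDGPU_IRQ_STATE_ENABLE:
		cg_thermal_int &= ~MASK;	/* unmask (deliver) it */
		break;
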
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 6776cf7..e1dcab9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -268,7 +268,6 @@
 	mmCGTT_CP_CLK_CTRL, 0xffffffff, 0x00000100,
 	mmCGTT_CPC_CLK_CTRL, 0xffffffff, 0x00000100,
 	mmCGTT_CPF_CLK_CTRL, 0xffffffff, 0x40000100,
-	mmCGTT_DRM_CLK_CTRL0, 0xffffffff, 0x00600100,
 	mmCGTT_GDS_CLK_CTRL, 0xffffffff, 0x00000100,
 	mmCGTT_IA_CLK_CTRL, 0xffffffff, 0x06000100,
 	mmCGTT_PA_CLK_CTRL, 0xffffffff, 0x00000100,
@@ -296,10 +295,6 @@
 	mmCGTS_SM_CTRL_REG, 0xffffffff, 0x96e00200,
 	mmCP_RB_WPTR_POLL_CNTL, 0xffffffff, 0x00900100,
 	mmRLC_CGCG_CGLS_CTRL, 0xffffffff, 0x0020003c,
-	mmPCIE_INDEX, 0xffffffff, 0x0140001c,
-	mmPCIE_DATA, 0x000f0000, 0x00000000,
-	mmCGTT_DRM_CLK_CTRL0, 0xff000fff, 0x00000100,
-	mmHDP_XDP_CGTT_BLK_CTRL, 0xc0000fff, 0x00000104,
 	mmCP_MEM_SLP_CNTL, 0x00000001, 0x00000001,
 };
 
@@ -1000,7 +995,7 @@
 		adev->gfx.config.max_cu_per_sh = 16;
 		adev->gfx.config.max_sh_per_se = 1;
 		adev->gfx.config.max_backends_per_se = 4;
-		adev->gfx.config.max_texture_channel_caches = 8;
+		adev->gfx.config.max_texture_channel_caches = 16;
 		adev->gfx.config.max_gprs = 256;
 		adev->gfx.config.max_gs_threads = 32;
 		adev->gfx.config.max_hw_contexts = 8;
@@ -1613,6 +1608,296 @@
 			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
 		}
 	case CHIP_FIJI:
+		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_64B) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 1:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_128B) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 2:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_256B) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 3:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_512B) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 4:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 5:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 6:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 7:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+						TILE_SPLIT(ADDR_SURF_TILE_SPLIT_2KB) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DEPTH_MICRO_TILING));
+				break;
+			case 8:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_LINEAR_ALIGNED) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16));
+				break;
+			case 9:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 10:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 11:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+				break;
+			case 12:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_DISPLAY_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+				break;
+			case 13:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 14:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 15:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 16:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+				break;
+			case 17:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+				break;
+			case 18:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 19:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 20:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 21:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 22:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 23:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 24:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THIN_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 25:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_XTHICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 26:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_3D_TILED_XTHICK) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_THICK_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_1));
+				break;
+			case 27:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_1D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 28:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_2D_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_2));
+				break;
+			case 29:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P16_32x32_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+				break;
+			case 30:
+				gb_tile_moden = (ARRAY_MODE(ARRAY_PRT_TILED_THIN1) |
+						PIPE_CONFIG(ADDR_SURF_P4_16x16) |
+						MICRO_TILE_MODE_NEW(ADDR_SURF_ROTATED_MICRO_TILING) |
+						SAMPLE_SPLIT(ADDR_SURF_SAMPLE_SPLIT_8));
+				break;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			adev->gfx.config.tile_mode_array[reg_offset] = gb_tile_moden;
+			WREG32(mmGB_TILE_MODE0 + reg_offset, gb_tile_moden);
+		}
+		for (reg_offset = 0; reg_offset < num_secondary_tile_mode_states; reg_offset++) {
+			switch (reg_offset) {
+			case 0:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 1:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 2:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 3:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 4:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 5:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 6:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 8:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_8) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 9:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_4) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 10:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 11:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 12:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_2) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 13:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_2) |
+						NUM_BANKS(ADDR_SURF_8_BANK));
+				break;
+			case 14:
+				gb_tile_moden = (BANK_WIDTH(ADDR_SURF_BANK_WIDTH_1) |
+						BANK_HEIGHT(ADDR_SURF_BANK_HEIGHT_1) |
+						MACRO_TILE_ASPECT(ADDR_SURF_MACRO_ASPECT_1) |
+						NUM_BANKS(ADDR_SURF_4_BANK));
+				break;
+			case 7:
+				/* unused idx */
+				continue;
+			default:
+				gb_tile_moden = 0;
+				break;
+			}
+			adev->gfx.config.macrotile_mode_array[reg_offset] = gb_tile_moden;
+			WREG32(mmGB_MACROTILE_MODE0 + reg_offset, gb_tile_moden);
+		}
+		break;
 	case CHIP_TONGA:
 		for (reg_offset = 0; reg_offset < num_tile_mode_states; reg_offset++) {
 			switch (reg_offset) {
@@ -2971,10 +3256,13 @@
 	amdgpu_ring_write(ring, mmPA_SC_RASTER_CONFIG - PACKET3_SET_CONTEXT_REG_START);
 	switch (adev->asic_type) {
 	case CHIP_TONGA:
-	case CHIP_FIJI:
 		amdgpu_ring_write(ring, 0x16000012);
 		amdgpu_ring_write(ring, 0x0000002A);
 		break;
+	case CHIP_FIJI:
+		amdgpu_ring_write(ring, 0x3a00161a);
+		amdgpu_ring_write(ring, 0x0000002e);
+		break;
 	case CHIP_TOPAZ:
 	case CHIP_CARRIZO:
 		amdgpu_ring_write(ring, 0x00000002);
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 85bbcdc..7427d8c 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -40,7 +40,7 @@
 static void gmc_v7_0_set_gart_funcs(struct amdgpu_device *adev);
 static void gmc_v7_0_set_irq_funcs(struct amdgpu_device *adev);
 
-MODULE_FIRMWARE("radeon/boniare_mc.bin");
+MODULE_FIRMWARE("radeon/bonaire_mc.bin");
 MODULE_FIRMWARE("radeon/hawaii_mc.bin");
 
 /**
@@ -501,6 +501,7 @@
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
 	WREG32(mmVM_L2_CNTL, tmp);
 	tmp = REG_SET_FIELD(0, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_L2_CACHE, 1);
@@ -960,12 +961,10 @@
 
 static int gmc_v7_0_sw_fini(void *handle)
 {
-	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->vm_manager.enabled) {
-		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			fence_put(adev->vm_manager.active[i]);
+		amdgpu_vm_manager_fini(adev);
 		gmc_v7_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
@@ -1010,12 +1009,10 @@
 
 static int gmc_v7_0_suspend(void *handle)
 {
-	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->vm_manager.enabled) {
-		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			fence_put(adev->vm_manager.active[i]);
+		amdgpu_vm_manager_fini(adev);
 		gmc_v7_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 1bcc4e7..cb0e50e 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -629,6 +629,7 @@
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_L2_PDE0_CACHE_LRU_UPDATE_BY_WRITE, 1);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, EFFECTIVE_L2_QUEUE_SIZE, 7);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, CONTEXT1_IDENTITY_ACCESS_MODE, 1);
+	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL, ENABLE_DEFAULT_PAGE_OUT_TO_SYSTEM_MEMORY, 1);
 	WREG32(mmVM_L2_CNTL, tmp);
 	tmp = RREG32(mmVM_L2_CNTL2);
 	tmp = REG_SET_FIELD(tmp, VM_L2_CNTL2, INVALIDATE_ALL_L1_TLBS, 1);
@@ -979,12 +980,10 @@
 
 static int gmc_v8_0_sw_fini(void *handle)
 {
-	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->vm_manager.enabled) {
-		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			fence_put(adev->vm_manager.active[i]);
+		amdgpu_vm_manager_fini(adev);
 		gmc_v8_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
@@ -1031,12 +1030,10 @@
 
 static int gmc_v8_0_suspend(void *handle)
 {
-	int i;
 	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
 	if (adev->vm_manager.enabled) {
-		for (i = 0; i < AMDGPU_NUM_VM; ++i)
-			fence_put(adev->vm_manager.active[i]);
+		amdgpu_vm_manager_fini(adev);
 		gmc_v8_0_vm_fini(adev);
 		adev->vm_manager.enabled = false;
 	}
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
index 144f50a..c89dc77 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_sched_trace.h
@@ -16,6 +16,8 @@
 	    TP_ARGS(sched_job),
 	    TP_STRUCT__entry(
 			     __field(struct amd_sched_entity *, entity)
+			     __field(struct amd_sched_job *, sched_job)
+			     __field(struct fence *, fence)
 			     __field(const char *, name)
 			     __field(u32, job_count)
 			     __field(int, hw_job_count)
@@ -23,16 +25,32 @@
 
 	    TP_fast_assign(
 			   __entry->entity = sched_job->s_entity;
+			   __entry->sched_job = sched_job;
+			   __entry->fence = &sched_job->s_fence->base;
 			   __entry->name = sched_job->sched->name;
 			   __entry->job_count = kfifo_len(
 				   &sched_job->s_entity->job_queue) / sizeof(sched_job);
 			   __entry->hw_job_count = atomic_read(
 				   &sched_job->sched->hw_rq_count);
 			   ),
-	    TP_printk("entity=%p, ring=%s, job count:%u, hw job count:%d",
-		      __entry->entity, __entry->name, __entry->job_count,
-		      __entry->hw_job_count)
+	    TP_printk("entity=%p, sched job=%p, fence=%p, ring=%s, job count:%u, hw job count:%d",
+		      __entry->entity, __entry->sched_job, __entry->fence, __entry->name,
+		      __entry->job_count, __entry->hw_job_count)
 );
+
+TRACE_EVENT(amd_sched_process_job,
+	    TP_PROTO(struct amd_sched_fence *fence),
+	    TP_ARGS(fence),
+	    TP_STRUCT__entry(
+		    __field(struct fence *, fence)
+		    ),
+
+	    TP_fast_assign(
+		    __entry->fence = &fence->base;
+		    ),
+	    TP_printk("fence=%p signaled", __entry->fence)
+);
+
 #endif
 
 /* This part must be outside protection */
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
index 89619a5..ea30d6a 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.c
@@ -34,6 +34,9 @@
 amd_sched_entity_pop_job(struct amd_sched_entity *entity);
 static void amd_sched_wakeup(struct amd_gpu_scheduler *sched);
 
+struct kmem_cache *sched_fence_slab;
+atomic_t sched_fence_slab_ref = ATOMIC_INIT(0);
+
 /* Initialize a given run queue struct */
 static void amd_sched_rq_init(struct amd_sched_rq *rq)
 {
@@ -273,22 +276,13 @@
  *
- * Returns 0 for success, negative error code otherwise.
+ * Note: this function no longer returns a value; it blocks until the
+ * job has been queued to the entity.
  */
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job)
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job)
 {
 	struct amd_sched_entity *entity = sched_job->s_entity;
-	struct amd_sched_fence *fence = amd_sched_fence_create(
-		entity, sched_job->owner);
-
-	if (!fence)
-		return -ENOMEM;
-
-	fence_get(&fence->base);
-	sched_job->s_fence = fence;
 
 	wait_event(entity->sched->job_scheduled,
 		   amd_sched_entity_in(sched_job));
 	trace_amd_sched_job(sched_job);
-	return 0;
 }
 
 /**
@@ -343,6 +337,7 @@
 		list_del_init(&s_fence->list);
 		spin_unlock_irqrestore(&sched->fence_list_lock, flags);
 	}
+	trace_amd_sched_process_job(s_fence);
 	fence_put(&s_fence->base);
 	wake_up_interruptible(&sched->wake_up_worker);
 }
@@ -450,6 +445,13 @@
 	init_waitqueue_head(&sched->wake_up_worker);
 	init_waitqueue_head(&sched->job_scheduled);
 	atomic_set(&sched->hw_rq_count, 0);
+	if (atomic_inc_return(&sched_fence_slab_ref) == 1) {
+		sched_fence_slab = kmem_cache_create(
+			"amd_sched_fence", sizeof(struct amd_sched_fence), 0,
+			SLAB_HWCACHE_ALIGN, NULL);
+		if (!sched_fence_slab)
+			return -ENOMEM;
+	}
 
 	/* Each scheduler will run on a separate kernel thread */
 	sched->thread = kthread_run(amd_sched_main, sched, sched->name);
@@ -470,4 +472,6 @@
 {
 	if (sched->thread)
 		kthread_stop(sched->thread);
+	if (atomic_dec_and_test(&sched_fence_slab_ref))
+		kmem_cache_destroy(sched_fence_slab);
 }
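
The fence slab is shared by all scheduler instances, so its lifetime is
tied to a plain reference count: the first amd_sched_init() creates it, the
last amd_sched_fini() destroys it. The idiom, reduced to its core (a sketch
with a hypothetical foo type):

	static struct kmem_cache *slab;
	static atomic_t slab_ref = ATOMIC_INIT(0);

	/* first user creates the cache */
	if (atomic_inc_return(&slab_ref) == 1)
		slab = kmem_cache_create("foo", sizeof(struct foo), 0,
					 SLAB_HWCACHE_ALIGN, NULL);

	/* last user tears it down */
	if (atomic_dec_and_test(&slab_ref))
		kmem_cache_destroy(slab);

Objects from a kmem_cache must be freed back to that cache, which is why
sched_fence.c below gains an explicit .release callback instead of relying
on the default kfree()-based fence release.
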
diff --git a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
index 929e9ac..939692b 100644
--- a/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
+++ b/drivers/gpu/drm/amd/scheduler/gpu_scheduler.h
@@ -30,6 +30,9 @@
 struct amd_gpu_scheduler;
 struct amd_sched_rq;
 
+extern struct kmem_cache *sched_fence_slab;
+extern atomic_t sched_fence_slab_ref;
+
 /**
  * A scheduler entity is a wrapper around a job queue or a group
  * of other entities. Entities take turns emitting jobs from their 
@@ -76,7 +79,6 @@
 	struct amd_gpu_scheduler        *sched;
 	struct amd_sched_entity         *s_entity;
 	struct amd_sched_fence          *s_fence;
-	void		                *owner;
 };
 
 extern const struct fence_ops amd_sched_fence_ops;
@@ -128,7 +130,7 @@
 			  uint32_t jobs);
 void amd_sched_entity_fini(struct amd_gpu_scheduler *sched,
 			   struct amd_sched_entity *entity);
-int amd_sched_entity_push_job(struct amd_sched_job *sched_job);
+void amd_sched_entity_push_job(struct amd_sched_job *sched_job);
 
 struct amd_sched_fence *amd_sched_fence_create(
 	struct amd_sched_entity *s_entity, void *owner);
diff --git a/drivers/gpu/drm/amd/scheduler/sched_fence.c b/drivers/gpu/drm/amd/scheduler/sched_fence.c
index d802638..8d2130b 100644
--- a/drivers/gpu/drm/amd/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/amd/scheduler/sched_fence.c
@@ -32,7 +32,7 @@
 	struct amd_sched_fence *fence = NULL;
 	unsigned seq;
 
-	fence = kzalloc(sizeof(struct amd_sched_fence), GFP_KERNEL);
+	fence = kmem_cache_zalloc(sched_fence_slab, GFP_KERNEL);
 	if (fence == NULL)
 		return NULL;
 	fence->owner = owner;
@@ -71,11 +71,17 @@
 	return true;
 }
 
+static void amd_sched_fence_release(struct fence *f)
+{
+	struct amd_sched_fence *fence = to_amd_sched_fence(f);
+	kmem_cache_free(sched_fence_slab, fence);
+}
+
 const struct fence_ops amd_sched_fence_ops = {
 	.get_driver_name = amd_sched_fence_get_driver_name,
 	.get_timeline_name = amd_sched_fence_get_timeline_name,
 	.enable_signaling = amd_sched_fence_enable_signaling,
 	.signaled = NULL,
 	.wait = fence_default_wait,
-	.release = NULL,
+	.release = amd_sched_fence_release,
 };
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 7bb3845..aeee083 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -1432,6 +1432,45 @@
 	return ret;
 }
 
+/**
+ * drm_atomic_clean_old_fb -- Unset old_fb pointers and set plane->fb pointers.
+ *
+ * @dev: drm device to check.
+ * @plane_mask: plane mask for planes that were updated.
+ * @ret: return value, can be -EDEADLK for a retry.
+ *
+ * Before an update begins, plane->old_fb is set to plane->fb; before
+ * the locks are dropped again, old_fb must be reset to NULL and
+ * plane->fb updated to the new framebuffer. This bookkeeping is common
+ * to every atomic update, so it is split out into this helper.
+ */
+void drm_atomic_clean_old_fb(struct drm_device *dev,
+			     unsigned plane_mask,
+			     int ret)
+{
+	struct drm_plane *plane;
+
+	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
+ * locks (i.e. while it is still safe to deref plane->state).  We
+	 * need to do this here because the driver entry points cannot
+	 * distinguish between legacy and atomic ioctls.
+	 */
+	drm_for_each_plane_mask(plane, dev, plane_mask) {
+		if (ret == 0) {
+			struct drm_framebuffer *new_fb = plane->state->fb;
+			if (new_fb)
+				drm_framebuffer_reference(new_fb);
+			plane->fb = new_fb;
+			plane->crtc = plane->state->crtc;
+
+			if (plane->old_fb)
+				drm_framebuffer_unreference(plane->old_fb);
+		}
+		plane->old_fb = NULL;
+	}
+}
+EXPORT_SYMBOL(drm_atomic_clean_old_fb);
+
 int drm_mode_atomic_ioctl(struct drm_device *dev,
 			  void *data, struct drm_file *file_priv)
 {
@@ -1446,7 +1485,7 @@
 	struct drm_plane *plane;
 	struct drm_crtc *crtc;
 	struct drm_crtc_state *crtc_state;
-	unsigned plane_mask = 0;
+	unsigned plane_mask;
 	int ret = 0;
 	unsigned int i, j;
 
@@ -1486,6 +1525,7 @@
 	state->allow_modeset = !!(arg->flags & DRM_MODE_ATOMIC_ALLOW_MODESET);
 
 retry:
+	plane_mask = 0;
 	copied_objs = 0;
 	copied_props = 0;
 
@@ -1576,24 +1616,7 @@
 	}
 
 out:
-	/* if succeeded, fixup legacy plane crtc/fb ptrs before dropping
-	 * locks (ie. while it is still safe to deref plane->state).  We
-	 * need to do this here because the driver entry points cannot
-	 * distinguish between legacy and atomic ioctls.
-	 */
-	drm_for_each_plane_mask(plane, dev, plane_mask) {
-		if (ret == 0) {
-			struct drm_framebuffer *new_fb = plane->state->fb;
-			if (new_fb)
-				drm_framebuffer_reference(new_fb);
-			plane->fb = new_fb;
-			plane->crtc = plane->state->crtc;
-
-			if (plane->old_fb)
-				drm_framebuffer_unreference(plane->old_fb);
-		}
-		plane->old_fb = NULL;
-	}
+	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
 	if (ret && arg->flags & DRM_MODE_PAGE_FLIP_EVENT) {
 		/*
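
With the fixup centralized, every caller follows the same shape; a
condensed sketch of the calling convention (backoff drops the locks and
jumps back to retry, as elsewhere in this file):

	retry:
		plane_mask = 0;
		/* build the atomic state; for each plane actually touched:
		 *	plane->old_fb = plane->fb;
		 *	plane_mask |= 1 << drm_plane_index(plane);
		 */
		ret = drm_atomic_commit(state);
	out:
		drm_atomic_clean_old_fb(dev, plane_mask, ret);
		if (ret == -EDEADLK)
			goto backoff;

Resetting plane_mask at the top of the retry loop matters: after an
-EDEADLK round trip the old_fb pointers have already been cleaned, so a
stale mask would touch planes that are no longer part of the update.
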
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 0c6f621..e5aec45 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -210,6 +210,14 @@
 		return -EINVAL;
 	}
 
+	if (!drm_encoder_crtc_ok(new_encoder, connector_state->crtc)) {
+		DRM_DEBUG_ATOMIC("[ENCODER:%d:%s] incompatible with [CRTC:%d]\n",
+				 new_encoder->base.id,
+				 new_encoder->name,
+				 connector_state->crtc->base.id);
+		return -EINVAL;
+	}
+
 	if (new_encoder == connector_state->best_encoder) {
 		DRM_DEBUG_ATOMIC("[CONNECTOR:%d:%s] keeps [ENCODER:%d:%s], now on [CRTC:%d]\n",
 				 connector->base.id,
@@ -1553,6 +1561,9 @@
 		goto fail;
 	}
 
+	if (plane_state->crtc && (plane == plane->crtc->cursor))
+		plane_state->state->legacy_cursor_update = true;
+
 	ret = __drm_atomic_helper_disable_plane(plane, plane_state);
 	if (ret != 0)
 		goto fail;
@@ -1605,9 +1616,6 @@
 	plane_state->src_h = 0;
 	plane_state->src_w = 0;
 
-	if (plane->crtc && (plane == plane->crtc->cursor))
-		plane_state->state->legacy_cursor_update = true;
-
 	return 0;
 }
 
@@ -1741,6 +1749,7 @@
 	struct drm_crtc_state *crtc_state;
 	struct drm_plane_state *primary_state;
 	struct drm_crtc *crtc = set->crtc;
+	int hdisplay, vdisplay;
 	int ret;
 
 	crtc_state = drm_atomic_get_crtc_state(state, crtc);
@@ -1783,19 +1792,21 @@
 	if (ret != 0)
 		return ret;
 
+	drm_crtc_get_hv_timing(set->mode, &hdisplay, &vdisplay);
+
 	drm_atomic_set_fb_for_plane(primary_state, set->fb);
 	primary_state->crtc_x = 0;
 	primary_state->crtc_y = 0;
-	primary_state->crtc_h = set->mode->vdisplay;
-	primary_state->crtc_w = set->mode->hdisplay;
+	primary_state->crtc_h = vdisplay;
+	primary_state->crtc_w = hdisplay;
 	primary_state->src_x = set->x << 16;
 	primary_state->src_y = set->y << 16;
 	if (primary_state->rotation & (BIT(DRM_ROTATE_90) | BIT(DRM_ROTATE_270))) {
-		primary_state->src_h = set->mode->hdisplay << 16;
-		primary_state->src_w = set->mode->vdisplay << 16;
+		primary_state->src_h = hdisplay << 16;
+		primary_state->src_w = vdisplay << 16;
 	} else {
-		primary_state->src_h = set->mode->vdisplay << 16;
-		primary_state->src_w = set->mode->hdisplay << 16;
+		primary_state->src_h = vdisplay << 16;
+		primary_state->src_w = hdisplay << 16;
 	}
 
 commit:
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index e673c13..69cbab5 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -342,6 +342,7 @@
 	struct drm_plane *plane;
 	struct drm_atomic_state *state;
 	int i, ret;
+	unsigned plane_mask;
 
 	state = drm_atomic_state_alloc(dev);
 	if (!state)
@@ -349,11 +350,10 @@
 
 	state->acquire_ctx = dev->mode_config.acquire_ctx;
 retry:
+	plane_mask = 0;
 	drm_for_each_plane(plane, dev) {
 		struct drm_plane_state *plane_state;
 
-		plane->old_fb = plane->fb;
-
 		plane_state = drm_atomic_get_plane_state(state, plane);
 		if (IS_ERR(plane_state)) {
 			ret = PTR_ERR(plane_state);
@@ -362,6 +362,9 @@
 
 		plane_state->rotation = BIT(DRM_ROTATE_0);
 
+		plane->old_fb = plane->fb;
+		plane_mask |= 1 << drm_plane_index(plane);
+
 		/* disable non-primary: */
 		if (plane->type == DRM_PLANE_TYPE_PRIMARY)
 			continue;
@@ -382,19 +385,7 @@
 	ret = drm_atomic_commit(state);
 
 fail:
-	drm_for_each_plane(plane, dev) {
-		if (ret == 0) {
-			struct drm_framebuffer *new_fb = plane->state->fb;
-			if (new_fb)
-				drm_framebuffer_reference(new_fb);
-			plane->fb = new_fb;
-			plane->crtc = plane->state->crtc;
-
-			if (plane->old_fb)
-				drm_framebuffer_unreference(plane->old_fb);
-		}
-		plane->old_fb = NULL;
-	}
+	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
 	if (ret == -EDEADLK)
 		goto backoff;
@@ -1236,7 +1227,9 @@
 	struct drm_fb_helper *fb_helper = info->par;
 	struct drm_device *dev = fb_helper->dev;
 	struct drm_atomic_state *state;
+	struct drm_plane *plane;
 	int i, ret;
+	unsigned plane_mask;
 
 	state = drm_atomic_state_alloc(dev);
 	if (!state)
@@ -1244,19 +1237,22 @@
 
 	state->acquire_ctx = dev->mode_config.acquire_ctx;
 retry:
+	plane_mask = 0;
 	for(i = 0; i < fb_helper->crtc_count; i++) {
 		struct drm_mode_set *mode_set;
 
 		mode_set = &fb_helper->crtc_info[i].mode_set;
 
-		mode_set->crtc->primary->old_fb = mode_set->crtc->primary->fb;
-
 		mode_set->x = var->xoffset;
 		mode_set->y = var->yoffset;
 
 		ret = __drm_atomic_helper_set_config(mode_set, state);
 		if (ret != 0)
 			goto fail;
+
+		plane = mode_set->crtc->primary;
+		plane_mask |= 1 << drm_plane_index(plane);
+		plane->old_fb = plane->fb;
 	}
 
 	ret = drm_atomic_commit(state);
@@ -1268,26 +1264,7 @@
 
 
 fail:
-	for(i = 0; i < fb_helper->crtc_count; i++) {
-		struct drm_mode_set *mode_set;
-		struct drm_plane *plane;
-
-		mode_set = &fb_helper->crtc_info[i].mode_set;
-		plane = mode_set->crtc->primary;
-
-		if (ret == 0) {
-			struct drm_framebuffer *new_fb = plane->state->fb;
-
-			if (new_fb)
-				drm_framebuffer_reference(new_fb);
-			plane->fb = new_fb;
-			plane->crtc = plane->state->crtc;
-
-			if (plane->old_fb)
-				drm_framebuffer_unreference(plane->old_fb);
-		}
-		plane->old_fb = NULL;
-	}
+	drm_atomic_clean_old_fb(dev, plane_mask, ret);
 
 	if (ret == -EDEADLK)
 		goto backoff;
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 8afda45..95bb27d 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -351,6 +351,8 @@
 	/* hsw/bdw */
 	DPLL_ID_WRPLL1 = 0,
 	DPLL_ID_WRPLL2 = 1,
+	DPLL_ID_SPLL = 2,
+
 	/* skl */
 	DPLL_ID_SKL_DPLL1 = 0,
 	DPLL_ID_SKL_DPLL2 = 1,
@@ -367,6 +369,7 @@
 
 	/* hsw, bdw */
 	uint32_t wrpll;
+	uint32_t spll;
 
 	/* skl */
 	/*
@@ -2648,6 +2651,7 @@
 	int enable_cmd_parser;
 	/* leave bools at the end to not create holes */
 	bool enable_hangcheck;
+	bool fastboot;
 	bool prefault_disable;
 	bool load_detect_test;
 	bool reset;
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 5cf4a19..91bb1fc 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -3809,6 +3809,7 @@
 int i915_gem_set_caching_ioctl(struct drm_device *dev, void *data,
 			       struct drm_file *file)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
 	struct drm_i915_gem_caching *args = data;
 	struct drm_i915_gem_object *obj;
 	enum i915_cache_level level;
@@ -3837,9 +3838,11 @@
 		return -EINVAL;
 	}
 
+	intel_runtime_pm_get(dev_priv);
+
 	ret = i915_mutex_lock_interruptible(dev);
 	if (ret)
-		return ret;
+		goto rpm_put;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, file, args->handle));
 	if (&obj->base == NULL) {
@@ -3852,6 +3855,9 @@
 	drm_gem_object_unreference(&obj->base);
 unlock:
 	mutex_unlock(&dev->struct_mutex);
+rpm_put:
+	intel_runtime_pm_put(dev_priv);
+
 	return ret;
 }
 
diff --git a/drivers/gpu/drm/i915/i915_params.c b/drivers/gpu/drm/i915/i915_params.c
index 96bb238..4be13a5 100644
--- a/drivers/gpu/drm/i915/i915_params.c
+++ b/drivers/gpu/drm/i915/i915_params.c
@@ -40,6 +40,7 @@
 	.preliminary_hw_support = IS_ENABLED(CONFIG_DRM_I915_PRELIMINARY_HW_SUPPORT),
 	.disable_power_well = -1,
 	.enable_ips = 1,
+	.fastboot = 0,
 	.prefault_disable = 0,
 	.load_detect_test = 0,
 	.reset = true,
@@ -133,6 +134,10 @@
 module_param_named_unsafe(enable_ips, i915.enable_ips, int, 0600);
 MODULE_PARM_DESC(enable_ips, "Enable IPS (default: true)");
 
+module_param_named(fastboot, i915.fastboot, bool, 0600);
+MODULE_PARM_DESC(fastboot,
+	"Try to skip unnecessary mode sets at boot time (default: false)");
+
 module_param_named_unsafe(prefault_disable, i915.prefault_disable, bool, 0600);
 MODULE_PARM_DESC(prefault_disable,
 	"Disable page prefaulting for pread/pwrite/reloc (default:false). "
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index b84aaa0..6a2c76e 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -138,18 +138,6 @@
 	pipe_config->base.adjusted_mode.flags |= intel_crt_get_flags(encoder);
 }
 
-static void hsw_crt_pre_enable(struct intel_encoder *encoder)
-{
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-
-	WARN(I915_READ(SPLL_CTL) & SPLL_PLL_ENABLE, "SPLL already enabled\n");
-	I915_WRITE(SPLL_CTL,
-		   SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC);
-	POSTING_READ(SPLL_CTL);
-	udelay(20);
-}
-
 /* Note: The caller is required to filter out dpms modes not supported by the
  * platform. */
 static void intel_crt_set_dpms(struct intel_encoder *encoder, int mode)
@@ -216,19 +204,6 @@
 	intel_disable_crt(encoder);
 }
 
-static void hsw_crt_post_disable(struct intel_encoder *encoder)
-{
-	struct drm_device *dev = encoder->base.dev;
-	struct drm_i915_private *dev_priv = dev->dev_private;
-	uint32_t val;
-
-	DRM_DEBUG_KMS("Disabling SPLL\n");
-	val = I915_READ(SPLL_CTL);
-	WARN_ON(!(val & SPLL_PLL_ENABLE));
-	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
-	POSTING_READ(SPLL_CTL);
-}
-
 static void intel_enable_crt(struct intel_encoder *encoder)
 {
 	struct intel_crt *crt = intel_encoder_to_crt(encoder);
@@ -280,6 +255,10 @@
 	if (HAS_DDI(dev)) {
 		pipe_config->ddi_pll_sel = PORT_CLK_SEL_SPLL;
 		pipe_config->port_clock = 135000 * 2;
+
+		pipe_config->dpll_hw_state.wrpll = 0;
+		pipe_config->dpll_hw_state.spll =
+			SPLL_PLL_ENABLE | SPLL_PLL_FREQ_1350MHz | SPLL_PLL_SSC;
 	}
 
 	return true;
@@ -860,8 +839,6 @@
 	if (HAS_DDI(dev)) {
 		crt->base.get_config = hsw_crt_get_config;
 		crt->base.get_hw_state = intel_ddi_get_hw_state;
-		crt->base.pre_enable = hsw_crt_pre_enable;
-		crt->base.post_disable = hsw_crt_post_disable;
 	} else {
 		crt->base.get_config = intel_crt_get_config;
 		crt->base.get_hw_state = intel_crt_get_hw_state;
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index b25e99a..a6752a6 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1286,6 +1286,18 @@
 		}
 
 		crtc_state->ddi_pll_sel = PORT_CLK_SEL_WRPLL(pll->id);
+	} else if (crtc_state->ddi_pll_sel == PORT_CLK_SEL_SPLL) {
+		struct drm_atomic_state *state = crtc_state->base.state;
+		struct intel_shared_dpll_config *spll =
+			&intel_atomic_get_shared_dpll_state(state)[DPLL_ID_SPLL];
+
+		if (spll->crtc_mask &&
+		    WARN_ON(spll->hw_state.spll != crtc_state->dpll_hw_state.spll))
+			return false;
+
+		crtc_state->shared_dpll = DPLL_ID_SPLL;
+		spll->hw_state.spll = crtc_state->dpll_hw_state.spll;
+		spll->crtc_mask |= 1 << intel_crtc->pipe;
 	}
 
 	return true;
@@ -2437,7 +2449,7 @@
 	}
 }
 
-static void hsw_ddi_pll_enable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_wrpll_enable(struct drm_i915_private *dev_priv,
 			       struct intel_shared_dpll *pll)
 {
 	I915_WRITE(WRPLL_CTL(pll->id), pll->config.hw_state.wrpll);
@@ -2445,9 +2457,17 @@
 	udelay(20);
 }
 
-static void hsw_ddi_pll_disable(struct drm_i915_private *dev_priv,
+static void hsw_ddi_spll_enable(struct drm_i915_private *dev_priv,
 				struct intel_shared_dpll *pll)
 {
+	I915_WRITE(SPLL_CTL, pll->config.hw_state.spll);
+	POSTING_READ(SPLL_CTL);
+	udelay(20);
+}
+
+static void hsw_ddi_wrpll_disable(struct drm_i915_private *dev_priv,
+				  struct intel_shared_dpll *pll)
+{
 	uint32_t val;
 
 	val = I915_READ(WRPLL_CTL(pll->id));
@@ -2455,9 +2475,19 @@
 	POSTING_READ(WRPLL_CTL(pll->id));
 }
 
-static bool hsw_ddi_pll_get_hw_state(struct drm_i915_private *dev_priv,
-				     struct intel_shared_dpll *pll,
-				     struct intel_dpll_hw_state *hw_state)
+static void hsw_ddi_spll_disable(struct drm_i915_private *dev_priv,
+				 struct intel_shared_dpll *pll)
+{
+	uint32_t val;
+
+	val = I915_READ(SPLL_CTL);
+	I915_WRITE(SPLL_CTL, val & ~SPLL_PLL_ENABLE);
+	POSTING_READ(SPLL_CTL);
+}
+
+static bool hsw_ddi_wrpll_get_hw_state(struct drm_i915_private *dev_priv,
+				       struct intel_shared_dpll *pll,
+				       struct intel_dpll_hw_state *hw_state)
 {
 	uint32_t val;
 
@@ -2470,25 +2500,50 @@
 	return val & WRPLL_PLL_ENABLE;
 }
 
+static bool hsw_ddi_spll_get_hw_state(struct drm_i915_private *dev_priv,
+				      struct intel_shared_dpll *pll,
+				      struct intel_dpll_hw_state *hw_state)
+{
+	uint32_t val;
+
+	if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+		return false;
+
+	val = I915_READ(SPLL_CTL);
+	hw_state->spll = val;
+
+	return val & SPLL_PLL_ENABLE;
+}
+
+
 static const char * const hsw_ddi_pll_names[] = {
 	"WRPLL 1",
 	"WRPLL 2",
+	"SPLL"
 };
 
 static void hsw_shared_dplls_init(struct drm_i915_private *dev_priv)
 {
 	int i;
 
-	dev_priv->num_shared_dpll = 2;
+	dev_priv->num_shared_dpll = 3;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+	for (i = 0; i < 2; i++) {
 		dev_priv->shared_dplls[i].id = i;
 		dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
-		dev_priv->shared_dplls[i].disable = hsw_ddi_pll_disable;
-		dev_priv->shared_dplls[i].enable = hsw_ddi_pll_enable;
+		dev_priv->shared_dplls[i].disable = hsw_ddi_wrpll_disable;
+		dev_priv->shared_dplls[i].enable = hsw_ddi_wrpll_enable;
 		dev_priv->shared_dplls[i].get_hw_state =
-			hsw_ddi_pll_get_hw_state;
+			hsw_ddi_wrpll_get_hw_state;
 	}
+
+	/* SPLL is special, but needs to be initialized anyway. */
+	dev_priv->shared_dplls[i].id = i;
+	dev_priv->shared_dplls[i].name = hsw_ddi_pll_names[i];
+	dev_priv->shared_dplls[i].disable = hsw_ddi_spll_disable;
+	dev_priv->shared_dplls[i].enable = hsw_ddi_spll_enable;
+	dev_priv->shared_dplls[i].get_hw_state = hsw_ddi_spll_get_hw_state;
 }
 
 static const char * const skl_ddi_pll_names[] = {
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index f62ffc0..71860f8 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -2646,11 +2646,13 @@
 	return;
 
 valid_fb:
-	plane_state->src_x = plane_state->src_y = 0;
+	plane_state->src_x = 0;
+	plane_state->src_y = 0;
 	plane_state->src_w = fb->width << 16;
 	plane_state->src_h = fb->height << 16;
 
-	plane_state->crtc_x = plane_state->src_y = 0;
+	plane_state->crtc_x = 0;
+	plane_state->crtc_y = 0;
 	plane_state->crtc_w = fb->width;
 	plane_state->crtc_h = fb->height;
 
@@ -4237,6 +4239,7 @@
 	struct intel_shared_dpll *pll;
 	struct intel_shared_dpll_config *shared_dpll;
 	enum intel_dpll_id i;
+	int max = dev_priv->num_shared_dpll;
 
 	shared_dpll = intel_atomic_get_shared_dpll_state(crtc_state->base.state);
 
@@ -4271,9 +4274,11 @@
 		WARN_ON(shared_dpll[i].crtc_mask);
 
 		goto found;
-	}
+	} else if (INTEL_INFO(dev_priv)->gen < 9 && HAS_DDI(dev_priv))
+		/* Do not consider SPLL */
+		max = 2;
 
-	for (i = 0; i < dev_priv->num_shared_dpll; i++) {
+	for (i = 0; i < max; i++) {
 		pll = &dev_priv->shared_dplls[i];
 
 		/* Only want to check enabled timings first */
@@ -9723,6 +9728,8 @@
 	case PORT_CLK_SEL_WRPLL2:
 		pipe_config->shared_dpll = DPLL_ID_WRPLL2;
 		break;
+	case PORT_CLK_SEL_SPLL:
+		pipe_config->shared_dpll = DPLL_ID_SPLL;
 	}
 }
 
@@ -12003,9 +12010,10 @@
 			      pipe_config->dpll_hw_state.cfgcr1,
 			      pipe_config->dpll_hw_state.cfgcr2);
 	} else if (HAS_DDI(dev)) {
-		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x\n",
+		DRM_DEBUG_KMS("ddi_pll_sel: %u; dpll_hw_state: wrpll: 0x%x spll: 0x%x\n",
 			      pipe_config->ddi_pll_sel,
-			      pipe_config->dpll_hw_state.wrpll);
+			      pipe_config->dpll_hw_state.wrpll,
+			      pipe_config->dpll_hw_state.spll);
 	} else {
 		DRM_DEBUG_KMS("dpll_hw_state: dpll: 0x%x, dpll_md: 0x%x, "
 			      "fp0: 0x%x, fp1: 0x%x\n",
@@ -12528,6 +12536,7 @@
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp0);
 	PIPE_CONF_CHECK_X(dpll_hw_state.fp1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.wrpll);
+	PIPE_CONF_CHECK_X(dpll_hw_state.spll);
 	PIPE_CONF_CHECK_X(dpll_hw_state.ctrl1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr1);
 	PIPE_CONF_CHECK_X(dpll_hw_state.cfgcr2);
@@ -13032,6 +13041,9 @@
 		struct intel_crtc_state *pipe_config =
 			to_intel_crtc_state(crtc_state);
 
+		memset(&to_intel_crtc(crtc)->atomic, 0,
+		       sizeof(struct intel_crtc_atomic_commit));
+
 		/* Catch I915_MODE_FLAG_INHERITED */
 		if (crtc_state->mode.private_flags != crtc->state->mode.private_flags)
 			crtc_state->mode_changed = true;
@@ -13056,7 +13068,8 @@
 		if (ret)
 			return ret;
 
-		if (intel_pipe_config_compare(state->dev,
+		if (i915.fastboot &&
+		    intel_pipe_config_compare(state->dev,
 					to_intel_crtc_state(crtc->state),
 					pipe_config, true)) {
 			crtc_state->mode_changed = false;
@@ -14364,16 +14377,17 @@
 static struct drm_framebuffer *
 intel_user_framebuffer_create(struct drm_device *dev,
 			      struct drm_file *filp,
-			      struct drm_mode_fb_cmd2 *mode_cmd)
+			      struct drm_mode_fb_cmd2 *user_mode_cmd)
 {
 	struct drm_i915_gem_object *obj;
+	struct drm_mode_fb_cmd2 mode_cmd = *user_mode_cmd;
 
 	obj = to_intel_bo(drm_gem_object_lookup(dev, filp,
-						mode_cmd->handles[0]));
+						mode_cmd.handles[0]));
 	if (&obj->base == NULL)
 		return ERR_PTR(-ENOENT);
 
-	return intel_framebuffer_create(dev, mode_cmd, obj);
+	return intel_framebuffer_create(dev, &mode_cmd, obj);
 }
 
 #ifndef CONFIG_DRM_FBDEV_EMULATION
@@ -14705,6 +14719,9 @@
 	/* Apple Macbook 2,1 (Core 2 T7400) */
 	{ 0x27a2, 0x8086, 0x7270, quirk_backlight_present },
 
+	/* Apple Macbook 4,1 */
+	{ 0x2a02, 0x106b, 0x00a1, quirk_backlight_present },
+
 	/* Toshiba CB35 Chromebook (Celeron 2955U) */
 	{ 0x0a06, 0x1179, 0x0a88, quirk_backlight_present },
 
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index d52a15d..071a76b 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -4449,7 +4449,7 @@
 	POSTING_READ(GEN6_RPNSWREQ);
 
 	dev_priv->rps.cur_freq = val;
-	trace_intel_gpu_freq_change(val * 50);
+	trace_intel_gpu_freq_change(intel_gpu_freq(dev_priv, val));
 }
 
 static void valleyview_set_rps(struct drm_device *dev, u8 val)
@@ -7255,7 +7255,8 @@
 int intel_gpu_freq(struct drm_i915_private *dev_priv, int val)
 {
 	if (IS_GEN9(dev_priv->dev))
-		return (val * GT_FREQUENCY_MULTIPLIER) / GEN9_FREQ_SCALER;
+		return DIV_ROUND_CLOSEST(val * GT_FREQUENCY_MULTIPLIER,
+					 GEN9_FREQ_SCALER);
 	else if (IS_CHERRYVIEW(dev_priv->dev))
 		return chv_gpu_freq(dev_priv, val);
 	else if (IS_VALLEYVIEW(dev_priv->dev))
@@ -7267,13 +7268,14 @@
 int intel_freq_opcode(struct drm_i915_private *dev_priv, int val)
 {
 	if (IS_GEN9(dev_priv->dev))
-		return (val * GEN9_FREQ_SCALER) / GT_FREQUENCY_MULTIPLIER;
+		return DIV_ROUND_CLOSEST(val * GEN9_FREQ_SCALER,
+					 GT_FREQUENCY_MULTIPLIER);
 	else if (IS_CHERRYVIEW(dev_priv->dev))
 		return chv_freq_opcode(dev_priv, val);
 	else if (IS_VALLEYVIEW(dev_priv->dev))
 		return byt_freq_opcode(dev_priv, val);
 	else
-		return val / GT_FREQUENCY_MULTIPLIER;
+		return DIV_ROUND_CLOSEST(val, GT_FREQUENCY_MULTIPLIER);
 }
 
 struct request_boost {
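
The conversions switch from truncating division to DIV_ROUND_CLOSEST() so
that mapping a frequency to opcode units and back lands on the nearest
step instead of drifting downward. For example, assuming gen9's
GT_FREQUENCY_MULTIPLIER of 50 and GEN9_FREQ_SCALER of 3:

	(7 * 50) / 3			/* = 116, truncated */
	DIV_ROUND_CLOSEST(7 * 50, 3)	/* = 117, nearest   */
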
diff --git a/drivers/gpu/drm/mgag200/mgag200_cursor.c b/drivers/gpu/drm/mgag200/mgag200_cursor.c
index 4f2068f..a7bf6a9 100644
--- a/drivers/gpu/drm/mgag200/mgag200_cursor.c
+++ b/drivers/gpu/drm/mgag200/mgag200_cursor.c
@@ -70,6 +70,11 @@
 	BUG_ON(pixels_2 != pixels_current && pixels_2 != pixels_prev);
 	BUG_ON(pixels_current == pixels_prev);
 
+	if (!handle || !file_priv) {
+		mga_hide_cursor(mdev);
+		return 0;
+	}
+
 	obj = drm_gem_object_lookup(dev, file_priv, handle);
 	if (!obj)
 		return -ENOENT;
@@ -88,12 +93,6 @@
 		goto out_unreserve1;
 	}
 
-	if (!handle) {
-		mga_hide_cursor(mdev);
-		ret = 0;
-		goto out1;
-	}
-
 	/* Move cursor buffers into VRAM if they aren't already */
 	if (!pixels_1->pin_count) {
 		ret = mgag200_bo_pin(pixels_1, TTM_PL_FLAG_VRAM,
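The mgag200 hunks move the hide-cursor case ahead of the GEM lookup: a handle
of 0 means "hide the cursor", and looking up handle 0 fails with -ENOENT, so
the old ordering errored out before it ever reached the hide path. A toy
sketch of the reordering (names are illustrative, not the mgag200 API):

    #include <stdio.h>
    #include <errno.h>

    static int lookup_object(unsigned int handle)
    {
            return handle ? 0 : -ENOENT; /* handle 0 never resolves */
    }

    static int set_cursor(unsigned int handle)
    {
            if (!handle) {          /* hide request: no object needed */
                    printf("cursor hidden\n");
                    return 0;
            }
            if (lookup_object(handle))
                    return -ENOENT;
            printf("cursor set from handle %u\n", handle);
            return 0;
    }

    int main(void)
    {
            set_cursor(3);
            return set_cursor(0); /* old ordering errored out here */
    }
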
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index d302488..84d4563 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -221,11 +221,17 @@
 	if (!(rdev->flags & RADEON_IS_PCIE))
 		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 
+	/* Write-combined CPU mappings of GTT cause GPU hangs with RV6xx
+	 * See https://bugs.freedesktop.org/show_bug.cgi?id=91268
+	 */
+	if (rdev->family >= CHIP_RV610 && rdev->family <= CHIP_RV635)
+		bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
+
 #ifdef CONFIG_X86_32
 	/* XXX: Write-combined CPU mappings of GTT seem broken on 32-bit
 	 * See https://bugs.freedesktop.org/show_bug.cgi?id=84627
 	 */
-	bo->flags &= ~RADEON_GEM_GTT_WC;
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 #elif defined(CONFIG_X86) && !defined(CONFIG_X86_PAT)
 	/* Don't try to enable write-combining when it can't work, or things
 	 * may be slow
@@ -235,9 +241,10 @@
 #warning Please enable CONFIG_MTRR and CONFIG_X86_PAT for better performance \
 	 thanks to write-combining
 
-	DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
-		      "better performance thanks to write-combining\n");
-	bo->flags &= ~RADEON_GEM_GTT_WC;
+	if (bo->flags & RADEON_GEM_GTT_WC)
+		DRM_INFO_ONCE("Please enable CONFIG_MTRR and CONFIG_X86_PAT for "
+			      "better performance thanks to write-combining\n");
+	bo->flags &= ~(RADEON_GEM_GTT_WC | RADEON_GEM_GTT_UC);
 #endif
 
 	radeon_ttm_placement_from_domain(bo, domain);
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 6d80dde..f4f03dc 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -1542,8 +1542,7 @@
 				ret = device_create_file(rdev->dev, &dev_attr_power_method);
 				if (ret)
 					DRM_ERROR("failed to create device file for power method\n");
-				if (!ret)
-					rdev->pm.sysfs_initialized = true;
+				rdev->pm.sysfs_initialized = true;
 			}
 
 			mutex_lock(&rdev->pm.mutex);
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index e72bf46..a82b891 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -2927,7 +2927,7 @@
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x1462, 0x3036, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x174b, 0xe271, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6810, 0x174b, 0xe271, 85000, 90000 },
-	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1762, 0x2015, 0, 120000 },
+	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1462, 0x2015, 0, 120000 },
 	{ PCI_VENDOR_ID_ATI, 0x6811, 0x1043, 0x2015, 0, 120000 },
 	{ 0, 0, 0, 0 },
 };
diff --git a/drivers/gpu/drm/vc4/vc4_crtc.c b/drivers/gpu/drm/vc4/vc4_crtc.c
index 7a9f476..265064c 100644
--- a/drivers/gpu/drm/vc4/vc4_crtc.c
+++ b/drivers/gpu/drm/vc4/vc4_crtc.c
@@ -168,7 +168,7 @@
 	struct drm_connector *connector;
 
 	drm_for_each_connector(connector, crtc->dev) {
-		if (connector && connector->state->crtc == crtc) {
+		if (connector->state->crtc == crtc) {
 			struct drm_encoder *encoder = connector->encoder;
 			struct vc4_encoder *vc4_encoder =
 				to_vc4_encoder(encoder);
@@ -401,7 +401,8 @@
 		dlist_next++;
 
 		HVS_WRITE(SCALER_DISPLISTX(vc4_crtc->channel),
-			  (u32 *)vc4_crtc->dlist - (u32 *)vc4->hvs->dlist);
+			  (u32 __iomem *)vc4_crtc->dlist -
+			  (u32 __iomem *)vc4->hvs->dlist);
 
 		/* Make the next display list start after ours. */
 		vc4_crtc->dlist_size -= (dlist_next - vc4_crtc->dlist);
@@ -591,14 +592,14 @@
 	 * that will take too much.
 	 */
 	primary_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_PRIMARY);
-	if (!primary_plane) {
+	if (IS_ERR(primary_plane)) {
 		dev_err(dev, "failed to construct primary plane\n");
 		ret = PTR_ERR(primary_plane);
 		goto err;
 	}
 
 	cursor_plane = vc4_plane_init(drm, DRM_PLANE_TYPE_CURSOR);
-	if (!cursor_plane) {
+	if (IS_ERR(cursor_plane)) {
 		dev_err(dev, "failed to construct cursor plane\n");
 		ret = PTR_ERR(cursor_plane);
 		goto err_primary;
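vc4_plane_init() reports failure through the ERR_PTR() convention rather than
by returning NULL, so the "if (!primary_plane)" tests above could never fire;
IS_ERR()/PTR_ERR() are the matching checks. A userspace re-implementation of
the convention from include/linux/err.h shows why:

    #include <stdio.h>

    /* Errors travel inside the pointer value: a failed constructor
     * returns ERR_PTR(-errno), never NULL. */
    #define MAX_ERRNO       4095
    #define ENOMEM          12

    static inline void *ERR_PTR(long error)  { return (void *)error; }
    static inline long PTR_ERR(const void *p) { return (long)p; }
    static inline int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    static void *plane_init(int fail)
    {
            static int plane = 42;

            return fail ? ERR_PTR(-ENOMEM) : &plane;
    }

    int main(void)
    {
            void *p = plane_init(1);

            if (!p)         /* never true for an ERR_PTR return */
                    printf("NULL check: missed the error\n");
            if (IS_ERR(p))  /* the matching check */
                    printf("IS_ERR check: caught error %ld\n", PTR_ERR(p));
            return 0;
    }
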
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 6e73060..d5db9e0 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -259,7 +259,6 @@
 	.remove		= vc4_platform_drm_remove,
 	.driver		= {
 		.name	= "vc4-drm",
-		.owner	= THIS_MODULE,
 		.of_match_table = vc4_of_match,
 	},
 };
diff --git a/drivers/gpu/drm/vc4/vc4_hvs.c b/drivers/gpu/drm/vc4/vc4_hvs.c
index ab1673f..8098c5b 100644
--- a/drivers/gpu/drm/vc4/vc4_hvs.c
+++ b/drivers/gpu/drm/vc4/vc4_hvs.c
@@ -75,10 +75,10 @@
 	for (i = 0; i < 64; i += 4) {
 		DRM_INFO("0x%08x (%s): 0x%08x 0x%08x 0x%08x 0x%08x\n",
 			 i * 4, i < HVS_BOOTLOADER_DLIST_END ? "B" : "D",
-			 ((uint32_t *)vc4->hvs->dlist)[i + 0],
-			 ((uint32_t *)vc4->hvs->dlist)[i + 1],
-			 ((uint32_t *)vc4->hvs->dlist)[i + 2],
-			 ((uint32_t *)vc4->hvs->dlist)[i + 3]);
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 0),
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 1),
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 2),
+			 readl((u32 __iomem *)vc4->hvs->dlist + i + 3));
 	}
 }
 
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index cdd8b10..887f3ca 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -70,7 +70,7 @@
 	return state->fb && state->crtc;
 }
 
-struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
+static struct drm_plane_state *vc4_plane_duplicate_state(struct drm_plane *plane)
 {
 	struct vc4_plane_state *vc4_state;
 
@@ -97,8 +97,8 @@
 	return &vc4_state->base;
 }
 
-void vc4_plane_destroy_state(struct drm_plane *plane,
-			     struct drm_plane_state *state)
+static void vc4_plane_destroy_state(struct drm_plane *plane,
+				    struct drm_plane_state *state)
 {
 	struct vc4_plane_state *vc4_state = to_vc4_plane_state(state);
 
@@ -108,7 +108,7 @@
 }
 
 /* Called during init to allocate the plane's atomic state. */
-void vc4_plane_reset(struct drm_plane *plane)
+static void vc4_plane_reset(struct drm_plane *plane)
 {
 	struct vc4_plane_state *vc4_state;
 
@@ -157,6 +157,16 @@
 	int crtc_w = state->crtc_w;
 	int crtc_h = state->crtc_h;
 
+	if (state->crtc_w << 16 != state->src_w ||
+	    state->crtc_h << 16 != state->src_h) {
+		/* We don't support scaling yet, which involves
+		 * allocating the LBM memory for scaling temporary
+		 * storage, and putting filter kernels in the HVS
+		 * context.
+		 */
+		return -EINVAL;
+	}
+
 	if (crtc_x < 0) {
 		offset += drm_format_plane_cpp(fb->pixel_format, 0) * -crtc_x;
 		crtc_w += crtc_x;
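The new check in the vc4 plane-setup path relies on DRM's fixed-point
convention: plane source coordinates (src_w/src_h) are 16.16 fixed point,
while CRTC destination coordinates are whole pixels, so an unscaled 1:1 plane
satisfies crtc_w << 16 == src_w exactly. A small sketch:

    #include <stdio.h>

    /* "No scaling" means dest << 16 == src exactly, since the source
     * is 16.16 fixed point and the destination is integer pixels. */
    static int needs_scaling(unsigned int crtc_w, unsigned int src_w_16_16)
    {
            return (crtc_w << 16) != src_w_16_16;
    }

    int main(void)
    {
            printf("%d\n", needs_scaling(1920, 1920 << 16));       /* 0: 1:1 */
            printf("%d\n", needs_scaling(1920, 960 << 16));        /* 1: 2x upscale */
            printf("%d\n", needs_scaling(1920, (1920 << 16) + 8)); /* 1: fractional */
            return 0;
    }
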
diff --git a/drivers/hid/wacom_wac.c b/drivers/hid/wacom_wac.c
index 8b29949..01a4f05 100644
--- a/drivers/hid/wacom_wac.c
+++ b/drivers/hid/wacom_wac.c
@@ -2481,7 +2481,7 @@
 		if (features->pktlen == WACOM_PKGLEN_BBTOUCH3) {
 			if (features->touch_max)
 				features->device_type |= WACOM_DEVICETYPE_TOUCH;
-			if (features->type >= INTUOSHT || features->type <= BAMBOO_PT)
+			if (features->type >= INTUOSHT && features->type <= BAMBOO_PT)
 				features->device_type |= WACOM_DEVICETYPE_PAD;
 
 			features->x_max = 4096;
@@ -3213,7 +3213,8 @@
 	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x336 =
 	{ "Wacom DTU1141", 23472, 13203, 1023, 0,
-	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4 };
+	  DTUS, WACOM_INTUOS_RES, WACOM_INTUOS_RES, 4,
+	  WACOM_DTU_OFFSET, WACOM_DTU_OFFSET };
 static const struct wacom_features wacom_features_0x57 =
 	{ "Wacom DTK2241", 95640, 54060, 2047, 63,
 	  DTK, WACOM_INTUOS3_RES, WACOM_INTUOS3_RES, 6,
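The wacom_wac.c fix replaces || with && in a range test: with INTUOSHT below
BAMBOO_PT, "type >= INTUOSHT || type <= BAMBOO_PT" is true for every possible
type, so every BBTOUCH3 device was being given a PAD interface. A standalone
demonstration with made-up bounds:

    #include <stdio.h>

    /* With bounds A <= B, (x >= A || x <= B) holds for every x: any
     * value below A already satisfies x <= B, and vice versa.  A range
     * test needs &&.  The bounds are made up, not the Wacom type codes. */
    #define A 10
    #define B 20

    int main(void)
    {
            int x;

            for (x = 0; x <= 30; x += 15)
                    printf("x=%2d  ||:%d  &&:%d\n", x,
                           x >= A || x <= B, x >= A && x <= B);
            return 0;
    }
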
diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig
index 842b004..8f59f05 100644
--- a/drivers/hwmon/Kconfig
+++ b/drivers/hwmon/Kconfig
@@ -324,6 +324,7 @@
 config SENSORS_ARM_SCPI
 	tristate "ARM SCPI Sensors"
 	depends on ARM_SCPI_PROTOCOL
+	depends on THERMAL || !THERMAL_OF
 	help
 	  This driver provides support for temperature, voltage, current
 	  and power sensors available on ARM Ltd's SCP based platforms. The
@@ -1471,6 +1472,7 @@
 config SENSORS_INA2XX
 	tristate "Texas Instruments INA219 and compatibles"
 	depends on I2C
+	select REGMAP_I2C
 	help
 	  If you say yes here you get support for INA219, INA220, INA226,
 	  INA230, and INA231 power monitor chips.
diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
index 1f5e956..0af7fd3 100644
--- a/drivers/hwmon/applesmc.c
+++ b/drivers/hwmon/applesmc.c
@@ -537,7 +537,7 @@
 static int applesmc_init_smcreg_try(void)
 {
 	struct applesmc_registers *s = &smcreg;
-	bool left_light_sensor, right_light_sensor;
+	bool left_light_sensor = 0, right_light_sensor = 0;
 	unsigned int count;
 	u8 tmp[1];
 	int ret;
diff --git a/drivers/hwmon/scpi-hwmon.c b/drivers/hwmon/scpi-hwmon.c
index 2c1241b..7e20567 100644
--- a/drivers/hwmon/scpi-hwmon.c
+++ b/drivers/hwmon/scpi-hwmon.c
@@ -117,7 +117,7 @@
 	struct scpi_ops *scpi_ops;
 	struct device *hwdev, *dev = &pdev->dev;
 	struct scpi_sensors *scpi_sensors;
-	int ret;
+	int ret, idx;
 
 	scpi_ops = get_scpi_ops();
 	if (!scpi_ops)
@@ -146,8 +146,8 @@
 
 	scpi_sensors->scpi_ops = scpi_ops;
 
-	for (i = 0; i < nr_sensors; i++) {
-		struct sensor_data *sensor = &scpi_sensors->data[i];
+	for (i = 0, idx = 0; i < nr_sensors; i++) {
+		struct sensor_data *sensor = &scpi_sensors->data[idx];
 
 		ret = scpi_ops->sensor_get_info(i, &sensor->info);
 		if (ret)
@@ -183,7 +183,7 @@
 			num_power++;
 			break;
 		default:
-			break;
+			continue;
 		}
 
 		sensor->dev_attr_input.attr.mode = S_IRUGO;
@@ -194,11 +194,12 @@
 		sensor->dev_attr_label.show = scpi_show_label;
 		sensor->dev_attr_label.attr.name = sensor->label;
 
-		scpi_sensors->attrs[i << 1] = &sensor->dev_attr_input.attr;
-		scpi_sensors->attrs[(i << 1) + 1] = &sensor->dev_attr_label.attr;
+		scpi_sensors->attrs[idx << 1] = &sensor->dev_attr_input.attr;
+		scpi_sensors->attrs[(idx << 1) + 1] = &sensor->dev_attr_label.attr;
 
-		sysfs_attr_init(scpi_sensors->attrs[i << 1]);
-		sysfs_attr_init(scpi_sensors->attrs[(i << 1) + 1]);
+		sysfs_attr_init(scpi_sensors->attrs[idx << 1]);
+		sysfs_attr_init(scpi_sensors->attrs[(idx << 1) + 1]);
+		idx++;
 	}
 
 	scpi_sensors->group.attrs = scpi_sensors->attrs;
@@ -236,8 +237,8 @@
 
 		zone->sensor_id = i;
 		zone->scpi_sensors = scpi_sensors;
-		zone->tzd = thermal_zone_of_sensor_register(dev, i, zone,
-							    &scpi_sensor_ops);
+		zone->tzd = thermal_zone_of_sensor_register(dev,
+				sensor->info.sensor_id, zone, &scpi_sensor_ops);
 		/*
 		 * The call to thermal_zone_of_sensor_register returns
 		 * an error for sensors that are not associated with
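The scpi-hwmon rework splits the single loop counter into i (every probed
sensor) and idx (only sensors of a known type), and turns the default case
into a continue, so the attrs array is packed densely instead of containing
holes for skipped sensors. The compaction pattern in isolation:

    #include <stdio.h>

    int main(void)
    {
            int probed[] = { 1, -1, 2, -1, 3 }; /* -1: unsupported type */
            int kept[5];
            unsigned int i, idx;

            /* i walks every entry, idx advances only for kept ones */
            for (i = 0, idx = 0; i < 5; i++) {
                    if (probed[i] < 0)
                            continue; /* mirrors the default: continue */
                    kept[idx++] = probed[i];
            }
            for (i = 0; i < idx; i++)
                    printf("kept[%u] = %d\n", i, kept[i]);
            return 0;
    }
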
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index e24c2b6..7b0aa82 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -126,6 +126,7 @@
 	    Sunrise Point-LP (PCH)
 	    DNV (SOC)
 	    Broxton (SOC)
+	    Lewisburg (PCH)
 
 	  This driver can also be built as a module.  If so, the module
 	  will be called i2c-i801.
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index c306751..f62d697 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -62,6 +62,8 @@
  * Sunrise Point-LP (PCH)	0x9d23	32	hard	yes	yes	yes
  * DNV (SOC)			0x19df	32	hard	yes	yes	yes
  * Broxton (SOC)		0x5ad4	32	hard	yes	yes	yes
+ * Lewisburg (PCH)		0xa1a3	32	hard	yes	yes	yes
+ * Lewisburg Supersku (PCH)	0xa223	32	hard	yes	yes	yes
  *
  * Features supported by this driver:
  * Software PEC				no
@@ -206,6 +208,8 @@
 #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS	0x9d23
 #define PCI_DEVICE_ID_INTEL_DNV_SMBUS			0x19df
 #define PCI_DEVICE_ID_INTEL_BROXTON_SMBUS		0x5ad4
+#define PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS		0xa1a3
+#define PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS	0xa223
 
 struct i801_mux_config {
 	char *gpio_chip;
@@ -869,6 +873,8 @@
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_DNV_SMBUS) },
 	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BROXTON_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS) },
+	{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS) },
 	{ 0, }
 };
 
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 1e4d99d..9bb0b05 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -50,6 +50,7 @@
 #include <linux/of_device.h>
 #include <linux/of_dma.h>
 #include <linux/of_gpio.h>
+#include <linux/pinctrl/consumer.h>
 #include <linux/platform_data/i2c-imx.h>
 #include <linux/platform_device.h>
 #include <linux/sched.h>
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index e23a7b0..0b20449 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -662,8 +662,10 @@
 
 static void xiic_start_xfer(struct xiic_i2c *i2c)
 {
-
+	spin_lock(&i2c->lock);
+	xiic_reinit(i2c);
 	__xiic_start_xfer(i2c);
+	spin_unlock(&i2c->lock);
 }
 
 static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index 040af5c..ba8eb08 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -715,7 +715,7 @@
 		if (wakeirq > 0 && wakeirq != client->irq)
 			status = dev_pm_set_dedicated_wake_irq(dev, wakeirq);
 		else if (client->irq > 0)
-			status = dev_pm_set_wake_irq(dev, wakeirq);
+			status = dev_pm_set_wake_irq(dev, client->irq);
 		else
 			status = 0;
 
diff --git a/drivers/iommu/s390-iommu.c b/drivers/iommu/s390-iommu.c
index cbe198c..471ee36 100644
--- a/drivers/iommu/s390-iommu.c
+++ b/drivers/iommu/s390-iommu.c
@@ -216,6 +216,7 @@
 	u8 *page_addr = (u8 *) (pa & PAGE_MASK);
 	dma_addr_t start_dma_addr = dma_addr;
 	unsigned long irq_flags, nr_pages, i;
+	unsigned long *entry;
 	int rc = 0;
 
 	if (dma_addr < s390_domain->domain.geometry.aperture_start ||
@@ -228,8 +229,12 @@
 
 	spin_lock_irqsave(&s390_domain->dma_table_lock, irq_flags);
 	for (i = 0; i < nr_pages; i++) {
-		dma_update_cpu_trans(s390_domain->dma_table, page_addr,
-				     dma_addr, flags);
+		entry = dma_walk_cpu_trans(s390_domain->dma_table, dma_addr);
+		if (!entry) {
+			rc = -ENOMEM;
+			goto undo_cpu_trans;
+		}
+		dma_update_cpu_trans(entry, page_addr, flags);
 		page_addr += PAGE_SIZE;
 		dma_addr += PAGE_SIZE;
 	}
@@ -242,6 +247,20 @@
 			break;
 	}
 	spin_unlock(&s390_domain->list_lock);
+
+undo_cpu_trans:
+	if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+		flags = ZPCI_PTE_INVALID;
+		while (i-- > 0) {
+			page_addr -= PAGE_SIZE;
+			dma_addr -= PAGE_SIZE;
+			entry = dma_walk_cpu_trans(s390_domain->dma_table,
+						   dma_addr);
+			if (!entry)
+				break;
+			dma_update_cpu_trans(entry, page_addr, flags);
+		}
+	}
 	spin_unlock_irqrestore(&s390_domain->dma_table_lock, irq_flags);
 
 	return rc;
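The s390-iommu change makes the mapping loop transactional: dma_walk_cpu_trans()
can now fail partway through, and the new undo_cpu_trans path walks back over
the entries already mapped and invalidates them. The "while (i-- > 0)" unwind
idiom in isolation:

    #include <stdio.h>

    static int map_one(unsigned int n)
    {
            return n == 3 ? -1 : 0; /* simulate failure on entry 3 */
    }

    int main(void)
    {
            unsigned int i, n = 5;
            int rc = 0;

            for (i = 0; i < n; i++) {
                    if (map_one(i)) {
                            rc = -1;
                            break;
                    }
                    printf("mapped %u\n", i);
            }
            /* revisit exactly the entries 0..i-1 that succeeded */
            if (rc)
                    while (i-- > 0)
                            printf("unmapped %u\n", i);
            return rc;
    }
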
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 23b6c8e..d848616 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -65,8 +65,7 @@
 #define MMC_SANITIZE_REQ_TIMEOUT 240000
 #define MMC_EXTRACT_INDEX_FROM_ARG(x) ((x & 0x00FF0000) >> 16)
 
-#define mmc_req_rel_wr(req)	(((req->cmd_flags & REQ_FUA) || \
-				  (req->cmd_flags & REQ_META)) && \
+#define mmc_req_rel_wr(req)	((req->cmd_flags & REQ_FUA) && \
 				  (rq_data_dir(req) == WRITE))
 #define PACKED_CMD_VER	0x01
 #define PACKED_CMD_WR	0x02
@@ -1467,13 +1466,9 @@
 
 	/*
 	 * Reliable writes are used to implement Forced Unit Access and
-	 * REQ_META accesses, and are supported only on MMCs.
-	 *
-	 * XXX: this really needs a good explanation of why REQ_META
-	 * is treated special.
+	 * are supported only on MMCs.
 	 */
-	bool do_rel_wr = ((req->cmd_flags & REQ_FUA) ||
-			  (req->cmd_flags & REQ_META)) &&
+	bool do_rel_wr = (req->cmd_flags & REQ_FUA) &&
 		(rq_data_dir(req) == WRITE) &&
 		(md->flags & MMC_BLK_REL_WR);
 
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index c793fda..3a9a79e 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -1040,71 +1040,6 @@
 	return err;
 }
 
-static int mmc_select_hs400(struct mmc_card *card)
-{
-	struct mmc_host *host = card->host;
-	int err = 0;
-	u8 val;
-
-	/*
-	 * HS400 mode requires 8-bit bus width
-	 */
-	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
-	      host->ios.bus_width == MMC_BUS_WIDTH_8))
-		return 0;
-
-	/*
-	 * Before switching to dual data rate operation for HS400,
-	 * it is required to convert from HS200 mode to HS mode.
-	 */
-	mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
-	mmc_set_bus_speed(card);
-
-	val = EXT_CSD_TIMING_HS |
-	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
-	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-			   EXT_CSD_HS_TIMING, val,
-			   card->ext_csd.generic_cmd6_time,
-			   true, true, true);
-	if (err) {
-		pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
-			mmc_hostname(host), err);
-		return err;
-	}
-
-	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-			 EXT_CSD_BUS_WIDTH,
-			 EXT_CSD_DDR_BUS_WIDTH_8,
-			 card->ext_csd.generic_cmd6_time);
-	if (err) {
-		pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
-			mmc_hostname(host), err);
-		return err;
-	}
-
-	val = EXT_CSD_TIMING_HS400 |
-	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
-	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
-			   EXT_CSD_HS_TIMING, val,
-			   card->ext_csd.generic_cmd6_time,
-			   true, true, true);
-	if (err) {
-		pr_err("%s: switch to hs400 failed, err:%d\n",
-			 mmc_hostname(host), err);
-		return err;
-	}
-
-	mmc_set_timing(host, MMC_TIMING_MMC_HS400);
-	mmc_set_bus_speed(card);
-
-	return 0;
-}
-
-int mmc_hs200_to_hs400(struct mmc_card *card)
-{
-	return mmc_select_hs400(card);
-}
-
 /* Caller must hold re-tuning */
 static int mmc_switch_status(struct mmc_card *card)
 {
@@ -1118,6 +1053,97 @@
 	return mmc_switch_status_error(card->host, status);
 }
 
+static int mmc_select_hs400(struct mmc_card *card)
+{
+	struct mmc_host *host = card->host;
+	bool send_status = true;
+	unsigned int max_dtr;
+	int err = 0;
+	u8 val;
+
+	/*
+	 * HS400 mode requires 8-bit bus width
+	 */
+	if (!(card->mmc_avail_type & EXT_CSD_CARD_TYPE_HS400 &&
+	      host->ios.bus_width == MMC_BUS_WIDTH_8))
+		return 0;
+
+	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+		send_status = false;
+
+	/* Reduce frequency to HS frequency */
+	max_dtr = card->ext_csd.hs_max_dtr;
+	mmc_set_clock(host, max_dtr);
+
+	/* Switch card to HS mode */
+	val = EXT_CSD_TIMING_HS |
+	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			   EXT_CSD_HS_TIMING, val,
+			   card->ext_csd.generic_cmd6_time,
+			   true, send_status, true);
+	if (err) {
+		pr_err("%s: switch to high-speed from hs200 failed, err:%d\n",
+			mmc_hostname(host), err);
+		return err;
+	}
+
+	/* Set host controller to HS timing */
+	mmc_set_timing(card->host, MMC_TIMING_MMC_HS);
+
+	if (!send_status) {
+		err = mmc_switch_status(card);
+		if (err)
+			goto out_err;
+	}
+
+	/* Switch card to DDR */
+	err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			 EXT_CSD_BUS_WIDTH,
+			 EXT_CSD_DDR_BUS_WIDTH_8,
+			 card->ext_csd.generic_cmd6_time);
+	if (err) {
+		pr_err("%s: switch to bus width for hs400 failed, err:%d\n",
+			mmc_hostname(host), err);
+		return err;
+	}
+
+	/* Switch card to HS400 */
+	val = EXT_CSD_TIMING_HS400 |
+	      card->drive_strength << EXT_CSD_DRV_STR_SHIFT;
+	err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
+			   EXT_CSD_HS_TIMING, val,
+			   card->ext_csd.generic_cmd6_time,
+			   true, send_status, true);
+	if (err) {
+		pr_err("%s: switch to hs400 failed, err:%d\n",
+			 mmc_hostname(host), err);
+		return err;
+	}
+
+	/* Set host controller to HS400 timing and frequency */
+	mmc_set_timing(host, MMC_TIMING_MMC_HS400);
+	mmc_set_bus_speed(card);
+
+	if (!send_status) {
+		err = mmc_switch_status(card);
+		if (err)
+			goto out_err;
+	}
+
+	return 0;
+
+out_err:
+	pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+	       __func__, err);
+	return err;
+}
+
+int mmc_hs200_to_hs400(struct mmc_card *card)
+{
+	return mmc_select_hs400(card);
+}
+
 int mmc_hs400_to_hs200(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
@@ -1219,6 +1245,8 @@
 static int mmc_select_hs200(struct mmc_card *card)
 {
 	struct mmc_host *host = card->host;
+	bool send_status = true;
+	unsigned int old_timing;
 	int err = -EINVAL;
 	u8 val;
 
@@ -1234,6 +1262,9 @@
 
 	mmc_select_driver_type(card);
 
+	if (host->caps & MMC_CAP_WAIT_WHILE_BUSY)
+		send_status = false;
+
 	/*
 	 * Set the bus width(4 or 8) with host's support and
 	 * switch to HS200 mode if bus width is set successfully.
@@ -1245,11 +1276,25 @@
 		err = __mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
 				   EXT_CSD_HS_TIMING, val,
 				   card->ext_csd.generic_cmd6_time,
-				   true, true, true);
-		if (!err)
-			mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+				   true, send_status, true);
+		if (err)
+			goto err;
+		old_timing = host->ios.timing;
+		mmc_set_timing(host, MMC_TIMING_MMC_HS200);
+		if (!send_status) {
+			err = mmc_switch_status(card);
+			/*
+			 * mmc_select_timing() assumes timing has not changed if
+			 * it is a switch error.
+			 */
+			if (err == -EBADMSG)
+				mmc_set_timing(host, old_timing);
+		}
 	}
 err:
+	if (err)
+		pr_err("%s: %s failed, error %d\n", mmc_hostname(card->host),
+		       __func__, err);
 	return err;
 }
 
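The reworked HS200/HS400 selection keys off MMC_CAP_WAIT_WHILE_BUSY: when the
host can wait on the card's busy signal itself, __mmc_switch() is told not to
poll the card's status (send_status = false), and the status is instead checked
explicitly with mmc_switch_status() after the host has been retimed, so the
verification command goes out at a timing both ends agree on. A stub sketch of
that ordering (all functions below are stand-ins, not the mmc core API):

    #include <stdio.h>

    static int mmc_switch_cmd(const char *mode)
    {
            printf("CMD6: switch card to %s\n", mode);
            return 0;
    }

    static void host_set_timing(const char *mode)
    {
            printf("host: timing set to %s\n", mode);
    }

    static int mmc_check_status(void)
    {
            printf("CMD13: status OK\n");
            return 0;
    }

    int main(void)
    {
            int host_waits_while_busy = 1; /* MMC_CAP_WAIT_WHILE_BUSY */

            if (mmc_switch_cmd("HS"))
                    return 1;
            host_set_timing("HS");          /* retime the host first... */
            if (host_waits_while_busy && mmc_check_status())
                    return 1;               /* ...then verify explicitly */
            return 0;
    }
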
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index af71de5..1dee533 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -473,6 +473,7 @@
 
 config MMC_GOLDFISH
 	tristate "goldfish qemu Multimedia Card Interface support"
+	depends on HAS_DMA
 	depends on GOLDFISH || COMPILE_TEST
 	help
 	  This selects the Goldfish Multimedia card Interface emulation
diff --git a/drivers/mmc/host/mtk-sd.c b/drivers/mmc/host/mtk-sd.c
index 39568cc..33dfd7e 100644
--- a/drivers/mmc/host/mtk-sd.c
+++ b/drivers/mmc/host/mtk-sd.c
@@ -1276,7 +1276,7 @@
 	int start = 0, len = 0;
 	int start_final = 0, len_final = 0;
 	u8 final_phase = 0xff;
-	struct msdc_delay_phase delay_phase;
+	struct msdc_delay_phase delay_phase = { 0, };
 
 	if (delay == 0) {
 		dev_err(host->dev, "phase error: [map:%x]\n", delay);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index 8cadd74..ce08896 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -805,7 +805,7 @@
 		goto out;
 	} else {
 		mmc->caps |= host->pdata->gpio_card_ro_invert ?
-			MMC_CAP2_RO_ACTIVE_HIGH : 0;
+			0 : MMC_CAP2_RO_ACTIVE_HIGH;
 	}
 
 	if (gpio_is_valid(gpio_cd))
diff --git a/drivers/mtd/nand/jz4740_nand.c b/drivers/mtd/nand/jz4740_nand.c
index dc4e844..5a99a93 100644
--- a/drivers/mtd/nand/jz4740_nand.c
+++ b/drivers/mtd/nand/jz4740_nand.c
@@ -25,6 +25,7 @@
 
 #include <linux/gpio.h>
 
+#include <asm/mach-jz4740/gpio.h>
 #include <asm/mach-jz4740/jz4740_nand.h>
 
 #define JZ_REG_NAND_CTRL	0x50
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index cc74142..ece544e 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -3110,7 +3110,7 @@
  */
 static void nand_shutdown(struct mtd_info *mtd)
 {
-	nand_get_device(mtd, FL_SHUTDOWN);
+	nand_get_device(mtd, FL_PM_SUSPENDED);
 }
 
 /* Set default functions */
diff --git a/drivers/net/dsa/mv88e6060.c b/drivers/net/dsa/mv88e6060.c
index 9093577..0527f48 100644
--- a/drivers/net/dsa/mv88e6060.c
+++ b/drivers/net/dsa/mv88e6060.c
@@ -15,9 +15,7 @@
 #include <linux/netdevice.h>
 #include <linux/phy.h>
 #include <net/dsa.h>
-
-#define REG_PORT(p)		(8 + (p))
-#define REG_GLOBAL		0x0f
+#include "mv88e6060.h"
 
 static int reg_read(struct dsa_switch *ds, int addr, int reg)
 {
@@ -67,13 +65,14 @@
 	if (bus == NULL)
 		return NULL;
 
-	ret = mdiobus_read(bus, sw_addr + REG_PORT(0), 0x03);
+	ret = mdiobus_read(bus, sw_addr + REG_PORT(0), PORT_SWITCH_ID);
 	if (ret >= 0) {
-		if (ret == 0x0600)
+		if (ret == PORT_SWITCH_ID_6060)
 			return "Marvell 88E6060 (A0)";
-		if (ret == 0x0601 || ret == 0x0602)
+		if (ret == PORT_SWITCH_ID_6060_R1 ||
+		    ret == PORT_SWITCH_ID_6060_R2)
 			return "Marvell 88E6060 (B0)";
-		if ((ret & 0xfff0) == 0x0600)
+		if ((ret & PORT_SWITCH_ID_6060_MASK) == PORT_SWITCH_ID_6060)
 			return "Marvell 88E6060";
 	}
 
@@ -87,22 +86,26 @@
 	unsigned long timeout;
 
 	/* Set all ports to the disabled state. */
-	for (i = 0; i < 6; i++) {
-		ret = REG_READ(REG_PORT(i), 0x04);
-		REG_WRITE(REG_PORT(i), 0x04, ret & 0xfffc);
+	for (i = 0; i < MV88E6060_PORTS; i++) {
+		ret = REG_READ(REG_PORT(i), PORT_CONTROL);
+		REG_WRITE(REG_PORT(i), PORT_CONTROL,
+			  ret & ~PORT_CONTROL_STATE_MASK);
 	}
 
 	/* Wait for transmit queues to drain. */
 	usleep_range(2000, 4000);
 
 	/* Reset the switch. */
-	REG_WRITE(REG_GLOBAL, 0x0a, 0xa130);
+	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+		  GLOBAL_ATU_CONTROL_SWRESET |
+		  GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
+		  GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
 
 	/* Wait up to one second for reset to complete. */
 	timeout = jiffies + 1 * HZ;
 	while (time_before(jiffies, timeout)) {
-		ret = REG_READ(REG_GLOBAL, 0x00);
-		if ((ret & 0x8000) == 0x0000)
+		ret = REG_READ(REG_GLOBAL, GLOBAL_STATUS);
+		if (ret & GLOBAL_STATUS_INIT_READY)
 			break;
 
 		usleep_range(1000, 2000);
@@ -119,13 +122,15 @@
 	 * set the maximum frame size to 1536 bytes, and mask all
 	 * interrupt sources.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x04, 0x0800);
+	REG_WRITE(REG_GLOBAL, GLOBAL_CONTROL, GLOBAL_CONTROL_MAX_FRAME_1536);
 
 	/* Enable automatic address learning, set the address
 	 * database size to 1024 entries, and set the default aging
 	 * time to 5 minutes.
 	 */
-	REG_WRITE(REG_GLOBAL, 0x0a, 0x2130);
+	REG_WRITE(REG_GLOBAL, GLOBAL_ATU_CONTROL,
+		  GLOBAL_ATU_CONTROL_ATUSIZE_1024 |
+		  GLOBAL_ATU_CONTROL_ATE_AGE_5MIN);
 
 	return 0;
 }
@@ -139,25 +144,30 @@
 	 * state to Forwarding.  Additionally, if this is the CPU
 	 * port, enable Ingress and Egress Trailer tagging mode.
 	 */
-	REG_WRITE(addr, 0x04, dsa_is_cpu_port(ds, p) ?  0x4103 : 0x0003);
+	REG_WRITE(addr, PORT_CONTROL,
+		  dsa_is_cpu_port(ds, p) ?
+			PORT_CONTROL_TRAILER |
+			PORT_CONTROL_INGRESS_MODE |
+			PORT_CONTROL_STATE_FORWARDING :
+			PORT_CONTROL_STATE_FORWARDING);
 
 	/* Port based VLAN map: give each port its own address
 	 * database, allow the CPU port to talk to each of the 'real'
 	 * ports, and allow each of the 'real' ports to only talk to
 	 * the CPU port.
 	 */
-	REG_WRITE(addr, 0x06,
-			((p & 0xf) << 12) |
-			 (dsa_is_cpu_port(ds, p) ?
-				ds->phys_port_mask :
-				(1 << ds->dst->cpu_port)));
+	REG_WRITE(addr, PORT_VLAN_MAP,
+		  ((p & 0xf) << PORT_VLAN_MAP_DBNUM_SHIFT) |
+		   (dsa_is_cpu_port(ds, p) ?
+			ds->phys_port_mask :
+			BIT(ds->dst->cpu_port)));
 
 	/* Port Association Vector: when learning source addresses
 	 * of packets, add the address to the address database using
 	 * a port bitmap that has only the bit for this port set and
 	 * the other bits clear.
 	 */
-	REG_WRITE(addr, 0x0b, 1 << p);
+	REG_WRITE(addr, PORT_ASSOC_VECTOR, BIT(p));
 
 	return 0;
 }
@@ -177,7 +187,7 @@
 	if (ret < 0)
 		return ret;
 
-	for (i = 0; i < 6; i++) {
+	for (i = 0; i < MV88E6060_PORTS; i++) {
 		ret = mv88e6060_setup_port(ds, i);
 		if (ret < 0)
 			return ret;
@@ -188,16 +198,17 @@
 
 static int mv88e6060_set_addr(struct dsa_switch *ds, u8 *addr)
 {
-	REG_WRITE(REG_GLOBAL, 0x01, (addr[0] << 8) | addr[1]);
-	REG_WRITE(REG_GLOBAL, 0x02, (addr[2] << 8) | addr[3]);
-	REG_WRITE(REG_GLOBAL, 0x03, (addr[4] << 8) | addr[5]);
+	/* Use the same MAC Address as FD Pause frames for all ports */
+	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_01, (addr[0] << 9) | addr[1]);
+	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_23, (addr[2] << 8) | addr[3]);
+	REG_WRITE(REG_GLOBAL, GLOBAL_MAC_45, (addr[4] << 8) | addr[5]);
 
 	return 0;
 }
 
 static int mv88e6060_port_to_phy_addr(int port)
 {
-	if (port >= 0 && port <= 5)
+	if (port >= 0 && port < MV88E6060_PORTS)
 		return port;
 	return -1;
 }
@@ -225,54 +236,6 @@
 	return reg_write(ds, addr, regnum, val);
 }
 
-static void mv88e6060_poll_link(struct dsa_switch *ds)
-{
-	int i;
-
-	for (i = 0; i < DSA_MAX_PORTS; i++) {
-		struct net_device *dev;
-		int uninitialized_var(port_status);
-		int link;
-		int speed;
-		int duplex;
-		int fc;
-
-		dev = ds->ports[i];
-		if (dev == NULL)
-			continue;
-
-		link = 0;
-		if (dev->flags & IFF_UP) {
-			port_status = reg_read(ds, REG_PORT(i), 0x00);
-			if (port_status < 0)
-				continue;
-
-			link = !!(port_status & 0x1000);
-		}
-
-		if (!link) {
-			if (netif_carrier_ok(dev)) {
-				netdev_info(dev, "link down\n");
-				netif_carrier_off(dev);
-			}
-			continue;
-		}
-
-		speed = (port_status & 0x0100) ? 100 : 10;
-		duplex = (port_status & 0x0200) ? 1 : 0;
-		fc = ((port_status & 0xc000) == 0xc000) ? 1 : 0;
-
-		if (!netif_carrier_ok(dev)) {
-			netdev_info(dev,
-				    "link up, %d Mb/s, %s duplex, flow control %sabled\n",
-				    speed,
-				    duplex ? "full" : "half",
-				    fc ? "en" : "dis");
-			netif_carrier_on(dev);
-		}
-	}
-}
-
 static struct dsa_switch_driver mv88e6060_switch_driver = {
 	.tag_protocol	= DSA_TAG_PROTO_TRAILER,
 	.probe		= mv88e6060_probe,
@@ -280,7 +243,6 @@
 	.set_addr	= mv88e6060_set_addr,
 	.phy_read	= mv88e6060_phy_read,
 	.phy_write	= mv88e6060_phy_write,
-	.poll_link	= mv88e6060_poll_link,
 };
 
 static int __init mv88e6060_init(void)
diff --git a/drivers/net/dsa/mv88e6060.h b/drivers/net/dsa/mv88e6060.h
new file mode 100644
index 0000000..cc9b2ed
--- /dev/null
+++ b/drivers/net/dsa/mv88e6060.h
@@ -0,0 +1,111 @@
+/*
+ * drivers/net/dsa/mv88e6060.h - Marvell 88e6060 switch chip support
+ * Copyright (c) 2015 Neil Armstrong
+ *
+ * Based on mv88e6xxx.h
+ * Copyright (c) 2008 Marvell Semiconductor
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef __MV88E6060_H
+#define __MV88E6060_H
+
+#define MV88E6060_PORTS	6
+
+#define REG_PORT(p)		(0x8 + (p))
+#define PORT_STATUS		0x00
+#define PORT_STATUS_PAUSE_EN	BIT(15)
+#define PORT_STATUS_MY_PAUSE	BIT(14)
+#define PORT_STATUS_FC		(PORT_STATUS_MY_PAUSE | PORT_STATUS_PAUSE_EN)
+#define PORT_STATUS_RESOLVED	BIT(13)
+#define PORT_STATUS_LINK	BIT(12)
+#define PORT_STATUS_PORTMODE	BIT(11)
+#define PORT_STATUS_PHYMODE	BIT(10)
+#define PORT_STATUS_DUPLEX	BIT(9)
+#define PORT_STATUS_SPEED	BIT(8)
+#define PORT_SWITCH_ID		0x03
+#define PORT_SWITCH_ID_6060	0x0600
+#define PORT_SWITCH_ID_6060_MASK	0xfff0
+#define PORT_SWITCH_ID_6060_R1	0x0601
+#define PORT_SWITCH_ID_6060_R2	0x0602
+#define PORT_CONTROL		0x04
+#define PORT_CONTROL_FORCE_FLOW_CTRL	BIT(15)
+#define PORT_CONTROL_TRAILER	BIT(14)
+#define PORT_CONTROL_HEADER	BIT(11)
+#define PORT_CONTROL_INGRESS_MODE	BIT(8)
+#define PORT_CONTROL_VLAN_TUNNEL	BIT(7)
+#define PORT_CONTROL_STATE_MASK	0x03
+#define PORT_CONTROL_STATE_DISABLED	0x00
+#define PORT_CONTROL_STATE_BLOCKING	0x01
+#define PORT_CONTROL_STATE_LEARNING	0x02
+#define PORT_CONTROL_STATE_FORWARDING	0x03
+#define PORT_VLAN_MAP		0x06
+#define PORT_VLAN_MAP_DBNUM_SHIFT	12
+#define PORT_VLAN_MAP_TABLE_MASK	0x1f
+#define PORT_ASSOC_VECTOR	0x0b
+#define PORT_ASSOC_VECTOR_MONITOR	BIT(15)
+#define PORT_ASSOC_VECTOR_PAV_MASK	0x1f
+#define PORT_RX_CNTR		0x10
+#define PORT_TX_CNTR		0x11
+
+#define REG_GLOBAL		0x0f
+#define GLOBAL_STATUS		0x00
+#define GLOBAL_STATUS_SW_MODE_MASK	(0x3 << 12)
+#define GLOBAL_STATUS_SW_MODE_0	(0x0 << 12)
+#define GLOBAL_STATUS_SW_MODE_1	(0x1 << 12)
+#define GLOBAL_STATUS_SW_MODE_2	(0x2 << 12)
+#define GLOBAL_STATUS_SW_MODE_3	(0x3 << 12)
+#define GLOBAL_STATUS_INIT_READY	BIT(11)
+#define GLOBAL_STATUS_ATU_FULL		BIT(3)
+#define GLOBAL_STATUS_ATU_DONE		BIT(2)
+#define GLOBAL_STATUS_PHY_INT	BIT(1)
+#define GLOBAL_STATUS_EEINT	BIT(0)
+#define GLOBAL_MAC_01		0x01
+#define GLOBAL_MAC_01_DIFF_ADDR	BIT(8)
+#define GLOBAL_MAC_23		0x02
+#define GLOBAL_MAC_45		0x03
+#define GLOBAL_CONTROL		0x04
+#define GLOBAL_CONTROL_DISCARD_EXCESS	BIT(13)
+#define GLOBAL_CONTROL_MAX_FRAME_1536	BIT(10)
+#define GLOBAL_CONTROL_RELOAD_EEPROM	BIT(9)
+#define GLOBAL_CONTROL_CTRMODE		BIT(8)
+#define GLOBAL_CONTROL_ATU_FULL_EN	BIT(3)
+#define GLOBAL_CONTROL_ATU_DONE_EN	BIT(2)
+#define GLOBAL_CONTROL_PHYINT_EN	BIT(1)
+#define GLOBAL_CONTROL_EEPROM_DONE_EN	BIT(0)
+#define GLOBAL_ATU_CONTROL	0x0a
+#define GLOBAL_ATU_CONTROL_SWRESET	BIT(15)
+#define GLOBAL_ATU_CONTROL_LEARNDIS	BIT(14)
+#define GLOBAL_ATU_CONTROL_ATUSIZE_256	(0x0 << 12)
+#define GLOBAL_ATU_CONTROL_ATUSIZE_512	(0x1 << 12)
+#define GLOBAL_ATU_CONTROL_ATUSIZE_1024	(0x2 << 12)
+#define GLOBAL_ATU_CONTROL_ATE_AGE_SHIFT	4
+#define GLOBAL_ATU_CONTROL_ATE_AGE_MASK	(0xff << 4)
+#define GLOBAL_ATU_CONTROL_ATE_AGE_5MIN	(0x13 << 4)
+#define GLOBAL_ATU_OP		0x0b
+#define GLOBAL_ATU_OP_BUSY	BIT(15)
+#define GLOBAL_ATU_OP_NOP		(0 << 12)
+#define GLOBAL_ATU_OP_FLUSH_ALL	((1 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_UNLOCKED	((2 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_LOAD_DB		((3 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_GET_NEXT_DB	((4 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_DB		((5 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_OP_FLUSH_UNLOCKED_DB ((6 << 12) | GLOBAL_ATU_OP_BUSY)
+#define GLOBAL_ATU_DATA		0x0c
+#define GLOBAL_ATU_DATA_PORT_VECTOR_MASK	0x3f0
+#define GLOBAL_ATU_DATA_PORT_VECTOR_SHIFT	4
+#define GLOBAL_ATU_DATA_STATE_MASK		0x0f
+#define GLOBAL_ATU_DATA_STATE_UNUSED		0x00
+#define GLOBAL_ATU_DATA_STATE_UC_STATIC		0x0e
+#define GLOBAL_ATU_DATA_STATE_UC_LOCKED		0x0f
+#define GLOBAL_ATU_DATA_STATE_MC_STATIC		0x07
+#define GLOBAL_ATU_DATA_STATE_MC_LOCKED		0x0e
+#define GLOBAL_ATU_MAC_01	0x0d
+#define GLOBAL_ATU_MAC_23	0x0e
+#define GLOBAL_ATU_MAC_45	0x0f
+
+#endif
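With the header in place, raw register words can be decoded with named masks
instead of the bare 0x1000/0x0200/0x0100 constants the removed poll_link code
used. For example, interpreting a PORT_STATUS value (BIT() spelled out so the
snippet builds standalone):

    #include <stdio.h>

    #define BIT(n)                  (1u << (n))
    #define PORT_STATUS_LINK        BIT(12)
    #define PORT_STATUS_DUPLEX      BIT(9)
    #define PORT_STATUS_SPEED       BIT(8)

    int main(void)
    {
            /* a link-up, 100 Mb/s, half-duplex status word */
            unsigned int status = PORT_STATUS_LINK | PORT_STATUS_SPEED;

            printf("link %s, %d Mb/s, %s duplex\n",
                   status & PORT_STATUS_LINK ? "up" : "down",
                   status & PORT_STATUS_SPEED ? 100 : 10,
                   status & PORT_STATUS_DUPLEX ? "full" : "half");
            return 0;
    }
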
diff --git a/drivers/net/ethernet/Kconfig b/drivers/net/ethernet/Kconfig
index 05aa759..955d06b 100644
--- a/drivers/net/ethernet/Kconfig
+++ b/drivers/net/ethernet/Kconfig
@@ -78,7 +78,6 @@
 source "drivers/net/ethernet/intel/Kconfig"
 source "drivers/net/ethernet/i825xx/Kconfig"
 source "drivers/net/ethernet/xscale/Kconfig"
-source "drivers/net/ethernet/icplus/Kconfig"
 
 config JME
 	tristate "JMicron(R) PCI-Express Gigabit Ethernet support"
diff --git a/drivers/net/ethernet/Makefile b/drivers/net/ethernet/Makefile
index ddfc808..4a2ee98 100644
--- a/drivers/net/ethernet/Makefile
+++ b/drivers/net/ethernet/Makefile
@@ -41,7 +41,6 @@
 obj-$(CONFIG_NET_VENDOR_INTEL) += intel/
 obj-$(CONFIG_NET_VENDOR_I825XX) += i825xx/
 obj-$(CONFIG_NET_VENDOR_XSCALE) += xscale/
-obj-$(CONFIG_IP1000) += icplus/
 obj-$(CONFIG_JME) += jme.o
 obj-$(CONFIG_KORINA) += korina.o
 obj-$(CONFIG_LANTIQ_ETOP) += lantiq_etop.o
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
index f1d62d5..c9b0367 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
@@ -13207,7 +13207,7 @@
 
 	/* VF with OLD Hypervisor or old PF do not support filtering */
 	if (IS_PF(bp)) {
-		if (CHIP_IS_E1x(bp))
+		if (chip_is_e1x)
 			bp->accept_any_vlan = true;
 		else
 			dev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index f683d97..b895044 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -560,7 +560,7 @@
 #endif
 
 /* For PCI-E Advanced Error Recovery (AER) Interface */
-static struct pci_error_handlers liquidio_err_handler = {
+static const struct pci_error_handlers liquidio_err_handler = {
 	.error_detected = liquidio_pcie_error_detected,
 	.mmio_enabled	= liquidio_pcie_mmio_enabled,
 	.slot_reset	= liquidio_pcie_slot_reset,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index a937772..7f709cb 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -1583,8 +1583,14 @@
 static void nicvf_remove(struct pci_dev *pdev)
 {
 	struct net_device *netdev = pci_get_drvdata(pdev);
-	struct nicvf *nic = netdev_priv(netdev);
-	struct net_device *pnetdev = nic->pnicvf->netdev;
+	struct nicvf *nic;
+	struct net_device *pnetdev;
+
+	if (!netdev)
+		return;
+
+	nic = netdev_priv(netdev);
+	pnetdev = nic->pnicvf->netdev;
 
 	/* Check if this Qset is assigned to different VF.
 	 * If yes, clean primary and all secondary Qsets.
diff --git a/drivers/net/ethernet/dlink/Kconfig b/drivers/net/ethernet/dlink/Kconfig
index f6e858d..ebdc832 100644
--- a/drivers/net/ethernet/dlink/Kconfig
+++ b/drivers/net/ethernet/dlink/Kconfig
@@ -17,15 +17,16 @@
 if NET_VENDOR_DLINK
 
 config DL2K
-	tristate "DL2000/TC902x-based Gigabit Ethernet support"
+	tristate "DL2000/TC902x/IP1000A-based Gigabit Ethernet support"
 	depends on PCI
 	select CRC32
 	---help---
-	  This driver supports DL2000/TC902x-based Gigabit ethernet cards,
+	  This driver supports DL2000/TC902x/IP1000A-based Gigabit Ethernet cards,
 	  which includes
 	  D-Link DGE-550T Gigabit Ethernet Adapter.
 	  D-Link DL2000-based Gigabit Ethernet Adapter.
 	  Sundance/Tamarack TC902x Gigabit Ethernet Adapter.
+	  IC Plus IP1000A-based cards.
 
 	  To compile this driver as a module, choose M here: the
 	  module will be called dl2k.
diff --git a/drivers/net/ethernet/dlink/dl2k.c b/drivers/net/ethernet/dlink/dl2k.c
index cf0a5fc..ccca479 100644
--- a/drivers/net/ethernet/dlink/dl2k.c
+++ b/drivers/net/ethernet/dlink/dl2k.c
@@ -253,6 +253,19 @@
 	if (err)
 		goto err_out_unmap_rx;
 
+	if (np->chip_id == CHIP_IP1000A &&
+	    (np->pdev->revision == 0x40 || np->pdev->revision == 0x41)) {
+		/* PHY magic taken from ipg driver, undocumented registers */
+		mii_write(dev, np->phy_addr, 31, 0x0001);
+		mii_write(dev, np->phy_addr, 27, 0x01e0);
+		mii_write(dev, np->phy_addr, 31, 0x0002);
+		mii_write(dev, np->phy_addr, 27, 0xeb8e);
+		mii_write(dev, np->phy_addr, 31, 0x0000);
+		mii_write(dev, np->phy_addr, 30, 0x005e);
+		/* advertise 1000BASE-T half & full duplex, prefer MASTER */
+		mii_write(dev, np->phy_addr, MII_CTRL1000, 0x0700);
+	}
+
 	/* Fiber device? */
 	np->phy_media = (dr16(ASICCtrl) & PhyMedia) ? 1 : 0;
 	np->link_status = 0;
@@ -361,6 +374,11 @@
 	for (i = 0; i < 6; i++)
 		dev->dev_addr[i] = psrom->mac_addr[i];
 
+	if (np->chip_id == CHIP_IP1000A) {
+		np->led_mode = psrom->led_mode;
+		return 0;
+	}
+
 	if (np->pdev->vendor != PCI_VENDOR_ID_DLINK) {
 		return 0;
 	}
@@ -406,6 +424,28 @@
 	return 0;
 }
 
+static void rio_set_led_mode(struct net_device *dev)
+{
+	struct netdev_private *np = netdev_priv(dev);
+	void __iomem *ioaddr = np->ioaddr;
+	u32 mode;
+
+	if (np->chip_id != CHIP_IP1000A)
+		return;
+
+	mode = dr32(ASICCtrl);
+	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
+
+	if (np->led_mode & 0x01)
+		mode |= IPG_AC_LED_MODE;
+	if (np->led_mode & 0x02)
+		mode |= IPG_AC_LED_MODE_BIT_1;
+	if (np->led_mode & 0x08)
+		mode |= IPG_AC_LED_SPEED;
+
+	dw32(ASICCtrl, mode);
+}
+
 static int
 rio_open (struct net_device *dev)
 {
@@ -424,6 +464,8 @@
 	     GlobalReset | DMAReset | FIFOReset | NetworkReset | HostReset);
 	mdelay(10);
 
+	rio_set_led_mode(dev);
+
 	/* DebugCtrl bit 4, 5, 9 must set */
 	dw32(DebugCtrl, dr32(DebugCtrl) | 0x0230);
 
@@ -433,9 +475,13 @@
 
 	alloc_list (dev);
 
-	/* Get station address */
-	for (i = 0; i < 6; i++)
-		dw8(StationAddr0 + i, dev->dev_addr[i]);
+	/* Set station address */
+	/* 16 or 32-bit access is required by TC9020 datasheet but 8-bit works
+	 * too. However, it doesn't work on IP1000A so we use 16-bit access.
+	 */
+	for (i = 0; i < 3; i++)
+		dw16(StationAddr0 + 2 * i,
+		     cpu_to_le16(((u16 *)dev->dev_addr)[i]));
 
 	set_multicast (dev);
 	if (np->coalesce) {
@@ -780,6 +826,7 @@
 				break;
 			mdelay (1);
 		}
+		rio_set_led_mode(dev);
 		rio_free_tx (dev, 1);
 		/* Reset TFDListPtr */
 		dw32(TFDListPtr0, np->tx_ring_dma +
@@ -799,6 +846,7 @@
 				break;
 			mdelay (1);
 		}
+		rio_set_led_mode(dev);
 		/* Let TxStartThresh stay default value */
 	}
 	/* Maximum Collisions */
@@ -965,6 +1013,7 @@
 			dev->name, int_status);
 		dw16(ASICCtrl + 2, GlobalReset | HostReset);
 		mdelay (500);
+		rio_set_led_mode(dev);
 	}
 }
 
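The station-address loop above writes the six MAC bytes as three 16-bit words,
since per the comment the IP1000A does not honour 8-bit accesses to these
registers. How the bytes pack into words, assuming a little-endian host as in
the driver's cpu_to_le16() usage:

    #include <stdio.h>

    int main(void)
    {
            unsigned char mac[6] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
            unsigned int i;

            /* each word carries one byte pair, low byte first */
            for (i = 0; i < 3; i++) {
                    unsigned short word = mac[2 * i] | (mac[2 * i + 1] << 8);

                    printf("StationAddr%u <- 0x%04x\n", 2 * i, word);
            }
            return 0;
    }
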
diff --git a/drivers/net/ethernet/dlink/dl2k.h b/drivers/net/ethernet/dlink/dl2k.h
index 23c07b0..8f4f612 100644
--- a/drivers/net/ethernet/dlink/dl2k.h
+++ b/drivers/net/ethernet/dlink/dl2k.h
@@ -211,6 +211,10 @@
 	ResetBusy = 0x0400,
 };
 
+#define IPG_AC_LED_MODE		BIT(14)
+#define IPG_AC_LED_SPEED	BIT(27)
+#define IPG_AC_LED_MODE_BIT_1	BIT(29)
+
 /* Transmit Frame Control bits */
 enum TFC_bits {
 	DwordAlign = 0x00000000,
@@ -332,7 +336,10 @@
 	u16 asic_ctrl;		/* 0x02 */
 	u16 sub_vendor_id;	/* 0x04 */
 	u16 sub_system_id;	/* 0x06 */
-	u16 reserved1[12];	/* 0x08-0x1f */
+	u16 pci_base_1;		/* 0x08 (IP1000A only) */
+	u16 pci_base_2;		/* 0x0a (IP1000A only) */
+	u16 led_mode;		/* 0x0c (IP1000A only) */
+	u16 reserved1[9];	/* 0x0e-0x1f */
 	u8 mac_addr[6];		/* 0x20-0x25 */
 	u8 reserved2[10];	/* 0x26-0x2f */
 	u8 sib[204];		/* 0x30-0xfb */
@@ -397,6 +404,7 @@
 	u16 advertising;	/* NWay media advertisement */
 	u16 negotiate;		/* Negotiated media */
 	int phy_addr;		/* PHY addresses. */
+	u16 led_mode;		/* LED mode read from EEPROM (IP1000A only) */
 };
 
 /* The station address location in the EEPROM. */
@@ -407,10 +415,15 @@
         class_mask              of the class are honored during the comparison.
         driver_data             Data private to the driver.
 */
+#define CHIP_IP1000A	1
 
 static const struct pci_device_id rio_pci_tbl[] = {
 	{0x1186, 0x4000, PCI_ANY_ID, PCI_ANY_ID, },
 	{0x13f0, 0x1021, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VDEVICE(SUNDANCE,	0x1023), CHIP_IP1000A },
+	{ PCI_VDEVICE(SUNDANCE,	0x2021), CHIP_IP1000A },
+	{ PCI_VDEVICE(DLINK,	0x9021), CHIP_IP1000A },
+	{ PCI_VDEVICE(DLINK,	0x4020), CHIP_IP1000A },
 	{ }
 };
 MODULE_DEVICE_TABLE (pci, rio_pci_tbl);
diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
index f4cb8e4..734f655 100644
--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
+++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
@@ -1062,9 +1062,7 @@
 static int be_set_rss_hash_opts(struct be_adapter *adapter,
 				struct ethtool_rxnfc *cmd)
 {
-	struct be_rx_obj *rxo;
-	int status = 0, i, j;
-	u8 rsstable[128];
+	int status;
 	u32 rss_flags = adapter->rss_info.rss_flags;
 
 	if (cmd->data != L3_RSS_FLAGS &&
@@ -1113,20 +1111,11 @@
 	}
 
 	if (rss_flags == adapter->rss_info.rss_flags)
-		return status;
-
-	if (be_multi_rxq(adapter)) {
-		for (j = 0; j < 128; j += adapter->num_rss_qs) {
-			for_all_rss_queues(adapter, rxo, i) {
-				if ((j + i) >= 128)
-					break;
-				rsstable[j + i] = rxo->rss_id;
-			}
-		}
-	}
+		return 0;
 
 	status = be_cmd_rss_config(adapter, adapter->rss_info.rsstable,
-				   rss_flags, 128, adapter->rss_info.rss_hkey);
+				   rss_flags, RSS_INDIR_TABLE_LEN,
+				   adapter->rss_info.rss_hkey);
 	if (!status)
 		adapter->rss_info.rss_flags = rss_flags;
 
diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
index eb48a97..b6ad029 100644
--- a/drivers/net/ethernet/emulex/benet/be_main.c
+++ b/drivers/net/ethernet/emulex/benet/be_main.c
@@ -3518,7 +3518,7 @@
 
 	netdev_rss_key_fill(rss_key, RSS_HASH_KEY_LEN);
 	rc = be_cmd_rss_config(adapter, rss->rsstable, rss->rss_flags,
-			       128, rss_key);
+			       RSS_INDIR_TABLE_LEN, rss_key);
 	if (rc) {
 		rss->rss_flags = RSS_ENABLE_NONE;
 		return rc;
diff --git a/drivers/net/ethernet/icplus/Kconfig b/drivers/net/ethernet/icplus/Kconfig
deleted file mode 100644
index 14a66e9..0000000
--- a/drivers/net/ethernet/icplus/Kconfig
+++ /dev/null
@@ -1,13 +0,0 @@
-#
-# IC Plus device configuration
-#
-
-config IP1000
-	tristate "IP1000 Gigabit Ethernet support"
-	depends on PCI
-	select MII
-	---help---
-	  This driver supports IP1000 gigabit Ethernet cards.
-
-	  To compile this driver as a module, choose M here: the module
-	  will be called ipg.  This is recommended.
diff --git a/drivers/net/ethernet/icplus/Makefile b/drivers/net/ethernet/icplus/Makefile
deleted file mode 100644
index 5bc87c1..0000000
--- a/drivers/net/ethernet/icplus/Makefile
+++ /dev/null
@@ -1,5 +0,0 @@
-#
-# Makefile for the IC Plus device drivers
-#
-
-obj-$(CONFIG_IP1000) += ipg.o
diff --git a/drivers/net/ethernet/icplus/ipg.c b/drivers/net/ethernet/icplus/ipg.c
deleted file mode 100644
index c3b6af8..0000000
--- a/drivers/net/ethernet/icplus/ipg.c
+++ /dev/null
@@ -1,2300 +0,0 @@
-/*
- * ipg.c: Device Driver for the IP1000 Gigabit Ethernet Adapter
- *
- * Copyright (C) 2003, 2007  IC Plus Corp
- *
- * Original Author:
- *
- *   Craig Rich
- *   Sundance Technology, Inc.
- *   www.sundanceti.com
- *   craig_rich@sundanceti.com
- *
- * Current Maintainer:
- *
- *   Sorbica Shieh.
- *   http://www.icplus.com.tw
- *   sorbica@icplus.com.tw
- *
- *   Jesse Huang
- *   http://www.icplus.com.tw
- *   jesse@icplus.com.tw
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/crc32.h>
-#include <linux/ethtool.h>
-#include <linux/interrupt.h>
-#include <linux/gfp.h>
-#include <linux/mii.h>
-#include <linux/mutex.h>
-
-#include <asm/div64.h>
-
-#define IPG_RX_RING_BYTES	(sizeof(struct ipg_rx) * IPG_RFDLIST_LENGTH)
-#define IPG_TX_RING_BYTES	(sizeof(struct ipg_tx) * IPG_TFDLIST_LENGTH)
-#define IPG_RESET_MASK \
-	(IPG_AC_GLOBAL_RESET | IPG_AC_RX_RESET | IPG_AC_TX_RESET | \
-	 IPG_AC_DMA | IPG_AC_FIFO | IPG_AC_NETWORK | IPG_AC_HOST | \
-	 IPG_AC_AUTO_INIT)
-
-#define ipg_w32(val32, reg)	iowrite32((val32), ioaddr + (reg))
-#define ipg_w16(val16, reg)	iowrite16((val16), ioaddr + (reg))
-#define ipg_w8(val8, reg)	iowrite8((val8), ioaddr + (reg))
-
-#define ipg_r32(reg)		ioread32(ioaddr + (reg))
-#define ipg_r16(reg)		ioread16(ioaddr + (reg))
-#define ipg_r8(reg)		ioread8(ioaddr + (reg))
-
-enum {
-	netdev_io_size = 128
-};
-
-#include "ipg.h"
-#define DRV_NAME	"ipg"
-
-MODULE_AUTHOR("IC Plus Corp. 2003");
-MODULE_DESCRIPTION("IC Plus IP1000 Gigabit Ethernet Adapter Linux Driver");
-MODULE_LICENSE("GPL");
-
-/*
- * Defaults
- */
-#define IPG_MAX_RXFRAME_SIZE	0x0600
-#define IPG_RXFRAG_SIZE		0x0600
-#define IPG_RXSUPPORT_SIZE	0x0600
-#define IPG_IS_JUMBO		false
-
-/*
- * Variable record -- index by leading revision/length
- * Revision/Length(=N*4), Address1, Data1, Address2, Data2,...,AddressN,DataN
- */
-static const unsigned short DefaultPhyParam[] = {
-	/* 11/12/03 IP1000A v1-3 rev=0x40 */
-	/*--------------------------------------------------------------------------
-	(0x4000|(15*4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 22, 0x85bd, 24, 0xfff2,
-				 27, 0x0c10, 28, 0x0c10, 29, 0x2c10, 31, 0x0003, 23, 0x92f6,
-				 31, 0x0000, 23, 0x003d, 30, 0x00de, 20, 0x20e7,  9, 0x0700,
-	  --------------------------------------------------------------------------*/
-	/* 12/17/03 IP1000A v1-4 rev=0x40 */
-	(0x4000 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
-	    0x0000,
-	30, 0x005e, 9, 0x0700,
-	/* 01/09/04 IP1000A v1-5 rev=0x41 */
-	(0x4100 | (07 * 4)), 31, 0x0001, 27, 0x01e0, 31, 0x0002, 27, 0xeb8e, 31,
-	    0x0000,
-	30, 0x005e, 9, 0x0700,
-	0x0000
-};
-
-static const char * const ipg_brand_name[] = {
-	"IC PLUS IP1000 1000/100/10 based NIC",
-	"Sundance Technology ST2021 based NIC",
-	"Tamarack Microelectronics TC9020/9021 based NIC",
-	"D-Link NIC IP1000A"
-};
-
-static const struct pci_device_id ipg_pci_tbl[] = {
-	{ PCI_VDEVICE(SUNDANCE,	0x1023), 0 },
-	{ PCI_VDEVICE(SUNDANCE,	0x2021), 1 },
-	{ PCI_VDEVICE(DLINK,	0x9021), 2 },
-	{ PCI_VDEVICE(DLINK,	0x4020), 3 },
-	{ 0, }
-};
-
-MODULE_DEVICE_TABLE(pci, ipg_pci_tbl);
-
-static inline void __iomem *ipg_ioaddr(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	return sp->ioaddr;
-}
-
-#ifdef IPG_DEBUG
-static void ipg_dump_rfdlist(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	unsigned int i;
-	u32 offset;
-
-	IPG_DEBUG_MSG("_dump_rfdlist\n");
-
-	netdev_info(dev, "rx_current = %02x\n", sp->rx_current);
-	netdev_info(dev, "rx_dirty   = %02x\n", sp->rx_dirty);
-	netdev_info(dev, "RFDList start address = %016lx\n",
-		    (unsigned long)sp->rxd_map);
-	netdev_info(dev, "RFDListPtr register   = %08x%08x\n",
-		    ipg_r32(IPG_RFDLISTPTR1), ipg_r32(IPG_RFDLISTPTR0));
-
-	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
-		offset = (u32) &sp->rxd[i].next_desc - (u32) sp->rxd;
-		netdev_info(dev, "%02x %04x RFDNextPtr = %016lx\n",
-			    i, offset, (unsigned long)sp->rxd[i].next_desc);
-		offset = (u32) &sp->rxd[i].rfs - (u32) sp->rxd;
-		netdev_info(dev, "%02x %04x RFS        = %016lx\n",
-			    i, offset, (unsigned long)sp->rxd[i].rfs);
-		offset = (u32) &sp->rxd[i].frag_info - (u32) sp->rxd;
-		netdev_info(dev, "%02x %04x frag_info   = %016lx\n",
-			    i, offset, (unsigned long)sp->rxd[i].frag_info);
-	}
-}
-
-static void ipg_dump_tfdlist(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	unsigned int i;
-	u32 offset;
-
-	IPG_DEBUG_MSG("_dump_tfdlist\n");
-
-	netdev_info(dev, "tx_current         = %02x\n", sp->tx_current);
-	netdev_info(dev, "tx_dirty = %02x\n", sp->tx_dirty);
-	netdev_info(dev, "TFDList start address = %016lx\n",
-		    (unsigned long) sp->txd_map);
-	netdev_info(dev, "TFDListPtr register   = %08x%08x\n",
-		    ipg_r32(IPG_TFDLISTPTR1), ipg_r32(IPG_TFDLISTPTR0));
-
-	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
-		offset = (u32) &sp->txd[i].next_desc - (u32) sp->txd;
-		netdev_info(dev, "%02x %04x TFDNextPtr = %016lx\n",
-			    i, offset, (unsigned long)sp->txd[i].next_desc);
-
-		offset = (u32) &sp->txd[i].tfc - (u32) sp->txd;
-		netdev_info(dev, "%02x %04x TFC        = %016lx\n",
-			    i, offset, (unsigned long) sp->txd[i].tfc);
-		offset = (u32) &sp->txd[i].frag_info - (u32) sp->txd;
-		netdev_info(dev, "%02x %04x frag_info   = %016lx\n",
-			    i, offset, (unsigned long) sp->txd[i].frag_info);
-	}
-}
-#endif
-
-static void ipg_write_phy_ctl(void __iomem *ioaddr, u8 data)
-{
-	ipg_w8(IPG_PC_RSVD_MASK & data, PHY_CTRL);
-	ndelay(IPG_PC_PHYCTRLWAIT_NS);
-}
-
-static void ipg_drive_phy_ctl_low_high(void __iomem *ioaddr, u8 data)
-{
-	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | data);
-	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | data);
-}
-
-static void send_three_state(void __iomem *ioaddr, u8 phyctrlpolarity)
-{
-	phyctrlpolarity |= (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR;
-
-	ipg_drive_phy_ctl_low_high(ioaddr, phyctrlpolarity);
-}
-
-static void send_end(void __iomem *ioaddr, u8 phyctrlpolarity)
-{
-	ipg_w8((IPG_PC_MGMTCLK_LO | (IPG_PC_MGMTDATA & 0) | IPG_PC_MGMTDIR |
-		phyctrlpolarity) & IPG_PC_RSVD_MASK, PHY_CTRL);
-}
-
-static u16 read_phy_bit(void __iomem *ioaddr, u8 phyctrlpolarity)
-{
-	u16 bit_data;
-
-	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | phyctrlpolarity);
-
-	bit_data = ((ipg_r8(PHY_CTRL) & IPG_PC_MGMTDATA) >> 1) & 1;
-
-	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | phyctrlpolarity);
-
-	return bit_data;
-}
-
-/*
- * Read a register from the Physical Layer device located
- * on the IPG NIC, using the IPG PHYCTRL register.
- */
-static int mdio_read(struct net_device *dev, int phy_id, int phy_reg)
-{
-	void __iomem *ioaddr = ipg_ioaddr(dev);
-	/*
-	 * The GMII mangement frame structure for a read is as follows:
-	 *
-	 * |Preamble|st|op|phyad|regad|ta|      data      |idle|
-	 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z   |
-	 *
-	 * <32 1s> = 32 consecutive logic 1 values
-	 * A = bit of Physical Layer device address (MSB first)
-	 * R = bit of register address (MSB first)
-	 * z = High impedance state
-	 * D = bit of read data (MSB first)
-	 *
-	 * Transmission order is 'Preamble' field first, bits transmitted
-	 * left to right (first to last).
-	 */
-	struct {
-		u32 field;
-		unsigned int len;
-	} p[] = {
-		{ GMII_PREAMBLE,	32 },	/* Preamble */
-		{ GMII_ST,		2  },	/* ST */
-		{ GMII_READ,		2  },	/* OP */
-		{ phy_id,		5  },	/* PHYAD */
-		{ phy_reg,		5  },	/* REGAD */
-		{ 0x0000,		2  },	/* TA */
-		{ 0x0000,		16 },	/* DATA */
-		{ 0x0000,		1  }	/* IDLE */
-	};
-	unsigned int i, j;
-	u8 polarity, data;
-
-	polarity  = ipg_r8(PHY_CTRL);
-	polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
-
-	/* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
-	for (j = 0; j < 5; j++) {
-		for (i = 0; i < p[j].len; i++) {
-			/* For each variable length field, the MSB must be
-			 * transmitted first. Rotate through the field bits,
-			 * starting with the MSB, and move each bit into the
-			 * the 1st (2^1) bit position (this is the bit position
-			 * corresponding to the MgmtData bit of the PhyCtrl
-			 * register for the IPG).
-			 *
-			 * Example: ST = 01;
-			 *
-			 *          First write a '0' to bit 1 of the PhyCtrl
-			 *          register, then write a '1' to bit 1 of the
-			 *          PhyCtrl register.
-			 *
-			 * To do this, right shift the MSB of ST by the value:
-			 * [field length - 1 - #ST bits already written]
-			 * then left shift this result by 1.
-			 */
-			data  = (p[j].field >> (p[j].len - 1 - i)) << 1;
-			data &= IPG_PC_MGMTDATA;
-			data |= polarity | IPG_PC_MGMTDIR;
-
-			ipg_drive_phy_ctl_low_high(ioaddr, data);
-		}
-	}
-
-	send_three_state(ioaddr, polarity);
-
-	read_phy_bit(ioaddr, polarity);
-
-	/*
-	 * For a read cycle, the bits for the next two fields (TA and
-	 * DATA) are driven by the PHY (the IPG reads these bits).
-	 */
-	for (i = 0; i < p[6].len; i++) {
-		p[6].field |=
-		    (read_phy_bit(ioaddr, polarity) << (p[6].len - 1 - i));
-	}
-
-	send_three_state(ioaddr, polarity);
-	send_three_state(ioaddr, polarity);
-	send_three_state(ioaddr, polarity);
-	send_end(ioaddr, polarity);
-
-	/* Return the value of the DATA field. */
-	return p[6].field;
-}
-
-/*
- * Write to a register from the Physical Layer device located
- * on the IPG NIC, using the IPG PHYCTRL register.
- */
-static void mdio_write(struct net_device *dev, int phy_id, int phy_reg, int val)
-{
-	void __iomem *ioaddr = ipg_ioaddr(dev);
-	/*
-	 * The GMII mangement frame structure for a read is as follows:
-	 *
-	 * |Preamble|st|op|phyad|regad|ta|      data      |idle|
-	 * |< 32 1s>|01|10|AAAAA|RRRRR|z0|DDDDDDDDDDDDDDDD|z   |
-	 *
-	 * <32 1s> = 32 consecutive logic 1 values
-	 * A = bit of Physical Layer device address (MSB first)
-	 * R = bit of register address (MSB first)
-	 * z = High impedance state
-	 * D = bit of write data (MSB first)
-	 *
-	 * Transmission order is 'Preamble' field first, bits transmitted
-	 * left to right (first to last).
-	 */
-	struct {
-		u32 field;
-		unsigned int len;
-	} p[] = {
-		{ GMII_PREAMBLE,	32 },	/* Preamble */
-		{ GMII_ST,		2  },	/* ST */
-		{ GMII_WRITE,		2  },	/* OP */
-		{ phy_id,		5  },	/* PHYAD */
-		{ phy_reg,		5  },	/* REGAD */
-		{ 0x0002,		2  },	/* TA */
-		{ val & 0xffff,		16 },	/* DATA */
-		{ 0x0000,		1  }	/* IDLE */
-	};
-	unsigned int i, j;
-	u8 polarity, data;
-
-	polarity  = ipg_r8(PHY_CTRL);
-	polarity &= (IPG_PC_DUPLEX_POLARITY | IPG_PC_LINK_POLARITY);
-
-	/* Create the Preamble, ST, OP, PHYAD, and REGAD field. */
-	for (j = 0; j < 7; j++) {
-		for (i = 0; i < p[j].len; i++) {
-			/* For each variable length field, the MSB must be
-			 * transmitted first. Rotate through the field bits,
-			 * starting with the MSB, and move each bit into the
-			 * the 1st (2^1) bit position (this is the bit position
-			 * corresponding to the MgmtData bit of the PhyCtrl
-			 * register for the IPG).
-			 *
-			 * Example: ST = 01;
-			 *
-			 *          First write a '0' to bit 1 of the PhyCtrl
-			 *          register, then write a '1' to bit 1 of the
-			 *          PhyCtrl register.
-			 *
-			 * To do this, right shift the MSB of ST by the value:
-			 * [field length - 1 - #ST bits already written]
-			 * then left shift this result by 1.
-			 */
-			data  = (p[j].field >> (p[j].len - 1 - i)) << 1;
-			data &= IPG_PC_MGMTDATA;
-			data |= polarity | IPG_PC_MGMTDIR;
-
-			ipg_drive_phy_ctl_low_high(ioaddr, data);
-		}
-	}
-
-	/* The last cycle is a tri-state, so read from the PHY. */
-	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_LO | polarity);
-	ipg_r8(PHY_CTRL);
-	ipg_write_phy_ctl(ioaddr, IPG_PC_MGMTCLK_HI | polarity);
-}
-
-static void ipg_set_led_mode(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	u32 mode;
-
-	mode = ipg_r32(ASIC_CTRL);
-	mode &= ~(IPG_AC_LED_MODE_BIT_1 | IPG_AC_LED_MODE | IPG_AC_LED_SPEED);
-
-	if ((sp->led_mode & 0x03) > 1)
-		mode |= IPG_AC_LED_MODE_BIT_1;	/* Write Asic Control Bit 29 */
-
-	if ((sp->led_mode & 0x01) == 1)
-		mode |= IPG_AC_LED_MODE;	/* Write Asic Control Bit 14 */
-
-	if ((sp->led_mode & 0x08) == 8)
-		mode |= IPG_AC_LED_SPEED;	/* Write Asic Control Bit 27 */
-
-	ipg_w32(mode, ASIC_CTRL);
-}
-
-static void ipg_set_phy_set(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	int physet;
-
-	physet = ipg_r8(PHY_SET);
-	physet &= ~(IPG_PS_MEM_LENB9B | IPG_PS_MEM_LEN9 | IPG_PS_NON_COMPDET);
-	physet |= ((sp->led_mode & 0x70) >> 4);
-	ipg_w8(physet, PHY_SET);
-}
-
-static int ipg_reset(struct net_device *dev, u32 resetflags)
-{
-	/* Assert functional resets via the IPG AsicCtrl
-	 * register as specified by the 'resetflags' input
-	 * parameter.
-	 */
-	void __iomem *ioaddr = ipg_ioaddr(dev);
-	unsigned int timeout_count = 0;
-
-	IPG_DEBUG_MSG("_reset\n");
-
-	ipg_w32(ipg_r32(ASIC_CTRL) | resetflags, ASIC_CTRL);
-
-	/* Delay added to account for problem with 10Mbps reset. */
-	mdelay(IPG_AC_RESETWAIT);
-
-	while (IPG_AC_RESET_BUSY & ipg_r32(ASIC_CTRL)) {
-		mdelay(IPG_AC_RESETWAIT);
-		if (++timeout_count > IPG_AC_RESET_TIMEOUT)
-			return -ETIME;
-	}
-	/* Set LED Mode in Asic Control */
-	ipg_set_led_mode(dev);
-
-	/* Set PHYSet Register Value */
-	ipg_set_phy_set(dev);
-	return 0;
-}
-
-/* Find the GMII PHY address. */
-static int ipg_find_phyaddr(struct net_device *dev)
-{
-	unsigned int phyaddr, i;
-
-	for (i = 0; i < 32; i++) {
-		u32 status;
-
-		/* Search for the correct PHY address among 32 possible. */
-		phyaddr = (IPG_NIC_PHY_ADDRESS + i) % 32;
-
-		/* 10/22/03 Grace: changed verification from GMII_PHY_STATUS
-		 * to GMII_PHY_ID1.
-		 */
-
-		status = mdio_read(dev, phyaddr, MII_BMSR);
-
-		if ((status != 0xFFFF) && (status != 0))
-			return phyaddr;
-	}
-
-	return 0x1f;
-}
-
-/*
- * Configure IPG based on result of IEEE 802.3 PHY
- * auto-negotiation.
- */
-static int ipg_config_autoneg(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	unsigned int txflowcontrol;
-	unsigned int rxflowcontrol;
-	unsigned int fullduplex;
-	u32 mac_ctrl_val;
-	u32 asicctrl;
-	u8 phyctrl;
-	const char *speed;
-	const char *duplex;
-	const char *tx_desc;
-	const char *rx_desc;
-
-	IPG_DEBUG_MSG("_config_autoneg\n");
-
-	asicctrl = ipg_r32(ASIC_CTRL);
-	phyctrl = ipg_r8(PHY_CTRL);
-	mac_ctrl_val = ipg_r32(MAC_CTRL);
-
-	/* Set flags for use in resolving auto-negotiation, assuming
-	 * non-1000Mbps, half duplex, no flow control.
-	 */
-	fullduplex = 0;
-	txflowcontrol = 0;
-	rxflowcontrol = 0;
-
-	/* To accommodate a problem in 10Mbps operation,
-	 * set a global flag if the PHY is running in 10Mbps mode.
-	 */
-	sp->tenmbpsmode = 0;
-
-	/* Determine actual speed of operation. */
-	switch (phyctrl & IPG_PC_LINK_SPEED) {
-	case IPG_PC_LINK_SPEED_10MBPS:
-		speed = "10Mbps";
-		sp->tenmbpsmode = 1;
-		break;
-	case IPG_PC_LINK_SPEED_100MBPS:
-		speed = "100Mbps";
-		break;
-	case IPG_PC_LINK_SPEED_1000MBPS:
-		speed = "1000Mbps";
-		break;
-	default:
-		speed = "undefined!";
-		return 0;
-	}
-
-	netdev_info(dev, "Link speed = %s\n", speed);
-	if (sp->tenmbpsmode == 1)
-		netdev_info(dev, "10Mbps operational mode enabled\n");
-
-	if (phyctrl & IPG_PC_DUPLEX_STATUS) {
-		fullduplex = 1;
-		txflowcontrol = 1;
-		rxflowcontrol = 1;
-	}
-
-	/* Configure full duplex, and flow control. */
-	if (fullduplex == 1) {
-
-		/* Configure IPG for full duplex operation. */
-
-		duplex = "full";
-
-		mac_ctrl_val |= IPG_MC_DUPLEX_SELECT_FD;
-
-		if (txflowcontrol == 1) {
-			tx_desc  = "";
-			mac_ctrl_val |= IPG_MC_TX_FLOW_CONTROL_ENABLE;
-		} else {
-			tx_desc = "no ";
-			mac_ctrl_val &= ~IPG_MC_TX_FLOW_CONTROL_ENABLE;
-		}
-
-		if (rxflowcontrol == 1) {
-			rx_desc = "";
-			mac_ctrl_val |= IPG_MC_RX_FLOW_CONTROL_ENABLE;
-		} else {
-			rx_desc = "no ";
-			mac_ctrl_val &= ~IPG_MC_RX_FLOW_CONTROL_ENABLE;
-		}
-	} else {
-		duplex = "half";
-		tx_desc = "no ";
-		rx_desc = "no ";
-		mac_ctrl_val &= (~IPG_MC_DUPLEX_SELECT_FD &
-				 ~IPG_MC_TX_FLOW_CONTROL_ENABLE &
-				 ~IPG_MC_RX_FLOW_CONTROL_ENABLE);
-	}
-
-	netdev_info(dev, "setting %s duplex, %sTX, %sRX flow control\n",
-		    duplex, tx_desc, rx_desc);
-	ipg_w32(mac_ctrl_val, MAC_CTRL);
-
-	return 0;
-}
-
-/* Determine and configure multicast operation and set
- * receive mode for IPG.
- */
-static void ipg_nic_set_multicast_list(struct net_device *dev)
-{
-	void __iomem *ioaddr = ipg_ioaddr(dev);
-	struct netdev_hw_addr *ha;
-	unsigned int hashindex;
-	u32 hashtable[2];
-	u8 receivemode;
-
-	IPG_DEBUG_MSG("_nic_set_multicast_list\n");
-
-	receivemode = IPG_RM_RECEIVEUNICAST | IPG_RM_RECEIVEBROADCAST;
-
-	if (dev->flags & IFF_PROMISC) {
-		/* NIC to be configured in promiscuous mode. */
-		receivemode = IPG_RM_RECEIVEALLFRAMES;
-	} else if ((dev->flags & IFF_ALLMULTI) ||
-		   ((dev->flags & IFF_MULTICAST) &&
-		    (netdev_mc_count(dev) > IPG_MULTICAST_HASHTABLE_SIZE))) {
-		/* NIC to be configured to receive all multicast
-		 * frames. */
-		receivemode |= IPG_RM_RECEIVEMULTICAST;
-	} else if ((dev->flags & IFF_MULTICAST) && !netdev_mc_empty(dev)) {
-		/* NIC to be configured to receive selected
-		 * multicast addresses. */
-		receivemode |= IPG_RM_RECEIVEMULTICASTHASH;
-	}
-
-	/* Calculate the bits to set for the 64-bit IPG HASHTABLE.
-	 * The IPG applies a cyclic redundancy check (the same CRC
-	 * used to calculate the frame data FCS) to the destination
-	 * address of all incoming frames whose destination address
-	 * has the multicast bit set. The least significant
-	 * 6 bits of the CRC result are used as an addressing index
-	 * into the hash table. If the value of the bit addressed by
-	 * this index is a 1, the frame is passed to the host system.
-	 */
-
-	/* Clear hashtable. */
-	hashtable[0] = 0x00000000;
-	hashtable[1] = 0x00000000;
-
-	/* Cycle through all multicast addresses to filter. */
-	netdev_for_each_mc_addr(ha, dev) {
-		/* Calculate CRC result for each multicast address. */
-		hashindex = crc32_le(0xffffffff, ha->addr,
-				     ETH_ALEN);
-
-		/* Use only the least significant 6 bits. */
-		hashindex = hashindex & 0x3F;
-
-		/* Within "hashtable", set bit number "hashindex"
-		 * to a logic 1.
-		 */
-		set_bit(hashindex, (void *)hashtable);
-	}
-
-	/* Write the hash table to the four 16-bit HASHTABLE
-	 * IPG registers (as two 32-bit writes).
-	 */
-	ipg_w32(hashtable[0], HASHTABLE_0);
-	ipg_w32(hashtable[1], HASHTABLE_1);
-
-	ipg_w8(IPG_RM_RSVD_MASK & receivemode, RECEIVE_MODE);
-
-	IPG_DEBUG_MSG("ReceiveMode = %x\n", ipg_r8(RECEIVE_MODE));
-}
-
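The hash indexing described in the comment above can be exercised stand-alone. A sketch under the assumption that a plain reflected CRC-32 (polynomial 0xEDB88320) matches the kernel's crc32_le() for this purpose; the bit-set step mirrors set_bit() on an array of 32-bit words:

	#include <stdint.h>
	#include <stdio.h>

	/* Reflected CRC-32 (poly 0xEDB88320), bit by bit; assumed
	 * equivalent to the kernel's crc32_le() as used above.
	 */
	static uint32_t crc32_le(uint32_t crc, const uint8_t *p, int len)
	{
		int bit;

		while (len--) {
			crc ^= *p++;
			for (bit = 0; bit < 8; bit++)
				crc = (crc >> 1) ^ ((crc & 1) ? 0xEDB88320 : 0);
		}
		return crc;
	}

	int main(void)
	{
		const uint8_t mc_addr[6] = { 0x01, 0x00, 0x5e, 0x00, 0x00, 0x01 };
		uint32_t hashtable[2] = { 0, 0 };
		unsigned int hashindex;

		/* The least significant 6 bits index one of 64 hash bits. */
		hashindex = crc32_le(0xffffffff, mc_addr, 6) & 0x3F;

		/* Equivalent of set_bit(hashindex, hashtable) on u32 words. */
		hashtable[hashindex >> 5] |= 1u << (hashindex & 31);

		printf("index=%u table={%08x,%08x}\n", hashindex,
		       hashtable[0], hashtable[1]);
		return 0;
	}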
-static int ipg_io_config(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = ipg_ioaddr(dev);
-	u32 origmacctrl;
-	u32 restoremacctrl;
-
-	IPG_DEBUG_MSG("_io_config\n");
-
-	origmacctrl = ipg_r32(MAC_CTRL);
-
-	restoremacctrl = origmacctrl | IPG_MC_STATISTICS_ENABLE;
-
-	/* Based on compilation option, determine if FCS is to be
-	 * stripped on receive frames by IPG.
-	 */
-	if (!IPG_STRIP_FCS_ON_RX)
-		restoremacctrl |= IPG_MC_RCV_FCS;
-
-	/* Determine if transmitter and/or receiver are
-	 * enabled so we may restore MACCTRL correctly.
-	 */
-	if (origmacctrl & IPG_MC_TX_ENABLED)
-		restoremacctrl |= IPG_MC_TX_ENABLE;
-
-	if (origmacctrl & IPG_MC_RX_ENABLED)
-		restoremacctrl |= IPG_MC_RX_ENABLE;
-
-	/* Transmitter and receiver must be disabled before setting
-	 * IFSSelect.
-	 */
-	ipg_w32((origmacctrl & (IPG_MC_RX_DISABLE | IPG_MC_TX_DISABLE)) &
-		IPG_MC_RSVD_MASK, MAC_CTRL);
-
-	/* Now that transmitter and receiver are disabled, write
-	 * to IFSSelect.
-	 */
-	ipg_w32((origmacctrl & IPG_MC_IFS_96BIT) & IPG_MC_RSVD_MASK, MAC_CTRL);
-
-	/* Set RECEIVEMODE register. */
-	ipg_nic_set_multicast_list(dev);
-
-	ipg_w16(sp->max_rxframe_size, MAX_FRAME_SIZE);
-
-	ipg_w8(IPG_RXDMAPOLLPERIOD_VALUE,   RX_DMA_POLL_PERIOD);
-	ipg_w8(IPG_RXDMAURGENTTHRESH_VALUE, RX_DMA_URGENT_THRESH);
-	ipg_w8(IPG_RXDMABURSTTHRESH_VALUE,  RX_DMA_BURST_THRESH);
-	ipg_w8(IPG_TXDMAPOLLPERIOD_VALUE,   TX_DMA_POLL_PERIOD);
-	ipg_w8(IPG_TXDMAURGENTTHRESH_VALUE, TX_DMA_URGENT_THRESH);
-	ipg_w8(IPG_TXDMABURSTTHRESH_VALUE,  TX_DMA_BURST_THRESH);
-	ipg_w16((IPG_IE_HOST_ERROR | IPG_IE_TX_DMA_COMPLETE |
-		 IPG_IE_TX_COMPLETE | IPG_IE_INT_REQUESTED |
-		 IPG_IE_UPDATE_STATS | IPG_IE_LINK_EVENT |
-		 IPG_IE_RX_DMA_COMPLETE | IPG_IE_RX_DMA_PRIORITY), INT_ENABLE);
-	ipg_w16(IPG_FLOWONTHRESH_VALUE,  FLOW_ON_THRESH);
-	ipg_w16(IPG_FLOWOFFTHRESH_VALUE, FLOW_OFF_THRESH);
-
-	/* IPG multi-frag frame bug workaround.
-	 * Per silicon revision B3 errata.
-	 */
-	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0200, DEBUG_CTRL);
-
-	/* IPG TX poll now bug workaround.
-	 * Per silicon revision B3 errata.
-	 */
-	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0010, DEBUG_CTRL);
-
-	/* IPG RX poll now bug workaround.
-	 * Per silicon revision B3 errata.
-	 */
-	ipg_w16(ipg_r16(DEBUG_CTRL) | 0x0020, DEBUG_CTRL);
-
-	/* Now restore MACCTRL to original setting. */
-	ipg_w32(IPG_MC_RSVD_MASK & restoremacctrl, MAC_CTRL);
-
-	/* Disable unused RMON statistics. */
-	ipg_w32(IPG_RZ_ALL, RMON_STATISTICS_MASK);
-
-	/* Disable unused MIB statistics. */
-	ipg_w32(IPG_SM_MACCONTROLFRAMESXMTD | IPG_SM_MACCONTROLFRAMESRCVD |
-		IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK | IPG_SM_TXJUMBOFRAMES |
-		IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK | IPG_SM_RXJUMBOFRAMES |
-		IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK |
-		IPG_SM_UDPCHECKSUMERRORS | IPG_SM_TCPCHECKSUMERRORS |
-		IPG_SM_IPCHECKSUMERRORS, STATISTICS_MASK);
-
-	return 0;
-}
-
-/*
- * Create a receive buffer within system memory and update
- * NIC private structure appropriately.
- */
-static int ipg_get_rxbuff(struct net_device *dev, int entry)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	struct ipg_rx *rxfd = sp->rxd + entry;
-	struct sk_buff *skb;
-	u64 rxfragsize;
-
-	IPG_DEBUG_MSG("_get_rxbuff\n");
-
-	skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
-	if (!skb) {
-		sp->rx_buff[entry] = NULL;
-		return -ENOMEM;
-	}
-
-	/* Save the address of the sk_buff structure. */
-	sp->rx_buff[entry] = skb;
-
-	rxfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
-		sp->rx_buf_sz, PCI_DMA_FROMDEVICE));
-
-	/* Set the RFD fragment length. */
-	rxfragsize = sp->rxfrag_size;
-	rxfd->frag_info |= cpu_to_le64((rxfragsize << 48) & IPG_RFI_FRAGLEN);
-
-	return 0;
-}
-
-static int init_rfdlist(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	unsigned int i;
-
-	IPG_DEBUG_MSG("_init_rfdlist\n");
-
-	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
-		struct ipg_rx *rxfd = sp->rxd + i;
-
-		if (sp->rx_buff[i]) {
-			pci_unmap_single(sp->pdev,
-				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
-				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb_irq(sp->rx_buff[i]);
-			sp->rx_buff[i] = NULL;
-		}
-
-		/* Clear out the RFS field. */
-		rxfd->rfs = 0x0000000000000000;
-
-		if (ipg_get_rxbuff(dev, i) < 0) {
-			/*
-			 * A receive buffer was not ready, break the
-			 * RFD list here.
-			 */
-			IPG_DEBUG_MSG("Cannot allocate Rx buffer\n");
-
-			/* Just in case we cannot allocate a single RFD.
-			 * Should not occur.
-			 */
-			if (i == 0) {
-				netdev_err(dev, "No memory available for RFD list\n");
-				return -ENOMEM;
-			}
-		}
-
-		rxfd->next_desc = cpu_to_le64(sp->rxd_map +
-			sizeof(struct ipg_rx)*(i + 1));
-	}
-	sp->rxd[i - 1].next_desc = cpu_to_le64(sp->rxd_map);
-
-	sp->rx_current = 0;
-	sp->rx_dirty = 0;
-
-	/* Write the location of the RFDList to the IPG. */
-	ipg_w32((u32) sp->rxd_map, RFD_LIST_PTR_0);
-	ipg_w32(0x00000000, RFD_LIST_PTR_1);
-
-	return 0;
-}
-
-static void init_tfdlist(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	unsigned int i;
-
-	IPG_DEBUG_MSG("_init_tfdlist\n");
-
-	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
-		struct ipg_tx *txfd = sp->txd + i;
-
-		txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
-
-		if (sp->tx_buff[i]) {
-			dev_kfree_skb_irq(sp->tx_buff[i]);
-			sp->tx_buff[i] = NULL;
-		}
-
-		txfd->next_desc = cpu_to_le64(sp->txd_map +
-			sizeof(struct ipg_tx)*(i + 1));
-	}
-	sp->txd[i - 1].next_desc = cpu_to_le64(sp->txd_map);
-
-	sp->tx_current = 0;
-	sp->tx_dirty = 0;
-
-	/* Write the location of the TFDList to the IPG. */
-	IPG_DDEBUG_MSG("Starting TFDListPtr = %08x\n",
-		       (u32) sp->txd_map);
-	ipg_w32((u32) sp->txd_map, TFD_LIST_PTR_0);
-	ipg_w32(0x00000000, TFD_LIST_PTR_1);
-
-	sp->reset_current_tfd = 1;
-}
-
-/*
- * Free all transmit buffers which have already been transferred
- * via DMA to the IPG.
- */
-static void ipg_nic_txfree(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	unsigned int released, pending, dirty;
-
-	IPG_DEBUG_MSG("_nic_txfree\n");
-
-	pending = sp->tx_current - sp->tx_dirty;
-	dirty = sp->tx_dirty % IPG_TFDLIST_LENGTH;
-
-	for (released = 0; released < pending; released++) {
-		struct sk_buff *skb = sp->tx_buff[dirty];
-		struct ipg_tx *txfd = sp->txd + dirty;
-
-		IPG_DEBUG_MSG("TFC = %016lx\n", (unsigned long) txfd->tfc);
-
-		/* Look at each TFD's TFC field beginning
-		 * at the last freed TFD up to the current TFD.
-		 * If the TFDDone bit is set, free the associated
-		 * buffer.
-		 */
-		if (!(txfd->tfc & cpu_to_le64(IPG_TFC_TFDDONE)))
-			break;
-
-		/* Free the transmit buffer. */
-		if (skb) {
-			pci_unmap_single(sp->pdev,
-				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
-				skb->len, PCI_DMA_TODEVICE);
-
-			dev_kfree_skb_irq(skb);
-
-			sp->tx_buff[dirty] = NULL;
-		}
-		dirty = (dirty + 1) % IPG_TFDLIST_LENGTH;
-	}
-
-	sp->tx_dirty += released;
-
-	if (netif_queue_stopped(dev) &&
-	    (sp->tx_current != (sp->tx_dirty + IPG_TFDLIST_LENGTH))) {
-		netif_wake_queue(dev);
-	}
-}
-
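tx_current and tx_dirty above are free-running counters: unsigned subtraction gives the in-flight count even across wrap-around, and only the modulo maps a counter onto a ring slot. A tiny sketch of that accounting (RING_LEN stands in for IPG_TFDLIST_LENGTH):

	#include <stdio.h>

	#define RING_LEN 16	/* stand-in for IPG_TFDLIST_LENGTH */

	int main(void)
	{
		/* Free-running counters, as in the driver. */
		unsigned int tx_current = 35, tx_dirty = 22;

		unsigned int pending = tx_current - tx_dirty;	/* 13 in flight */
		unsigned int entry = tx_dirty % RING_LEN;	/* ring slot 6 */
		int full = (tx_current == tx_dirty + RING_LEN);	/* ring full? */

		printf("pending=%u entry=%u full=%d\n", pending, entry, full);
		return 0;
	}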
-static void ipg_tx_timeout(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-
-	ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA | IPG_AC_NETWORK |
-		  IPG_AC_FIFO);
-
-	spin_lock_irq(&sp->lock);
-
-	/* Re-configure after DMA reset. */
-	if (ipg_io_config(dev) < 0)
-		netdev_info(dev, "Error during re-configuration\n");
-
-	init_tfdlist(dev);
-
-	spin_unlock_irq(&sp->lock);
-
-	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) & IPG_MC_RSVD_MASK,
-		MAC_CTRL);
-}
-
-/*
- * For TxComplete interrupts, free all transmit
- * buffers which have already been transferred via DMA
- * to the IPG.
- */
-static void ipg_nic_txcleanup(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	unsigned int i;
-
-	IPG_DEBUG_MSG("_nic_txcleanup\n");
-
-	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
-		/* Reading the TXSTATUS register clears the
-		 * TX_COMPLETE interrupt.
-		 */
-		u32 txstatusdword = ipg_r32(TX_STATUS);
-
-		IPG_DEBUG_MSG("TxStatus = %08x\n", txstatusdword);
-
-		/* Check for Transmit errors. Error bits only valid if
-		 * TX_COMPLETE bit in the TXSTATUS register is a 1.
-		 */
-		if (!(txstatusdword & IPG_TS_TX_COMPLETE))
-			break;
-
-		/* If in 10Mbps mode, indicate transmit is ready. */
-		if (sp->tenmbpsmode) {
-			netif_wake_queue(dev);
-		}
-
-		/* Transmit error, increment stat counters. */
-		if (txstatusdword & IPG_TS_TX_ERROR) {
-			IPG_DEBUG_MSG("Transmit error\n");
-			sp->stats.tx_errors++;
-		}
-
-		/* Late collision, re-enable transmitter. */
-		if (txstatusdword & IPG_TS_LATE_COLLISION) {
-			IPG_DEBUG_MSG("Late collision on transmit\n");
-			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
-				IPG_MC_RSVD_MASK, MAC_CTRL);
-		}
-
-		/* Maximum collisions, re-enable transmitter. */
-		if (txstatusdword & IPG_TS_TX_MAX_COLL) {
-			IPG_DEBUG_MSG("Maximum collisions on transmit\n");
-			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
-				IPG_MC_RSVD_MASK, MAC_CTRL);
-		}
-
-		/* Transmit underrun, reset and re-enable
-		 * transmitter.
-		 */
-		if (txstatusdword & IPG_TS_TX_UNDERRUN) {
-			IPG_DEBUG_MSG("Transmitter underrun\n");
-			sp->stats.tx_fifo_errors++;
-			ipg_reset(dev, IPG_AC_TX_RESET | IPG_AC_DMA |
-				  IPG_AC_NETWORK | IPG_AC_FIFO);
-
-			/* Re-configure after DMA reset. */
-			if (ipg_io_config(dev) < 0) {
-				netdev_info(dev, "Error during re-configuration\n");
-			}
-			init_tfdlist(dev);
-
-			ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_TX_ENABLE) &
-				IPG_MC_RSVD_MASK, MAC_CTRL);
-		}
-	}
-
-	ipg_nic_txfree(dev);
-}
-
-/* Provides statistical information about the IPG NIC. */
-static struct net_device_stats *ipg_nic_get_stats(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	u16 temp1;
-	u16 temp2;
-
-	IPG_DEBUG_MSG("_nic_get_stats\n");
-
-	/* Check to see if the NIC has been initialized via nic_open,
-	 * before trying to read statistic registers.
-	 */
-	if (!netif_running(dev))
-		return &sp->stats;
-
-	sp->stats.rx_packets += ipg_r32(IPG_FRAMESRCVDOK);
-	sp->stats.tx_packets += ipg_r32(IPG_FRAMESXMTDOK);
-	sp->stats.rx_bytes += ipg_r32(IPG_OCTETRCVOK);
-	sp->stats.tx_bytes += ipg_r32(IPG_OCTETXMTOK);
-	temp1 = ipg_r16(IPG_FRAMESLOSTRXERRORS);
-	sp->stats.rx_errors += temp1;
-	sp->stats.rx_missed_errors += temp1;
-	temp1 = ipg_r32(IPG_SINGLECOLFRAMES) + ipg_r32(IPG_MULTICOLFRAMES) +
-		ipg_r32(IPG_LATECOLLISIONS);
-	temp2 = ipg_r16(IPG_CARRIERSENSEERRORS);
-	sp->stats.collisions += temp1;
-	sp->stats.tx_dropped += ipg_r16(IPG_FRAMESABORTXSCOLLS);
-	sp->stats.tx_errors += ipg_r16(IPG_FRAMESWEXDEFERRAL) +
-		ipg_r32(IPG_FRAMESWDEFERREDXMT) + temp1 + temp2;
-	sp->stats.multicast += ipg_r32(IPG_MCSTOCTETRCVDOK);
-
-	/* detailed tx_errors */
-	sp->stats.tx_carrier_errors += temp2;
-
-	/* detailed rx_errors */
-	sp->stats.rx_length_errors += ipg_r16(IPG_INRANGELENGTHERRORS) +
-		ipg_r16(IPG_FRAMETOOLONGERRORS);
-	sp->stats.rx_crc_errors += ipg_r16(IPG_FRAMECHECKSEQERRORS);
-
-	/* Unutilized IPG statistic registers. */
-	ipg_r32(IPG_MCSTFRAMESRCVDOK);
-
-	return &sp->stats;
-}
-
-/* Restore used receive buffers. */
-static int ipg_nic_rxrestore(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	const unsigned int curr = sp->rx_current;
-	unsigned int dirty = sp->rx_dirty;
-
-	IPG_DEBUG_MSG("_nic_rxrestore\n");
-
-	for (dirty = sp->rx_dirty; curr - dirty > 0; dirty++) {
-		unsigned int entry = dirty % IPG_RFDLIST_LENGTH;
-
-		/* rx_copybreak may poke holes here and there. */
-		if (sp->rx_buff[entry])
-			continue;
-
-		/* Generate a new receive buffer to replace the
-		 * current buffer (which will be released by the
-		 * Linux system).
-		 */
-		if (ipg_get_rxbuff(dev, entry) < 0) {
-			IPG_DEBUG_MSG("Cannot allocate new Rx buffer\n");
-
-			break;
-		}
-
-		/* Reset the RFS field. */
-		sp->rxd[entry].rfs = 0x0000000000000000;
-	}
-	sp->rx_dirty = dirty;
-
-	return 0;
-}
-
-/* Use jumboindex and jumbosize to track jumbo frame status.
- * The initial state is jumboindex = -1 and jumbosize = 0.
- * 1. jumboindex = -1 and jumbosize = 0: the previous jumbo frame completed.
- * 2. jumboindex != -1 and jumbosize != 0: a jumbo frame is being received
- *    and is not yet over size.
- * 3. jumboindex = -1 and jumbosize != 0: the jumbo frame is over size; the
- *    part received so far has already been discarded and the remainder of
- *    the current frame must be discarded as well.
- */
-enum {
-	NORMAL_PACKET,
-	ERROR_PACKET
-};
-
-enum {
-	FRAME_NO_START_NO_END	= 0,
-	FRAME_WITH_START		= 1,
-	FRAME_WITH_END		= 10,
-	FRAME_WITH_START_WITH_END = 11
-};
-
-static void ipg_nic_rx_free_skb(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
-
-	if (sp->rx_buff[entry]) {
-		struct ipg_rx *rxfd = sp->rxd + entry;
-
-		pci_unmap_single(sp->pdev,
-			le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
-			sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-		dev_kfree_skb_irq(sp->rx_buff[entry]);
-		sp->rx_buff[entry] = NULL;
-	}
-}
-
-static int ipg_nic_rx_check_frame_type(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	struct ipg_rx *rxfd = sp->rxd + (sp->rx_current % IPG_RFDLIST_LENGTH);
-	int type = FRAME_NO_START_NO_END;
-
-	if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART)
-		type += FRAME_WITH_START;
-	if (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND)
-		type += FRAME_WITH_END;
-	return type;
-}
-
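The frame-type enum values are picked so that plain addition composes the result as two decimal digits: a start fragment contributes 1, an end fragment 10, and both together 11. A self-contained demonstration, using the IPG_RFS_FRAMESTART/FRAMEEND mask values from ipg.h:

	#include <stdint.h>
	#include <stdio.h>

	#define RFS_FRAMESTART	0x0000000020000000ULL
	#define RFS_FRAMEEND	0x0000000040000000ULL

	enum {
		FRAME_NO_START_NO_END	  = 0,
		FRAME_WITH_START	  = 1,
		FRAME_WITH_END		  = 10,
		FRAME_WITH_START_WITH_END = 11
	};

	static int frame_type(uint64_t rfs)
	{
		int type = FRAME_NO_START_NO_END;

		if (rfs & RFS_FRAMESTART)
			type += FRAME_WITH_START;
		if (rfs & RFS_FRAMEEND)
			type += FRAME_WITH_END;
		return type;
	}

	int main(void)
	{
		/* A complete single-fragment frame: 1 + 10 = 11. */
		printf("%d\n", frame_type(RFS_FRAMESTART | RFS_FRAMEEND));
		return 0;
	}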
-static int ipg_nic_rx_check_error(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	unsigned int entry = sp->rx_current % IPG_RFDLIST_LENGTH;
-	struct ipg_rx *rxfd = sp->rxd + entry;
-
-	if (IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
-	     (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
-	      IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
-	      IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR))) {
-		IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
-			      (unsigned long) rxfd->rfs);
-
-		/* Increment general receive error statistic. */
-		sp->stats.rx_errors++;
-
-		/* Increment detailed receive error statistics. */
-		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
-			IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
-
-			sp->stats.rx_fifo_errors++;
-		}
-
-		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
-			IPG_DEBUG_MSG("RX runt occurred\n");
-			sp->stats.rx_length_errors++;
-		}
-
-		/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME; the
-		 * error count is handled by an IPG statistic register.
-		 */
-
-		if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
-			IPG_DEBUG_MSG("RX alignment error occurred\n");
-			sp->stats.rx_frame_errors++;
-		}
-
-		/* Do nothing for IPG_RFS_RXFCSERROR; the error count
-		 * is handled by an IPG statistic register.
-		 */
-
-		/* Free the memory associated with the RX
-		 * buffer since it is erroneous and we will
-		 * not pass it to higher layer processes.
-		 */
-		if (sp->rx_buff[entry]) {
-			pci_unmap_single(sp->pdev,
-				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
-				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
-			dev_kfree_skb_irq(sp->rx_buff[entry]);
-			sp->rx_buff[entry] = NULL;
-		}
-		return ERROR_PACKET;
-	}
-	return NORMAL_PACKET;
-}
-
-static void ipg_nic_rx_with_start_and_end(struct net_device *dev,
-					  struct ipg_nic_private *sp,
-					  struct ipg_rx *rxfd, unsigned entry)
-{
-	struct ipg_jumbo *jumbo = &sp->jumbo;
-	struct sk_buff *skb;
-	int framelen;
-
-	if (jumbo->found_start) {
-		dev_kfree_skb_irq(jumbo->skb);
-		jumbo->found_start = 0;
-		jumbo->current_size = 0;
-		jumbo->skb = NULL;
-	}
-
-	/* Drop the frame if an Rx error was flagged. */
-	if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
-		return;
-
-	skb = sp->rx_buff[entry];
-	if (!skb)
-		return;
-
-	/* accept this frame and send to upper layer */
-	framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
-	if (framelen > sp->rxfrag_size)
-		framelen = sp->rxfrag_size;
-
-	skb_put(skb, framelen);
-	skb->protocol = eth_type_trans(skb, dev);
-	skb_checksum_none_assert(skb);
-	netif_rx(skb);
-	sp->rx_buff[entry] = NULL;
-}
-
-static void ipg_nic_rx_with_start(struct net_device *dev,
-				  struct ipg_nic_private *sp,
-				  struct ipg_rx *rxfd, unsigned entry)
-{
-	struct ipg_jumbo *jumbo = &sp->jumbo;
-	struct pci_dev *pdev = sp->pdev;
-	struct sk_buff *skb;
-
-	/* Drop the frame if an Rx error was flagged. */
-	if (ipg_nic_rx_check_error(dev) != NORMAL_PACKET)
-		return;
-
-	/* accept this frame and send to upper layer */
-	skb = sp->rx_buff[entry];
-	if (!skb)
-		return;
-
-	if (jumbo->found_start)
-		dev_kfree_skb_irq(jumbo->skb);
-
-	pci_unmap_single(pdev, le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
-			 sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
-	skb_put(skb, sp->rxfrag_size);
-
-	jumbo->found_start = 1;
-	jumbo->current_size = sp->rxfrag_size;
-	jumbo->skb = skb;
-
-	sp->rx_buff[entry] = NULL;
-}
-
-static void ipg_nic_rx_with_end(struct net_device *dev,
-				struct ipg_nic_private *sp,
-				struct ipg_rx *rxfd, unsigned entry)
-{
-	struct ipg_jumbo *jumbo = &sp->jumbo;
-
-	/* Proceed only if no Rx error was flagged. */
-	if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
-		struct sk_buff *skb = sp->rx_buff[entry];
-
-		if (!skb)
-			return;
-
-		if (jumbo->found_start) {
-			int framelen, endframelen;
-
-			framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
-
-			endframelen = framelen - jumbo->current_size;
-			if (framelen > sp->rxsupport_size)
-				dev_kfree_skb_irq(jumbo->skb);
-			else {
-				memcpy(skb_put(jumbo->skb, endframelen),
-				       skb->data, endframelen);
-
-				jumbo->skb->protocol =
-				    eth_type_trans(jumbo->skb, dev);
-
-				skb_checksum_none_assert(jumbo->skb);
-				netif_rx(jumbo->skb);
-			}
-		}
-
-		jumbo->found_start = 0;
-		jumbo->current_size = 0;
-		jumbo->skb = NULL;
-
-		ipg_nic_rx_free_skb(dev);
-	} else {
-		dev_kfree_skb_irq(jumbo->skb);
-		jumbo->found_start = 0;
-		jumbo->current_size = 0;
-		jumbo->skb = NULL;
-	}
-}
-
-static void ipg_nic_rx_no_start_no_end(struct net_device *dev,
-				       struct ipg_nic_private *sp,
-				       struct ipg_rx *rxfd, unsigned entry)
-{
-	struct ipg_jumbo *jumbo = &sp->jumbo;
-
-	/* Proceed only if no Rx error was flagged. */
-	if (ipg_nic_rx_check_error(dev) == NORMAL_PACKET) {
-		struct sk_buff *skb = sp->rx_buff[entry];
-
-		if (skb) {
-			if (jumbo->found_start) {
-				jumbo->current_size += sp->rxfrag_size;
-				if (jumbo->current_size <= sp->rxsupport_size) {
-					memcpy(skb_put(jumbo->skb,
-						       sp->rxfrag_size),
-					       skb->data, sp->rxfrag_size);
-				}
-			}
-			ipg_nic_rx_free_skb(dev);
-		}
-	} else {
-		dev_kfree_skb_irq(jumbo->skb);
-		jumbo->found_start = 0;
-		jumbo->current_size = 0;
-		jumbo->skb = NULL;
-	}
-}
-
-static int ipg_nic_rx_jumbo(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	unsigned int curr = sp->rx_current;
-	void __iomem *ioaddr = sp->ioaddr;
-	unsigned int i;
-
-	IPG_DEBUG_MSG("_nic_rx\n");
-
-	for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
-		unsigned int entry = curr % IPG_RFDLIST_LENGTH;
-		struct ipg_rx *rxfd = sp->rxd + entry;
-
-		if (!(rxfd->rfs & cpu_to_le64(IPG_RFS_RFDDONE)))
-			break;
-
-		switch (ipg_nic_rx_check_frame_type(dev)) {
-		case FRAME_WITH_START_WITH_END:
-			ipg_nic_rx_with_start_and_end(dev, sp, rxfd, entry);
-			break;
-		case FRAME_WITH_START:
-			ipg_nic_rx_with_start(dev, sp, rxfd, entry);
-			break;
-		case FRAME_WITH_END:
-			ipg_nic_rx_with_end(dev, sp, rxfd, entry);
-			break;
-		case FRAME_NO_START_NO_END:
-			ipg_nic_rx_no_start_no_end(dev, sp, rxfd, entry);
-			break;
-		}
-	}
-
-	sp->rx_current = curr;
-
-	if (i == IPG_MAXRFDPROCESS_COUNT) {
-		/* There are more RFDs to process, however the
-		 * allocated amount of RFD processing time has
-		 * expired. Assert Interrupt Requested to make
-		 * sure we come back to process the remaining RFDs.
-		 */
-		ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
-	}
-
-	ipg_nic_rxrestore(dev);
-
-	return 0;
-}
-
-static int ipg_nic_rx(struct net_device *dev)
-{
-	/* Transfer received Ethernet frames to higher network layers. */
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	unsigned int curr = sp->rx_current;
-	void __iomem *ioaddr = sp->ioaddr;
-	struct ipg_rx *rxfd;
-	unsigned int i;
-
-	IPG_DEBUG_MSG("_nic_rx\n");
-
-#define __RFS_MASK \
-	cpu_to_le64(IPG_RFS_RFDDONE | IPG_RFS_FRAMESTART | IPG_RFS_FRAMEEND)
-
-	for (i = 0; i < IPG_MAXRFDPROCESS_COUNT; i++, curr++) {
-		unsigned int entry = curr % IPG_RFDLIST_LENGTH;
-		struct sk_buff *skb = sp->rx_buff[entry];
-		unsigned int framelen;
-
-		rxfd = sp->rxd + entry;
-
-		if (((rxfd->rfs & __RFS_MASK) != __RFS_MASK) || !skb)
-			break;
-
-		/* Get received frame length. */
-		framelen = le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFRAMELEN;
-
-		/* Check for jumbo frame arrival with too small
-		 * RXFRAG_SIZE.
-		 */
-		if (framelen > sp->rxfrag_size) {
-			IPG_DEBUG_MSG("RFS FrameLen > allocated fragment size\n");
-
-			framelen = sp->rxfrag_size;
-		}
-
-		if ((IPG_DROP_ON_RX_ETH_ERRORS && (le64_to_cpu(rxfd->rfs) &
-		       (IPG_RFS_RXFIFOOVERRUN | IPG_RFS_RXRUNTFRAME |
-			IPG_RFS_RXALIGNMENTERROR | IPG_RFS_RXFCSERROR |
-			IPG_RFS_RXOVERSIZEDFRAME | IPG_RFS_RXLENGTHERROR)))) {
-
-			IPG_DEBUG_MSG("Rx error, RFS = %016lx\n",
-				      (unsigned long int) rxfd->rfs);
-
-			/* Increment general receive error statistic. */
-			sp->stats.rx_errors++;
-
-			/* Increment detailed receive error statistics. */
-			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXFIFOOVERRUN) {
-				IPG_DEBUG_MSG("RX FIFO overrun occurred\n");
-				sp->stats.rx_fifo_errors++;
-			}
-
-			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXRUNTFRAME) {
-				IPG_DEBUG_MSG("RX runt occurred\n");
-				sp->stats.rx_length_errors++;
-			}
-
-			/* Do nothing for IPG_RFS_RXOVERSIZEDFRAME;
-			 * the error count is handled by an IPG
-			 * statistic register.
-			 */
-
-			if (le64_to_cpu(rxfd->rfs) & IPG_RFS_RXALIGNMENTERROR) {
-				IPG_DEBUG_MSG("RX alignment error occurred\n");
-				sp->stats.rx_frame_errors++;
-			}
-
-			/* Do nothing for IPG_RFS_RXFCSERROR; the
-			 * error count is handled by an IPG
-			 * statistic register.
-			 */
-
-			/* Free the memory associated with the RX
-			 * buffer since it is erroneous and we will
-			 * not pass it to higher layer processes.
-			 */
-			if (skb) {
-				__le64 info = rxfd->frag_info;
-
-				pci_unmap_single(sp->pdev,
-					le64_to_cpu(info) & ~IPG_RFI_FRAGLEN,
-					sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-
-				dev_kfree_skb_irq(skb);
-			}
-		} else {
-
-			/* Adjust the new buffer length to accommodate the size
-			 * of the received frame.
-			 */
-			skb_put(skb, framelen);
-
-			/* Set the buffer's protocol field to Ethernet. */
-			skb->protocol = eth_type_trans(skb, dev);
-
-			/* The IPG encountered an error with (or
-			 * there were no) IP/TCP/UDP checksums.
-			 * This may or may not indicate an invalid
-			 * IP/TCP/UDP frame was received. Let the
-			 * upper layer decide.
-			 */
-			skb_checksum_none_assert(skb);
-
-			/* Hand off frame for higher layer processing.
-			 * The function netif_rx() releases the sk_buff
-			 * when processing completes.
-			 */
-			netif_rx(skb);
-		}
-
-		/* Ensure the RX buffer is not reused by the IPG. */
-		sp->rx_buff[entry] = NULL;
-	}
-
-	/*
-	 * If there are more RFDs to process and the allocated amount of RFD
-	 * processing time has expired, assert Interrupt Requested to make
-	 * sure we come back to process the remaining RFDs.
-	 */
-	if (i == IPG_MAXRFDPROCESS_COUNT)
-		ipg_w32(ipg_r32(ASIC_CTRL) | IPG_AC_INT_REQUEST, ASIC_CTRL);
-
-#ifdef IPG_DEBUG
-	/* Check if the RFD list contained no receive frame data. */
-	if (!i)
-		sp->EmptyRFDListCount++;
-#endif
-	while ((le64_to_cpu(rxfd->rfs) & IPG_RFS_RFDDONE) &&
-	       !((le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMESTART) &&
-		 (le64_to_cpu(rxfd->rfs) & IPG_RFS_FRAMEEND))) {
-		unsigned int entry = curr++ % IPG_RFDLIST_LENGTH;
-
-		rxfd = sp->rxd + entry;
-
-		IPG_DEBUG_MSG("Frame requires multiple RFDs\n");
-
-		/* This is an unexpected event; additional code would be
-		 * needed to handle it properly, so for the time being
-		 * just disregard the frame.
-		 */
-
-		/* Free the memory associated with the RX
-		 * buffer since it is erroneous and we will
-		 * not pass it to higher layer processes.
-		 */
-		if (sp->rx_buff[entry]) {
-			pci_unmap_single(sp->pdev,
-				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
-				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-			dev_kfree_skb_irq(sp->rx_buff[entry]);
-		}
-
-		/* Ensure the RX buffer is not reused by the IPG. */
-		sp->rx_buff[entry] = NULL;
-	}
-
-	sp->rx_current = curr;
-
-	/* Check to see if there are a minimum number of used
-	 * RFDs before restoring any (should improve performance.)
-	 */
-	if ((curr - sp->rx_dirty) >= IPG_MINUSEDRFDSTOFREE)
-		ipg_nic_rxrestore(dev);
-
-	return 0;
-}
-
-static void ipg_reset_after_host_error(struct work_struct *work)
-{
-	struct ipg_nic_private *sp =
-		container_of(work, struct ipg_nic_private, task.work);
-	struct net_device *dev = sp->dev;
-
-	/*
-	 * Acknowledge HostError interrupt by resetting
-	 * IPG DMA and HOST.
-	 */
-	ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
-
-	init_rfdlist(dev);
-	init_tfdlist(dev);
-
-	if (ipg_io_config(dev) < 0) {
-		netdev_info(dev, "Cannot recover from PCI error\n");
-		schedule_delayed_work(&sp->task, HZ);
-	}
-}
-
-static irqreturn_t ipg_interrupt_handler(int irq, void *dev_inst)
-{
-	struct net_device *dev = dev_inst;
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	unsigned int handled = 0;
-	u16 status;
-
-	IPG_DEBUG_MSG("_interrupt_handler\n");
-
-	if (sp->is_jumbo)
-		ipg_nic_rxrestore(dev);
-
-	spin_lock(&sp->lock);
-
-	/* Get interrupt source information, and acknowledge
-	 * some (i.e. TxDMAComplete, RxDMAComplete, RxEarly,
-	 * IntRequested, MacControlFrame, LinkEvent) interrupts
-	 * if issued. Also, all IPG interrupts are disabled by
-	 * reading IntStatusAck.
-	 */
-	status = ipg_r16(INT_STATUS_ACK);
-
-	IPG_DEBUG_MSG("IntStatusAck = %04x\n", status);
-
-	/* Not our interrupt: shared IRQ or device-remove event. */
-	if (!(status & IPG_IS_RSVD_MASK))
-		goto out_enable;
-
-	handled = 1;
-
-	if (unlikely(!netif_running(dev)))
-		goto out_unlock;
-
-	/* If RFDListEnd interrupt, restore all used RFDs. */
-	if (status & IPG_IS_RFD_LIST_END) {
-		IPG_DEBUG_MSG("RFDListEnd Interrupt\n");
-
-		/* The RFD list end indicates an RFD was encountered
-		 * with a 0 NextPtr, or with an RFDDone bit set to 1
-		 * (indicating the RFD is not ready for use by the
-		 * IPG). Try to restore all RFDs.
-		 */
-		ipg_nic_rxrestore(dev);
-
-#ifdef IPG_DEBUG
-		/* Increment the RFDlistendCount counter. */
-		sp->RFDlistendCount++;
-#endif
-	}
-
-	/* If RFDListEnd, RxDMAPriority, RxDMAComplete, or
-	 * IntRequested interrupt, process received frames. */
-	if ((status & IPG_IS_RX_DMA_PRIORITY) ||
-	    (status & IPG_IS_RFD_LIST_END) ||
-	    (status & IPG_IS_RX_DMA_COMPLETE) ||
-	    (status & IPG_IS_INT_REQUESTED)) {
-#ifdef IPG_DEBUG
-		/* Increment the RFD list checked counter if interrupted
-		 * only to check the RFD list. */
-		if (status & (~(IPG_IS_RX_DMA_PRIORITY | IPG_IS_RFD_LIST_END |
-				IPG_IS_RX_DMA_COMPLETE | IPG_IS_INT_REQUESTED) &
-			       (IPG_IS_HOST_ERROR | IPG_IS_TX_DMA_COMPLETE |
-				IPG_IS_LINK_EVENT | IPG_IS_TX_COMPLETE |
-				IPG_IS_UPDATE_STATS)))
-			sp->RFDListCheckedCount++;
-#endif
-
-		if (sp->is_jumbo)
-			ipg_nic_rx_jumbo(dev);
-		else
-			ipg_nic_rx(dev);
-	}
-
-	/* If TxDMAComplete interrupt, free used TFDs. */
-	if (status & IPG_IS_TX_DMA_COMPLETE)
-		ipg_nic_txfree(dev);
-
-	/* TxComplete interrupts indicate one of numerous actions.
-	 * Determine what action to take based on TXSTATUS register.
-	 */
-	if (status & IPG_IS_TX_COMPLETE)
-		ipg_nic_txcleanup(dev);
-
-	/* If UpdateStats interrupt, update Linux Ethernet statistics */
-	if (status & IPG_IS_UPDATE_STATS)
-		ipg_nic_get_stats(dev);
-
-	/* If HostError interrupt, reset IPG. */
-	if (status & IPG_IS_HOST_ERROR) {
-		IPG_DDEBUG_MSG("HostError Interrupt\n");
-
-		schedule_delayed_work(&sp->task, 0);
-	}
-
-	/* If LinkEvent interrupt, resolve autonegotiation. */
-	if (status & IPG_IS_LINK_EVENT) {
-		if (ipg_config_autoneg(dev) < 0)
-			netdev_info(dev, "Auto-negotiation error\n");
-	}
-
-	/* If MACCtrlFrame interrupt, do nothing. */
-	if (status & IPG_IS_MAC_CTRL_FRAME)
-		IPG_DEBUG_MSG("MACCtrlFrame interrupt\n");
-
-	/* If RxComplete interrupt, do nothing. */
-	if (status & IPG_IS_RX_COMPLETE)
-		IPG_DEBUG_MSG("RxComplete interrupt\n");
-
-	/* If RxEarly interrupt, do nothing. */
-	if (status & IPG_IS_RX_EARLY)
-		IPG_DEBUG_MSG("RxEarly interrupt\n");
-
-out_enable:
-	/* Re-enable IPG interrupts. */
-	ipg_w16(IPG_IE_TX_DMA_COMPLETE | IPG_IE_RX_DMA_COMPLETE |
-		IPG_IE_HOST_ERROR | IPG_IE_INT_REQUESTED | IPG_IE_TX_COMPLETE |
-		IPG_IE_LINK_EVENT | IPG_IE_UPDATE_STATS, INT_ENABLE);
-out_unlock:
-	spin_unlock(&sp->lock);
-
-	return IRQ_RETVAL(handled);
-}
-
-static void ipg_rx_clear(struct ipg_nic_private *sp)
-{
-	unsigned int i;
-
-	for (i = 0; i < IPG_RFDLIST_LENGTH; i++) {
-		if (sp->rx_buff[i]) {
-			struct ipg_rx *rxfd = sp->rxd + i;
-
-			dev_kfree_skb_irq(sp->rx_buff[i]);
-			sp->rx_buff[i] = NULL;
-			pci_unmap_single(sp->pdev,
-				le64_to_cpu(rxfd->frag_info) & ~IPG_RFI_FRAGLEN,
-				sp->rx_buf_sz, PCI_DMA_FROMDEVICE);
-		}
-	}
-}
-
-static void ipg_tx_clear(struct ipg_nic_private *sp)
-{
-	unsigned int i;
-
-	for (i = 0; i < IPG_TFDLIST_LENGTH; i++) {
-		if (sp->tx_buff[i]) {
-			struct ipg_tx *txfd = sp->txd + i;
-
-			pci_unmap_single(sp->pdev,
-				le64_to_cpu(txfd->frag_info) & ~IPG_TFI_FRAGLEN,
-				sp->tx_buff[i]->len, PCI_DMA_TODEVICE);
-
-			dev_kfree_skb_irq(sp->tx_buff[i]);
-
-			sp->tx_buff[i] = NULL;
-		}
-	}
-}
-
-static int ipg_nic_open(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	struct pci_dev *pdev = sp->pdev;
-	int rc;
-
-	IPG_DEBUG_MSG("_nic_open\n");
-
-	sp->rx_buf_sz = sp->rxsupport_size;
-
-	/* Check for interrupt line conflicts, and request interrupt
-	 * line for IPG.
-	 *
-	 * IMPORTANT: Disable IPG interrupts prior to registering
-	 *            IRQ.
-	 */
-	ipg_w16(0x0000, INT_ENABLE);
-
-	/* Register the interrupt line to be used by the IPG within
-	 * the Linux system.
-	 */
-	rc = request_irq(pdev->irq, ipg_interrupt_handler, IRQF_SHARED,
-			 dev->name, dev);
-	if (rc < 0) {
-		netdev_info(dev, "Error when requesting interrupt\n");
-		goto out;
-	}
-
-	dev->irq = pdev->irq;
-
-	rc = -ENOMEM;
-
-	sp->rxd = dma_alloc_coherent(&pdev->dev, IPG_RX_RING_BYTES,
-				     &sp->rxd_map, GFP_KERNEL);
-	if (!sp->rxd)
-		goto err_free_irq_0;
-
-	sp->txd = dma_alloc_coherent(&pdev->dev, IPG_TX_RING_BYTES,
-				     &sp->txd_map, GFP_KERNEL);
-	if (!sp->txd)
-		goto err_free_rx_1;
-
-	rc = init_rfdlist(dev);
-	if (rc < 0) {
-		netdev_info(dev, "Error during configuration\n");
-		goto err_free_tx_2;
-	}
-
-	init_tfdlist(dev);
-
-	rc = ipg_io_config(dev);
-	if (rc < 0) {
-		netdev_info(dev, "Error during configuration\n");
-		goto err_release_tfdlist_3;
-	}
-
-	/* Resolve autonegotiation. */
-	if (ipg_config_autoneg(dev) < 0)
-		netdev_info(dev, "Auto-negotiation error\n");
-
-	/* initialize JUMBO Frame control variable */
-	sp->jumbo.found_start = 0;
-	sp->jumbo.current_size = 0;
-	sp->jumbo.skb = NULL;
-
-	/* Enable transmit and receive operation of the IPG. */
-	ipg_w32((ipg_r32(MAC_CTRL) | IPG_MC_RX_ENABLE | IPG_MC_TX_ENABLE) &
-		 IPG_MC_RSVD_MASK, MAC_CTRL);
-
-	netif_start_queue(dev);
-out:
-	return rc;
-
-err_release_tfdlist_3:
-	ipg_tx_clear(sp);
-	ipg_rx_clear(sp);
-err_free_tx_2:
-	dma_free_coherent(&pdev->dev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
-err_free_rx_1:
-	dma_free_coherent(&pdev->dev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
-err_free_irq_0:
-	free_irq(pdev->irq, dev);
-	goto out;
-}
-
-static int ipg_nic_stop(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	struct pci_dev *pdev = sp->pdev;
-
-	IPG_DEBUG_MSG("_nic_stop\n");
-
-	netif_stop_queue(dev);
-
-	IPG_DUMPTFDLIST(dev);
-
-	do {
-		(void) ipg_r16(INT_STATUS_ACK);
-
-		ipg_reset(dev, IPG_AC_GLOBAL_RESET | IPG_AC_HOST | IPG_AC_DMA);
-
-		synchronize_irq(pdev->irq);
-	} while (ipg_r16(INT_ENABLE) & IPG_IE_RSVD_MASK);
-
-	ipg_rx_clear(sp);
-
-	ipg_tx_clear(sp);
-
-	pci_free_consistent(pdev, IPG_RX_RING_BYTES, sp->rxd, sp->rxd_map);
-	pci_free_consistent(pdev, IPG_TX_RING_BYTES, sp->txd, sp->txd_map);
-
-	free_irq(pdev->irq, dev);
-
-	return 0;
-}
-
-static netdev_tx_t ipg_nic_hard_start_xmit(struct sk_buff *skb,
-					   struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	unsigned int entry = sp->tx_current % IPG_TFDLIST_LENGTH;
-	unsigned long flags;
-	struct ipg_tx *txfd;
-
-	IPG_DDEBUG_MSG("_nic_hard_start_xmit\n");
-
-	/* If in 10Mbps mode, stop the transmit queue so
-	 * no more transmit frames are accepted.
-	 */
-	if (sp->tenmbpsmode)
-		netif_stop_queue(dev);
-
-	if (sp->reset_current_tfd) {
-		sp->reset_current_tfd = 0;
-		entry = 0;
-	}
-
-	txfd = sp->txd + entry;
-
-	sp->tx_buff[entry] = skb;
-
-	/* Clear all TFC fields, except TFDDONE. */
-	txfd->tfc = cpu_to_le64(IPG_TFC_TFDDONE);
-
-	/* Specify the TFC field within the TFD. */
-	txfd->tfc |= cpu_to_le64(IPG_TFC_WORDALIGNDISABLED |
-		(IPG_TFC_FRAMEID & sp->tx_current) |
-		(IPG_TFC_FRAGCOUNT & (1 << 24)));
-	/*
-	 * 16--17 (WordAlign) <- 3 (disable),
-	 * 0--15 (FrameId) <- sp->tx_current,
-	 * 24--27 (FragCount) <- 1
-	 */
-
-	/* Request TxComplete interrupts at an interval defined
-	 * by the constant IPG_FRAMESBETWEENTXCOMPLETES.
-	 * Request TxComplete interrupt for every frame
-	 * if in 10Mbps mode to accommodate problem with 10Mbps
-	 * processing.
-	 */
-	if (sp->tenmbpsmode)
-		txfd->tfc |= cpu_to_le64(IPG_TFC_TXINDICATE);
-	txfd->tfc |= cpu_to_le64(IPG_TFC_TXDMAINDICATE);
-	/* Based on compilation option, determine if FCS is to be
-	 * appended to transmit frame by IPG.
-	 */
-	if (!(IPG_APPEND_FCS_ON_TX))
-		txfd->tfc |= cpu_to_le64(IPG_TFC_FCSAPPENDDISABLE);
-
-	/* Based on compilation option, determine if IP, TCP and/or
-	 * UDP checksums are to be added to transmit frame by IPG.
-	 */
-	if (IPG_ADD_IPCHECKSUM_ON_TX)
-		txfd->tfc |= cpu_to_le64(IPG_TFC_IPCHECKSUMENABLE);
-
-	if (IPG_ADD_TCPCHECKSUM_ON_TX)
-		txfd->tfc |= cpu_to_le64(IPG_TFC_TCPCHECKSUMENABLE);
-
-	if (IPG_ADD_UDPCHECKSUM_ON_TX)
-		txfd->tfc |= cpu_to_le64(IPG_TFC_UDPCHECKSUMENABLE);
-
-	/* Based on compilation option, determine if VLAN tag info is to be
-	 * inserted into transmit frame by IPG.
-	 */
-	if (IPG_INSERT_MANUAL_VLAN_TAG) {
-		txfd->tfc |= cpu_to_le64(IPG_TFC_VLANTAGINSERT |
-			((u64) IPG_MANUAL_VLAN_VID << 32) |
-			((u64) IPG_MANUAL_VLAN_CFI << 44) |
-			((u64) IPG_MANUAL_VLAN_USERPRIORITY << 45));
-	}
-
-	/* The fragment start location within system memory is defined
-	 * by the sk_buff structure's data field. The bus address of
-	 * this location for DMA is obtained via pci_map_single().
-	 */
-	txfd->frag_info = cpu_to_le64(pci_map_single(sp->pdev, skb->data,
-		skb->len, PCI_DMA_TODEVICE));
-
-	/* The length of the fragment within system memory is defined by
-	 * the sk_buff structure's len field.
-	 */
-	txfd->frag_info |= cpu_to_le64(IPG_TFI_FRAGLEN &
-		((u64) (skb->len & 0xffff) << 48));
-
-	/* Clear the TFDDone bit last to indicate the TFD is ready
-	 * for transfer to the IPG.
-	 */
-	txfd->tfc &= cpu_to_le64(~IPG_TFC_TFDDONE);
-
-	spin_lock_irqsave(&sp->lock, flags);
-
-	sp->tx_current++;
-
-	mmiowb();
-
-	ipg_w32(IPG_DC_TX_DMA_POLL_NOW, DMA_CTRL);
-
-	if (sp->tx_current == (sp->tx_dirty + IPG_TFDLIST_LENGTH))
-		netif_stop_queue(dev);
-
-	spin_unlock_irqrestore(&sp->lock, flags);
-
-	return NETDEV_TX_OK;
-}
-
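The TFC word assembled above packs FrameId, WordAlign, FragCount and TFDDone into fixed bit ranges of one 64-bit descriptor field. A standalone sketch of the same packing for a single-fragment frame, using the mask values from ipg.h:

	#include <stdint.h>
	#include <stdio.h>

	/* Mask values as defined in ipg.h. */
	#define TFC_FRAMEID		0x000000000000FFFFULL
	#define TFC_WORDALIGNDISABLED	0x0000000000030000ULL
	#define TFC_FRAGCOUNT		0x000000000F000000ULL
	#define TFC_TFDDONE		0x0000000080000000ULL

	int main(void)
	{
		unsigned int tx_current = 0x1234;
		uint64_t tfc = TFC_TFDDONE;	/* TFDDone held while filling in */

		tfc |= TFC_WORDALIGNDISABLED |
		       (TFC_FRAMEID & tx_current) |
		       (TFC_FRAGCOUNT & (1ULL << 24));	/* FragCount = 1 */

		/* Clear TFDDone last: the descriptor is now live. */
		tfc &= ~TFC_TFDDONE;

		printf("tfc = %016llx\n", (unsigned long long)tfc);
		return 0;
	}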
-static void ipg_set_phy_default_param(unsigned char rev,
-				      struct net_device *dev, int phy_address)
-{
-	unsigned short length;
-	unsigned char revision;
-	const unsigned short *phy_param;
-	unsigned short address, value;
-
-	phy_param = &DefaultPhyParam[0];
-	length = *phy_param & 0x00FF;
-	revision = (unsigned char)((*phy_param) >> 8);
-	phy_param++;
-	while (length != 0) {
-		if (rev == revision) {
-			while (length > 1) {
-				address = *phy_param;
-				value = *(phy_param + 1);
-				phy_param += 2;
-				mdio_write(dev, phy_address, address, value);
-				length -= 4;
-			}
-			break;
-		} else {
-			phy_param += length / 2;
-			length = *phy_param & 0x00FF;
-			revision = (unsigned char)((*phy_param) >> 8);
-			phy_param++;
-		}
-	}
-}
-
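The DefaultPhyParam layout is implied by the walker above: each block starts with a word holding (revision << 8) | remaining_length_in_bytes, followed by register address/value pairs, and a zero length ends the scan. A sketch with purely illustrative table contents (the real data lived in DefaultPhyParam[]):

	#include <stdio.h>

	/* Illustrative data only; layout inferred from the walker above. */
	static const unsigned short phy_param[] = {
		(0x40 << 8) | 9,	/* block for chip revision 0x40 */
		0x001f, 0x0001,		/* write 0x0001 to register 0x1f */
		0x001e, 0x2000,		/* write 0x2000 to register 0x1e */
		0x0000			/* length 0: end of table */
	};

	int main(void)
	{
		const unsigned short *p = phy_param;
		unsigned short length = *p & 0x00FF;
		unsigned char revision = (unsigned char)(*p >> 8);
		unsigned char rev = 0x40;	/* revision being probed */

		p++;
		while (length != 0) {
			if (rev == revision) {
				while (length > 1) {
					printf("reg 0x%02x <- 0x%04x\n", p[0], p[1]);
					p += 2;
					length -= 4;
				}
				break;
			}
			p += length / 2;
			length = *p & 0x00FF;
			revision = (unsigned char)(*p >> 8);
			p++;
		}
		return 0;
	}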
-static int read_eeprom(struct net_device *dev, int eep_addr)
-{
-	void __iomem *ioaddr = ipg_ioaddr(dev);
-	unsigned int i;
-	int ret = 0;
-	u16 value;
-
-	value = IPG_EC_EEPROM_READOPCODE | (eep_addr & 0xff);
-	ipg_w16(value, EEPROM_CTRL);
-
-	for (i = 0; i < 1000; i++) {
-		u16 data;
-
-		mdelay(10);
-		data = ipg_r16(EEPROM_CTRL);
-		if (!(data & IPG_EC_EEPROM_BUSY)) {
-			ret = ipg_r16(EEPROM_DATA);
-			break;
-		}
-	}
-	return ret;
-}
-
-static void ipg_init_mii(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	struct mii_if_info *mii_if = &sp->mii_if;
-	int phyaddr;
-
-	mii_if->dev          = dev;
-	mii_if->mdio_read    = mdio_read;
-	mii_if->mdio_write   = mdio_write;
-	mii_if->phy_id_mask  = 0x1f;
-	mii_if->reg_num_mask = 0x1f;
-
-	mii_if->phy_id = phyaddr = ipg_find_phyaddr(dev);
-
-	if (phyaddr != 0x1f) {
-		u16 mii_phyctrl, mii_1000cr;
-
-		mii_1000cr  = mdio_read(dev, phyaddr, MII_CTRL1000);
-		mii_1000cr |= ADVERTISE_1000FULL | ADVERTISE_1000HALF |
-			GMII_PHY_1000BASETCONTROL_PreferMaster;
-		mdio_write(dev, phyaddr, MII_CTRL1000, mii_1000cr);
-
-		mii_phyctrl = mdio_read(dev, phyaddr, MII_BMCR);
-
-		/* Set default phyparam */
-		ipg_set_phy_default_param(sp->pdev->revision, dev, phyaddr);
-
-		/* Reset PHY */
-		mii_phyctrl |= BMCR_RESET | BMCR_ANRESTART;
-		mdio_write(dev, phyaddr, MII_BMCR, mii_phyctrl);
-
-	}
-}
-
-static int ipg_hw_init(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	void __iomem *ioaddr = sp->ioaddr;
-	unsigned int i;
-	int rc;
-
-	/* Read the LED Mode Configuration from the EEPROM. */
-	sp->led_mode = read_eeprom(dev, 6);
-
-	/* Reset all functions within the IPG. Do not assert
-	 * RST_OUT, as it is not compatible with some PHYs.
-	 */
-	rc = ipg_reset(dev, IPG_RESET_MASK);
-	if (rc < 0)
-		goto out;
-
-	ipg_init_mii(dev);
-
-	/* Read MAC Address from EEPROM */
-	for (i = 0; i < 3; i++)
-		sp->station_addr[i] = read_eeprom(dev, 16 + i);
-
-	for (i = 0; i < 3; i++)
-		ipg_w16(sp->station_addr[i], STATION_ADDRESS_0 + 2*i);
-
-	/* Set station address in ethernet_device structure. */
-	dev->dev_addr[0] =  ipg_r16(STATION_ADDRESS_0) & 0x00ff;
-	dev->dev_addr[1] = (ipg_r16(STATION_ADDRESS_0) & 0xff00) >> 8;
-	dev->dev_addr[2] =  ipg_r16(STATION_ADDRESS_1) & 0x00ff;
-	dev->dev_addr[3] = (ipg_r16(STATION_ADDRESS_1) & 0xff00) >> 8;
-	dev->dev_addr[4] =  ipg_r16(STATION_ADDRESS_2) & 0x00ff;
-	dev->dev_addr[5] = (ipg_r16(STATION_ADDRESS_2) & 0xff00) >> 8;
-out:
-	return rc;
-}
-
-static int ipg_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	int rc;
-
-	mutex_lock(&sp->mii_mutex);
-	rc = generic_mii_ioctl(&sp->mii_if, if_mii(ifr), cmd, NULL);
-	mutex_unlock(&sp->mii_mutex);
-
-	return rc;
-}
-
-static int ipg_nic_change_mtu(struct net_device *dev, int new_mtu)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	int err;
-
-	/* Function to accommodate changes to Maximum Transfer Unit
-	 * (or MTU) of the IPG NIC. Cannot use the default function,
-	 * since the default will not allow for MTU > 1500 bytes.
-	 */
-
-	IPG_DEBUG_MSG("_nic_change_mtu\n");
-
-	/*
-	 * Check that the new MTU value is between 68 (14 byte header, 46 byte
-	 * payload, 4 byte FCS) and 10 KB, which is the largest supported MTU.
-	 */
-	if (new_mtu < 68 || new_mtu > 10240)
-		return -EINVAL;
-
-	err = ipg_nic_stop(dev);
-	if (err)
-		return err;
-
-	dev->mtu = new_mtu;
-
-	sp->max_rxframe_size = new_mtu;
-
-	sp->rxfrag_size = new_mtu;
-	if (sp->rxfrag_size > 4088)
-		sp->rxfrag_size = 4088;
-
-	sp->rxsupport_size = sp->max_rxframe_size;
-
-	if (new_mtu > 0x0600)
-		sp->is_jumbo = true;
-	else
-		sp->is_jumbo = false;
-
-	return ipg_nic_open(dev);
-}
-
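The sizing policy in ipg_nic_change_mtu() reduces to a few lines of arithmetic: receive fragments are capped at 4088 bytes, and anything above 0x600 (1536) bytes switches the driver into jumbo mode. A sketch of the same policy:

	#include <stdbool.h>
	#include <stdio.h>

	int main(void)
	{
		int new_mtu = 9000;	/* example jumbo MTU */

		int max_rxframe_size = new_mtu;
		int rxfrag_size = new_mtu > 4088 ? 4088 : new_mtu;
		int rxsupport_size = max_rxframe_size;
		bool is_jumbo = new_mtu > 0x0600;	/* > 1536 bytes */

		/* A 9000-byte frame then spans three 4088-byte fragments. */
		printf("frag=%d support=%d jumbo=%d\n",
		       rxfrag_size, rxsupport_size, is_jumbo);
		return 0;
	}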
-static int ipg_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	int rc;
-
-	mutex_lock(&sp->mii_mutex);
-	rc = mii_ethtool_gset(&sp->mii_if, cmd);
-	mutex_unlock(&sp->mii_mutex);
-
-	return rc;
-}
-
-static int ipg_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	int rc;
-
-	mutex_lock(&sp->mii_mutex);
-	rc = mii_ethtool_sset(&sp->mii_if, cmd);
-	mutex_unlock(&sp->mii_mutex);
-
-	return rc;
-}
-
-static int ipg_nway_reset(struct net_device *dev)
-{
-	struct ipg_nic_private *sp = netdev_priv(dev);
-	int rc;
-
-	mutex_lock(&sp->mii_mutex);
-	rc = mii_nway_restart(&sp->mii_if);
-	mutex_unlock(&sp->mii_mutex);
-
-	return rc;
-}
-
-static const struct ethtool_ops ipg_ethtool_ops = {
-	.get_settings = ipg_get_settings,
-	.set_settings = ipg_set_settings,
-	.nway_reset   = ipg_nway_reset,
-};
-
-static void ipg_remove(struct pci_dev *pdev)
-{
-	struct net_device *dev = pci_get_drvdata(pdev);
-	struct ipg_nic_private *sp = netdev_priv(dev);
-
-	IPG_DEBUG_MSG("_remove\n");
-
-	/* Un-register Ethernet device. */
-	unregister_netdev(dev);
-
-	pci_iounmap(pdev, sp->ioaddr);
-
-	pci_release_regions(pdev);
-
-	free_netdev(dev);
-	pci_disable_device(pdev);
-}
-
-static const struct net_device_ops ipg_netdev_ops = {
-	.ndo_open		= ipg_nic_open,
-	.ndo_stop		= ipg_nic_stop,
-	.ndo_start_xmit		= ipg_nic_hard_start_xmit,
-	.ndo_get_stats		= ipg_nic_get_stats,
-	.ndo_set_rx_mode	= ipg_nic_set_multicast_list,
-	.ndo_do_ioctl		= ipg_ioctl,
-	.ndo_tx_timeout 	= ipg_tx_timeout,
-	.ndo_change_mtu 	= ipg_nic_change_mtu,
-	.ndo_set_mac_address	= eth_mac_addr,
-	.ndo_validate_addr	= eth_validate_addr,
-};
-
-static int ipg_probe(struct pci_dev *pdev, const struct pci_device_id *id)
-{
-	unsigned int i = id->driver_data;
-	struct ipg_nic_private *sp;
-	struct net_device *dev;
-	void __iomem *ioaddr;
-	int rc;
-
-	rc = pci_enable_device(pdev);
-	if (rc < 0)
-		goto out;
-
-	pr_info("%s: %s\n", pci_name(pdev), ipg_brand_name[i]);
-
-	pci_set_master(pdev);
-
-	rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
-	if (rc < 0) {
-		rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
-		if (rc < 0) {
-			pr_err("%s: DMA config failed\n", pci_name(pdev));
-			goto err_disable_0;
-		}
-	}
-
-	/*
-	 * Initialize net device.
-	 */
-	dev = alloc_etherdev(sizeof(struct ipg_nic_private));
-	if (!dev) {
-		rc = -ENOMEM;
-		goto err_disable_0;
-	}
-
-	sp = netdev_priv(dev);
-	spin_lock_init(&sp->lock);
-	mutex_init(&sp->mii_mutex);
-
-	sp->is_jumbo = IPG_IS_JUMBO;
-	sp->rxfrag_size = IPG_RXFRAG_SIZE;
-	sp->rxsupport_size = IPG_RXSUPPORT_SIZE;
-	sp->max_rxframe_size = IPG_MAX_RXFRAME_SIZE;
-
-	/* Declare IPG NIC functions for Ethernet device methods.
-	 */
-	dev->netdev_ops = &ipg_netdev_ops;
-	SET_NETDEV_DEV(dev, &pdev->dev);
-	dev->ethtool_ops = &ipg_ethtool_ops;
-
-	rc = pci_request_regions(pdev, DRV_NAME);
-	if (rc)
-		goto err_free_dev_1;
-
-	ioaddr = pci_iomap(pdev, 1, pci_resource_len(pdev, 1));
-	if (!ioaddr) {
-		pr_err("%s: cannot map MMIO\n", pci_name(pdev));
-		rc = -EIO;
-		goto err_release_regions_2;
-	}
-
-	/* Save the pointer to the PCI device information. */
-	sp->ioaddr = ioaddr;
-	sp->pdev = pdev;
-	sp->dev = dev;
-
-	INIT_DELAYED_WORK(&sp->task, ipg_reset_after_host_error);
-
-	pci_set_drvdata(pdev, dev);
-
-	rc = ipg_hw_init(dev);
-	if (rc < 0)
-		goto err_unmap_3;
-
-	rc = register_netdev(dev);
-	if (rc < 0)
-		goto err_unmap_3;
-
-	netdev_info(dev, "Ethernet device registered\n");
-out:
-	return rc;
-
-err_unmap_3:
-	pci_iounmap(pdev, ioaddr);
-err_release_regions_2:
-	pci_release_regions(pdev);
-err_free_dev_1:
-	free_netdev(dev);
-err_disable_0:
-	pci_disable_device(pdev);
-	goto out;
-}
-
-static struct pci_driver ipg_pci_driver = {
-	.name		= IPG_DRIVER_NAME,
-	.id_table	= ipg_pci_tbl,
-	.probe		= ipg_probe,
-	.remove		= ipg_remove,
-};
-
-module_pci_driver(ipg_pci_driver);
diff --git a/drivers/net/ethernet/icplus/ipg.h b/drivers/net/ethernet/icplus/ipg.h
deleted file mode 100644
index de60628..0000000
--- a/drivers/net/ethernet/icplus/ipg.h
+++ /dev/null
@@ -1,748 +0,0 @@
-/*
- * Include file for Gigabit Ethernet device driver for Network
- * Interface Cards (NICs) utilizing the Tamarack Microelectronics
- * Inc. IPG Gigabit or Triple Speed Ethernet Media Access
- * Controller.
- */
-#ifndef __LINUX_IPG_H
-#define __LINUX_IPG_H
-
-#include <linux/module.h>
-
-#include <linux/kernel.h>
-#include <linux/pci.h>
-#include <linux/ioport.h>
-#include <linux/errno.h>
-#include <asm/io.h>
-#include <linux/delay.h>
-#include <linux/types.h>
-#include <linux/netdevice.h>
-#include <linux/etherdevice.h>
-#include <linux/skbuff.h>
-#include <asm/bitops.h>
-
-/*
- *	Constants
- */
-
-/* GMII based PHY IDs */
-#define		NS				0x2000
-#define		MARVELL				0x0141
-#define		ICPLUS_PHY			0x243
-
-/* NIC Physical Layer Device MII register fields. */
-#define         MII_PHY_SELECTOR_IEEE8023       0x0001
-#define         MII_PHY_TECHABILITYFIELD        0x1FE0
-
-/* GMII_PHY_1000BASETCONTROL needs to be set to prefer master */
-#define         GMII_PHY_1000BASETCONTROL_PreferMaster 0x0400
-
-/* NIC Physical Layer Device GMII constants. */
-#define         GMII_PREAMBLE                    0xFFFFFFFF
-#define         GMII_ST                          0x1
-#define         GMII_READ                        0x2
-#define         GMII_WRITE                       0x1
-#define         GMII_TA_READ_MASK                0x1
-#define         GMII_TA_WRITE                    0x2
-
-/* I/O register offsets. */
-enum ipg_regs {
-	DMA_CTRL		= 0x00,
-	RX_DMA_STATUS		= 0x08, /* Unused + reserved */
-	TFD_LIST_PTR_0		= 0x10,
-	TFD_LIST_PTR_1		= 0x14,
-	TX_DMA_BURST_THRESH	= 0x18,
-	TX_DMA_URGENT_THRESH	= 0x19,
-	TX_DMA_POLL_PERIOD	= 0x1a,
-	RFD_LIST_PTR_0		= 0x1c,
-	RFD_LIST_PTR_1		= 0x20,
-	RX_DMA_BURST_THRESH	= 0x24,
-	RX_DMA_URGENT_THRESH	= 0x25,
-	RX_DMA_POLL_PERIOD	= 0x26,
-	DEBUG_CTRL		= 0x2c,
-	ASIC_CTRL		= 0x30,
-	FIFO_CTRL		= 0x38, /* Unused */
-	FLOW_OFF_THRESH		= 0x3c,
-	FLOW_ON_THRESH		= 0x3e,
-	EEPROM_DATA		= 0x48,
-	EEPROM_CTRL		= 0x4a,
-	EXPROM_ADDR		= 0x4c, /* Unused */
-	EXPROM_DATA		= 0x50, /* Unused */
-	WAKE_EVENT		= 0x51, /* Unused */
-	COUNTDOWN		= 0x54, /* Unused */
-	INT_STATUS_ACK		= 0x5a,
-	INT_ENABLE		= 0x5c,
-	INT_STATUS		= 0x5e, /* Unused */
-	TX_STATUS		= 0x60,
-	MAC_CTRL		= 0x6c,
-	VLAN_TAG		= 0x70, /* Unused */
-	PHY_SET			= 0x75,
-	PHY_CTRL		= 0x76,
-	STATION_ADDRESS_0	= 0x78,
-	STATION_ADDRESS_1	= 0x7a,
-	STATION_ADDRESS_2	= 0x7c,
-	MAX_FRAME_SIZE		= 0x86,
-	RECEIVE_MODE		= 0x88,
-	HASHTABLE_0		= 0x8c,
-	HASHTABLE_1		= 0x90,
-	RMON_STATISTICS_MASK	= 0x98,
-	STATISTICS_MASK		= 0x9c,
-	RX_JUMBO_FRAMES		= 0xbc, /* Unused */
-	TCP_CHECKSUM_ERRORS	= 0xc0, /* Unused */
-	IP_CHECKSUM_ERRORS	= 0xc2, /* Unused */
-	UDP_CHECKSUM_ERRORS	= 0xc4, /* Unused */
-	TX_JUMBO_FRAMES		= 0xf4  /* Unused */
-};
-
-/* Ethernet MIB statistic register offsets. */
-#define	IPG_OCTETRCVOK			0xA8
-#define	IPG_MCSTOCTETRCVDOK		0xAC
-#define	IPG_BCSTOCTETRCVOK		0xB0
-#define	IPG_FRAMESRCVDOK		0xB4
-#define	IPG_MCSTFRAMESRCVDOK		0xB8
-#define	IPG_BCSTFRAMESRCVDOK		0xBE
-#define	IPG_MACCONTROLFRAMESRCVD	0xC6
-#define	IPG_FRAMETOOLONGERRORS		0xC8
-#define	IPG_INRANGELENGTHERRORS		0xCA
-#define	IPG_FRAMECHECKSEQERRORS		0xCC
-#define	IPG_FRAMESLOSTRXERRORS		0xCE
-#define	IPG_OCTETXMTOK			0xD0
-#define	IPG_MCSTOCTETXMTOK		0xD4
-#define	IPG_BCSTOCTETXMTOK		0xD8
-#define	IPG_FRAMESXMTDOK		0xDC
-#define	IPG_MCSTFRAMESXMTDOK		0xE0
-#define	IPG_FRAMESWDEFERREDXMT		0xE4
-#define	IPG_LATECOLLISIONS		0xE8
-#define	IPG_MULTICOLFRAMES		0xEC
-#define	IPG_SINGLECOLFRAMES		0xF0
-#define	IPG_BCSTFRAMESXMTDOK		0xF6
-#define	IPG_CARRIERSENSEERRORS		0xF8
-#define	IPG_MACCONTROLFRAMESXMTDOK	0xFA
-#define	IPG_FRAMESABORTXSCOLLS		0xFC
-#define	IPG_FRAMESWEXDEFERRAL		0xFE
-
-/* RMON statistic register offsets. */
-#define	IPG_ETHERSTATSCOLLISIONS			0x100
-#define	IPG_ETHERSTATSOCTETSTRANSMIT			0x104
-#define	IPG_ETHERSTATSPKTSTRANSMIT			0x108
-#define	IPG_ETHERSTATSPKTS64OCTESTSTRANSMIT		0x10C
-#define	IPG_ETHERSTATSPKTS65TO127OCTESTSTRANSMIT	0x110
-#define	IPG_ETHERSTATSPKTS128TO255OCTESTSTRANSMIT	0x114
-#define	IPG_ETHERSTATSPKTS256TO511OCTESTSTRANSMIT	0x118
-#define	IPG_ETHERSTATSPKTS512TO1023OCTESTSTRANSMIT	0x11C
-#define	IPG_ETHERSTATSPKTS1024TO1518OCTESTSTRANSMIT	0x120
-#define	IPG_ETHERSTATSCRCALIGNERRORS			0x124
-#define	IPG_ETHERSTATSUNDERSIZEPKTS			0x128
-#define	IPG_ETHERSTATSFRAGMENTS				0x12C
-#define	IPG_ETHERSTATSJABBERS				0x130
-#define	IPG_ETHERSTATSOCTETS				0x134
-#define	IPG_ETHERSTATSPKTS				0x138
-#define	IPG_ETHERSTATSPKTS64OCTESTS			0x13C
-#define	IPG_ETHERSTATSPKTS65TO127OCTESTS		0x140
-#define	IPG_ETHERSTATSPKTS128TO255OCTESTS		0x144
-#define	IPG_ETHERSTATSPKTS256TO511OCTESTS		0x148
-#define	IPG_ETHERSTATSPKTS512TO1023OCTESTS		0x14C
-#define	IPG_ETHERSTATSPKTS1024TO1518OCTESTS		0x150
-
-/* RMON statistic register equivalents. */
-#define	IPG_ETHERSTATSMULTICASTPKTSTRANSMIT		0xE0
-#define	IPG_ETHERSTATSBROADCASTPKTSTRANSMIT		0xF6
-#define	IPG_ETHERSTATSMULTICASTPKTS			0xB8
-#define	IPG_ETHERSTATSBROADCASTPKTS			0xBE
-#define	IPG_ETHERSTATSOVERSIZEPKTS			0xC8
-#define	IPG_ETHERSTATSDROPEVENTS			0xCE
-
-/* Serial EEPROM offsets */
-#define	IPG_EEPROM_CONFIGPARAM		0x00
-#define	IPG_EEPROM_ASICCTRL		0x01
-#define	IPG_EEPROM_SUBSYSTEMVENDORID	0x02
-#define	IPG_EEPROM_SUBSYSTEMID		0x03
-#define	IPG_EEPROM_STATIONADDRESS0	0x10
-#define	IPG_EEPROM_STATIONADDRESS1	0x11
-#define	IPG_EEPROM_STATIONADDRESS2	0x12
-
-/* Register & data structure bit masks */
-
-/* PCI register masks. */
-
-/* IOBaseAddress */
-#define         IPG_PIB_RSVD_MASK		0xFFFFFE01
-#define         IPG_PIB_IOBASEADDRESS		0xFFFFFF00
-#define         IPG_PIB_IOBASEADDRIND		0x00000001
-
-/* MemBaseAddress */
-#define         IPG_PMB_RSVD_MASK		0xFFFFFE07
-#define         IPG_PMB_MEMBASEADDRIND		0x00000001
-#define         IPG_PMB_MEMMAPTYPE		0x00000006
-#define         IPG_PMB_MEMMAPTYPE0		0x00000002
-#define         IPG_PMB_MEMMAPTYPE1		0x00000004
-#define         IPG_PMB_MEMBASEADDRESS		0xFFFFFE00
-
-/* ConfigStatus */
-#define IPG_CS_RSVD_MASK                0xFFB0
-#define IPG_CS_CAPABILITIES             0x0010
-#define IPG_CS_66MHZCAPABLE             0x0020
-#define IPG_CS_FASTBACK2BACK            0x0080
-#define IPG_CS_DATAPARITYREPORTED       0x0100
-#define IPG_CS_DEVSELTIMING             0x0600
-#define IPG_CS_SIGNALEDTARGETABORT      0x0800
-#define IPG_CS_RECEIVEDTARGETABORT      0x1000
-#define IPG_CS_RECEIVEDMASTERABORT      0x2000
-#define IPG_CS_SIGNALEDSYSTEMERROR      0x4000
-#define IPG_CS_DETECTEDPARITYERROR      0x8000
-
-/* TFD data structure masks. */
-
-/* TFDList, TFC */
-#define	IPG_TFC_RSVD_MASK			0x0000FFFF9FFFFFFFULL
-#define	IPG_TFC_FRAMEID				0x000000000000FFFFULL
-#define	IPG_TFC_WORDALIGN			0x0000000000030000ULL
-#define	IPG_TFC_WORDALIGNTODWORD		0x0000000000000000ULL
-#define	IPG_TFC_WORDALIGNTOWORD			0x0000000000020000ULL
-#define	IPG_TFC_WORDALIGNDISABLED		0x0000000000030000ULL
-#define	IPG_TFC_TCPCHECKSUMENABLE		0x0000000000040000ULL
-#define	IPG_TFC_UDPCHECKSUMENABLE		0x0000000000080000ULL
-#define	IPG_TFC_IPCHECKSUMENABLE		0x0000000000100000ULL
-#define	IPG_TFC_FCSAPPENDDISABLE		0x0000000000200000ULL
-#define	IPG_TFC_TXINDICATE			0x0000000000400000ULL
-#define	IPG_TFC_TXDMAINDICATE			0x0000000000800000ULL
-#define	IPG_TFC_FRAGCOUNT			0x000000000F000000ULL
-#define	IPG_TFC_VLANTAGINSERT			0x0000000010000000ULL
-#define	IPG_TFC_TFDDONE				0x0000000080000000ULL
-#define	IPG_TFC_VID				0x00000FFF00000000ULL
-#define	IPG_TFC_CFI				0x0000100000000000ULL
-#define	IPG_TFC_USERPRIORITY			0x0000E00000000000ULL
-
-/* TFDList, FragInfo */
-#define	IPG_TFI_RSVD_MASK			0xFFFF00FFFFFFFFFFULL
-#define	IPG_TFI_FRAGADDR			0x000000FFFFFFFFFFULL
-#define	IPG_TFI_FRAGLEN				0xFFFF000000000000ULL
-
-/* RFD data structure masks. */
-
-/* RFDList, RFS */
-#define	IPG_RFS_RSVD_MASK			0x0000FFFFFFFFFFFFULL
-#define	IPG_RFS_RXFRAMELEN			0x000000000000FFFFULL
-#define	IPG_RFS_RXFIFOOVERRUN			0x0000000000010000ULL
-#define	IPG_RFS_RXRUNTFRAME			0x0000000000020000ULL
-#define	IPG_RFS_RXALIGNMENTERROR		0x0000000000040000ULL
-#define	IPG_RFS_RXFCSERROR			0x0000000000080000ULL
-#define	IPG_RFS_RXOVERSIZEDFRAME		0x0000000000100000ULL
-#define	IPG_RFS_RXLENGTHERROR			0x0000000000200000ULL
-#define	IPG_RFS_VLANDETECTED			0x0000000000400000ULL
-#define	IPG_RFS_TCPDETECTED			0x0000000000800000ULL
-#define	IPG_RFS_TCPERROR			0x0000000001000000ULL
-#define	IPG_RFS_UDPDETECTED			0x0000000002000000ULL
-#define	IPG_RFS_UDPERROR			0x0000000004000000ULL
-#define	IPG_RFS_IPDETECTED			0x0000000008000000ULL
-#define	IPG_RFS_IPERROR				0x0000000010000000ULL
-#define	IPG_RFS_FRAMESTART			0x0000000020000000ULL
-#define	IPG_RFS_FRAMEEND			0x0000000040000000ULL
-#define	IPG_RFS_RFDDONE				0x0000000080000000ULL
-#define	IPG_RFS_TCI				0x0000FFFF00000000ULL
-
-/* RFDList, FragInfo */
-#define	IPG_RFI_RSVD_MASK			0xFFFF00FFFFFFFFFFULL
-#define	IPG_RFI_FRAGADDR			0x000000FFFFFFFFFFULL
-#define	IPG_RFI_FRAGLEN				0xFFFF000000000000ULL
-
-/* I/O Register masks. */
-
-/* RMON Statistics Mask */
-#define	IPG_RZ_ALL					0x0FFFFFFF
-
-/* Statistics Mask */
-#define	IPG_SM_ALL					0x0FFFFFFF
-#define	IPG_SM_OCTETRCVOK_FRAMESRCVDOK			0x00000001
-#define	IPG_SM_MCSTOCTETRCVDOK_MCSTFRAMESRCVDOK		0x00000002
-#define	IPG_SM_BCSTOCTETRCVDOK_BCSTFRAMESRCVDOK		0x00000004
-#define	IPG_SM_RXJUMBOFRAMES				0x00000008
-#define	IPG_SM_TCPCHECKSUMERRORS			0x00000010
-#define	IPG_SM_IPCHECKSUMERRORS				0x00000020
-#define	IPG_SM_UDPCHECKSUMERRORS			0x00000040
-#define	IPG_SM_MACCONTROLFRAMESRCVD			0x00000080
-#define	IPG_SM_FRAMESTOOLONGERRORS			0x00000100
-#define	IPG_SM_INRANGELENGTHERRORS			0x00000200
-#define	IPG_SM_FRAMECHECKSEQERRORS			0x00000400
-#define	IPG_SM_FRAMESLOSTRXERRORS			0x00000800
-#define	IPG_SM_OCTETXMTOK_FRAMESXMTOK			0x00001000
-#define	IPG_SM_MCSTOCTETXMTOK_MCSTFRAMESXMTDOK		0x00002000
-#define	IPG_SM_BCSTOCTETXMTOK_BCSTFRAMESXMTDOK		0x00004000
-#define	IPG_SM_FRAMESWDEFERREDXMT			0x00008000
-#define	IPG_SM_LATECOLLISIONS				0x00010000
-#define	IPG_SM_MULTICOLFRAMES				0x00020000
-#define	IPG_SM_SINGLECOLFRAMES				0x00040000
-#define	IPG_SM_TXJUMBOFRAMES				0x00080000
-#define	IPG_SM_CARRIERSENSEERRORS			0x00100000
-#define	IPG_SM_MACCONTROLFRAMESXMTD			0x00200000
-#define	IPG_SM_FRAMESABORTXSCOLLS			0x00400000
-#define	IPG_SM_FRAMESWEXDEFERAL				0x00800000
-
-/* Countdown */
-#define	IPG_CD_RSVD_MASK		0x0700FFFF
-#define	IPG_CD_COUNT			0x0000FFFF
-#define	IPG_CD_COUNTDOWNSPEED		0x01000000
-#define	IPG_CD_COUNTDOWNMODE		0x02000000
-#define	IPG_CD_COUNTINTENABLED		0x04000000
-
-/* TxDMABurstThresh */
-#define IPG_TB_RSVD_MASK                0xFF
-
-/* TxDMAUrgentThresh */
-#define IPG_TU_RSVD_MASK                0xFF
-
-/* TxDMAPollPeriod */
-#define IPG_TP_RSVD_MASK                0xFF
-
-/* RxDMAUrgentThresh */
-#define IPG_RU_RSVD_MASK                0xFF
-
-/* RxDMAPollPeriod */
-#define IPG_RP_RSVD_MASK                0xFF
-
-/* ReceiveMode */
-#define IPG_RM_RSVD_MASK                0x3F
-#define IPG_RM_RECEIVEUNICAST           0x01
-#define IPG_RM_RECEIVEMULTICAST         0x02
-#define IPG_RM_RECEIVEBROADCAST         0x04
-#define IPG_RM_RECEIVEALLFRAMES         0x08
-#define IPG_RM_RECEIVEMULTICASTHASH     0x10
-#define IPG_RM_RECEIVEIPMULTICAST       0x20
-
-/* PhySet */
-#define IPG_PS_MEM_LENB9B               0x01
-#define IPG_PS_MEM_LEN9                 0x02
-#define IPG_PS_NON_COMPDET              0x04
-
-/* PhyCtrl */
-#define IPG_PC_RSVD_MASK                0xFF
-#define IPG_PC_MGMTCLK_LO               0x00
-#define IPG_PC_MGMTCLK_HI               0x01
-#define IPG_PC_MGMTCLK                  0x01
-#define IPG_PC_MGMTDATA                 0x02
-#define IPG_PC_MGMTDIR                  0x04
-#define IPG_PC_DUPLEX_POLARITY          0x08
-#define IPG_PC_DUPLEX_STATUS            0x10
-#define IPG_PC_LINK_POLARITY            0x20
-#define IPG_PC_LINK_SPEED               0xC0
-#define IPG_PC_LINK_SPEED_10MBPS        0x40
-#define IPG_PC_LINK_SPEED_100MBPS       0x80
-#define IPG_PC_LINK_SPEED_1000MBPS      0xC0
-
-/* DMACtrl */
-#define IPG_DC_RSVD_MASK                0xC07D9818
-#define IPG_DC_RX_DMA_COMPLETE          0x00000008
-#define IPG_DC_RX_DMA_POLL_NOW          0x00000010
-#define IPG_DC_TX_DMA_COMPLETE          0x00000800
-#define IPG_DC_TX_DMA_POLL_NOW          0x00001000
-#define IPG_DC_TX_DMA_IN_PROG           0x00008000
-#define IPG_DC_RX_EARLY_DISABLE         0x00010000
-#define IPG_DC_MWI_DISABLE              0x00040000
-#define IPG_DC_TX_WRITE_BACK_DISABLE    0x00080000
-#define IPG_DC_TX_BURST_LIMIT           0x00700000
-#define IPG_DC_TARGET_ABORT             0x40000000
-#define IPG_DC_MASTER_ABORT             0x80000000
-
-/* ASICCtrl */
-#define IPG_AC_RSVD_MASK                0x07FFEFF2
-#define IPG_AC_EXP_ROM_SIZE             0x00000002
-#define IPG_AC_PHY_SPEED10              0x00000010
-#define IPG_AC_PHY_SPEED100             0x00000020
-#define IPG_AC_PHY_SPEED1000            0x00000040
-#define IPG_AC_PHY_MEDIA                0x00000080
-#define IPG_AC_FORCED_CFG               0x00000700
-#define IPG_AC_D3RESETDISABLE           0x00000800
-#define IPG_AC_SPEED_UP_MODE            0x00002000
-#define IPG_AC_LED_MODE                 0x00004000
-#define IPG_AC_RST_OUT_POLARITY         0x00008000
-#define IPG_AC_GLOBAL_RESET             0x00010000
-#define IPG_AC_RX_RESET                 0x00020000
-#define IPG_AC_TX_RESET                 0x00040000
-#define IPG_AC_DMA                      0x00080000
-#define IPG_AC_FIFO                     0x00100000
-#define IPG_AC_NETWORK                  0x00200000
-#define IPG_AC_HOST                     0x00400000
-#define IPG_AC_AUTO_INIT                0x00800000
-#define IPG_AC_RST_OUT                  0x01000000
-#define IPG_AC_INT_REQUEST              0x02000000
-#define IPG_AC_RESET_BUSY               0x04000000
-#define IPG_AC_LED_SPEED                0x08000000
-#define IPG_AC_LED_MODE_BIT_1           0x20000000
-
-/* EepromCtrl */
-#define IPG_EC_RSVD_MASK                0x83FF
-#define IPG_EC_EEPROM_ADDR              0x00FF
-#define IPG_EC_EEPROM_OPCODE            0x0300
-#define IPG_EC_EEPROM_SUBCOMMAD         0x0000
-#define IPG_EC_EEPROM_WRITEOPCODE       0x0100
-#define IPG_EC_EEPROM_READOPCODE        0x0200
-#define IPG_EC_EEPROM_ERASEOPCODE       0x0300
-#define IPG_EC_EEPROM_BUSY              0x8000
-
-/* FIFOCtrl */
-#define IPG_FC_RSVD_MASK                0xC001
-#define IPG_FC_RAM_TEST_MODE            0x0001
-#define IPG_FC_TRANSMITTING             0x4000
-#define IPG_FC_RECEIVING                0x8000
-
-/* TxStatus */
-#define IPG_TS_RSVD_MASK                0xFFFF00DD
-#define IPG_TS_TX_ERROR                 0x00000001
-#define IPG_TS_LATE_COLLISION           0x00000004
-#define IPG_TS_TX_MAX_COLL              0x00000008
-#define IPG_TS_TX_UNDERRUN              0x00000010
-#define IPG_TS_TX_IND_REQD              0x00000040
-#define IPG_TS_TX_COMPLETE              0x00000080
-#define IPG_TS_TX_FRAMEID               0xFFFF0000
-
-/* WakeEvent */
-#define IPG_WE_WAKE_PKT_ENABLE          0x01
-#define IPG_WE_MAGIC_PKT_ENABLE         0x02
-#define IPG_WE_LINK_EVT_ENABLE          0x04
-#define IPG_WE_WAKE_POLARITY            0x08
-#define IPG_WE_WAKE_PKT_EVT             0x10
-#define IPG_WE_MAGIC_PKT_EVT            0x20
-#define IPG_WE_LINK_EVT                 0x40
-#define IPG_WE_WOL_ENABLE               0x80
-
-/* IntEnable */
-#define IPG_IE_RSVD_MASK                0x1FFE
-#define IPG_IE_HOST_ERROR               0x0002
-#define IPG_IE_TX_COMPLETE              0x0004
-#define IPG_IE_MAC_CTRL_FRAME           0x0008
-#define IPG_IE_RX_COMPLETE              0x0010
-#define IPG_IE_RX_EARLY                 0x0020
-#define IPG_IE_INT_REQUESTED            0x0040
-#define IPG_IE_UPDATE_STATS             0x0080
-#define IPG_IE_LINK_EVENT               0x0100
-#define IPG_IE_TX_DMA_COMPLETE          0x0200
-#define IPG_IE_RX_DMA_COMPLETE          0x0400
-#define IPG_IE_RFD_LIST_END             0x0800
-#define IPG_IE_RX_DMA_PRIORITY          0x1000
-
-/* IntStatus */
-#define IPG_IS_RSVD_MASK                0x1FFF
-#define IPG_IS_INTERRUPT_STATUS         0x0001
-#define IPG_IS_HOST_ERROR               0x0002
-#define IPG_IS_TX_COMPLETE              0x0004
-#define IPG_IS_MAC_CTRL_FRAME           0x0008
-#define IPG_IS_RX_COMPLETE              0x0010
-#define IPG_IS_RX_EARLY                 0x0020
-#define IPG_IS_INT_REQUESTED            0x0040
-#define IPG_IS_UPDATE_STATS             0x0080
-#define IPG_IS_LINK_EVENT               0x0100
-#define IPG_IS_TX_DMA_COMPLETE          0x0200
-#define IPG_IS_RX_DMA_COMPLETE          0x0400
-#define IPG_IS_RFD_LIST_END             0x0800
-#define IPG_IS_RX_DMA_PRIORITY          0x1000
-
-/* MACCtrl */
-#define IPG_MC_RSVD_MASK                0x7FE33FA3
-#define IPG_MC_IFS_SELECT               0x00000003
-#define IPG_MC_IFS_4352BIT              0x00000003
-#define IPG_MC_IFS_1792BIT              0x00000002
-#define IPG_MC_IFS_1024BIT              0x00000001
-#define IPG_MC_IFS_96BIT                0x00000000
-#define IPG_MC_DUPLEX_SELECT            0x00000020
-#define IPG_MC_DUPLEX_SELECT_FD         0x00000020
-#define IPG_MC_DUPLEX_SELECT_HD         0x00000000
-#define IPG_MC_TX_FLOW_CONTROL_ENABLE   0x00000080
-#define IPG_MC_RX_FLOW_CONTROL_ENABLE   0x00000100
-#define IPG_MC_RCV_FCS                  0x00000200
-#define IPG_MC_FIFO_LOOPBACK            0x00000400
-#define IPG_MC_MAC_LOOPBACK             0x00000800
-#define IPG_MC_AUTO_VLAN_TAGGING        0x00001000
-#define IPG_MC_AUTO_VLAN_UNTAGGING      0x00002000
-#define IPG_MC_COLLISION_DETECT         0x00010000
-#define IPG_MC_CARRIER_SENSE            0x00020000
-#define IPG_MC_STATISTICS_ENABLE        0x00200000
-#define IPG_MC_STATISTICS_DISABLE       0x00400000
-#define IPG_MC_STATISTICS_ENABLED       0x00800000
-#define IPG_MC_TX_ENABLE                0x01000000
-#define IPG_MC_TX_DISABLE               0x02000000
-#define IPG_MC_TX_ENABLED               0x04000000
-#define IPG_MC_RX_ENABLE                0x08000000
-#define IPG_MC_RX_DISABLE               0x10000000
-#define IPG_MC_RX_ENABLED               0x20000000
-#define IPG_MC_PAUSED                   0x40000000
-
-/*
- *	Tune
- */
-
-/* Assign IPG_APPEND_FCS_ON_TX > 0 for auto FCS append on TX. */
-#define         IPG_APPEND_FCS_ON_TX         1
-
-/* Assign IPG_STRIP_FCS_ON_RX > 0 for auto FCS strip on RX. */
-#define         IPG_STRIP_FCS_ON_RX          1
-
-/* Assign IPG_DROP_ON_RX_ETH_ERRORS > 0 to drop RX frames with
- * Ethernet errors.
- */
-#define         IPG_DROP_ON_RX_ETH_ERRORS    1
-
-/* Assign IPG_INSERT_MANUAL_VLAN_TAG > 0 to insert VLAN tags manually
- * (via TFC).
- */
-#define		IPG_INSERT_MANUAL_VLAN_TAG   0
-
-/* Assign IPG_ADD_IPCHECKSUM_ON_TX > 0 for auto IP checksum on TX. */
-#define         IPG_ADD_IPCHECKSUM_ON_TX     0
-
-/* Assign IPG_ADD_TCPCHECKSUM_ON_TX > 0 for auto TCP checksum on TX.
- * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
- */
-#define         IPG_ADD_TCPCHECKSUM_ON_TX    0
-
-/* Assign IPG_ADD_UDPCHECKSUM_ON_TX > 0 for auto UDP checksum on TX.
- * DO NOT USE FOR SILICON REVISIONS B3 AND EARLIER.
- */
-#define         IPG_ADD_UDPCHECKSUM_ON_TX    0
-
-/* If inserting VLAN tags manually, assign the IPG_MANUAL_VLAN_xx
- * constants as desired.
- */
-#define		IPG_MANUAL_VLAN_VID		0xABC
-#define		IPG_MANUAL_VLAN_CFI		0x1
-#define		IPG_MANUAL_VLAN_USERPRIORITY 0x5
-
-#define         IPG_IO_REG_RANGE		0xFF
-#define         IPG_MEM_REG_RANGE		0x154
-#define         IPG_DRIVER_NAME		"Sundance Technology IPG Triple-Speed Ethernet"
-#define         IPG_NIC_PHY_ADDRESS          0x01
-#define		IPG_DMALIST_ALIGN_PAD	0x07
-#define		IPG_MULTICAST_HASHTABLE_SIZE	0x40
-
-/* Number of milliseconds to wait after issuing a software reset.
- * 0x05 <= IPG_AC_RESETWAIT to account for proper 10Mbps operation.
- */
-#define         IPG_AC_RESETWAIT             0x05
-
-/* Number of IPG_AC_RESETWAIT time periods before declaring timeout. */
-#define         IPG_AC_RESET_TIMEOUT         0x0A
-
-/* Minimum number of nanoseconds used to toggle MDC clock during
- * MII/GMII register access.
- */
-#define		IPG_PC_PHYCTRLWAIT_NS		200
-
-#define		IPG_TFDLIST_LENGTH		0x100
-
-/* Number of frames between TxDMAComplete interrupt.
- * 0 < IPG_FRAMESBETWEENTXDMACOMPLETES <= IPG_TFDLIST_LENGTH
- */
-#define		IPG_FRAMESBETWEENTXDMACOMPLETES 0x1
-
-#define		IPG_RFDLIST_LENGTH		0x100
-
-/* Maximum number of RFDs to process per interrupt.
- * 1 < IPG_MAXRFDPROCESS_COUNT < IPG_RFDLIST_LENGTH
- */
-#define		IPG_MAXRFDPROCESS_COUNT	0x80
-
-/* Minimum margin between the last freed RFD and the current RFD.
- * 1 < IPG_MINUSEDRFDSTOFREE < IPG_RFDLIST_LENGTH
- */
-#define		IPG_MINUSEDRFDSTOFREE	0x80
-
-/* Specify the jumbo frame maximum size.
- * The unit is 0x600 (the rx_buffer size that one RFD can carry).
- */
-#define     MAX_JUMBOSIZE	        0x8	/* max is 12K */
-
-/* Key register values loaded at driver start up. */
-
-/* TXDMAPollPeriod is specified in 320ns increments.
- *
- * Value	Time
- * ---------------------
- * 0x00-0x01	320ns
- * 0x03		~1us
- * 0x1F		~10us
- * 0xFF		~82us
- */
-#define		IPG_TXDMAPOLLPERIOD_VALUE	0x26
-
-/* TxDMAUrgentThresh specifies the minimum amount of
- * data in the transmit FIFO before asserting an
- * urgent transmit DMA request.
- *
- * Value	Min TxFIFO occupied space before urgent TX request
- * ---------------------------------------------------------------
- * 0x00-0x04	128 bytes (1024 bits)
- * 0x27		1248 bytes (~10000 bits)
- * 0x30		1536 bytes (12288 bits)
- * 0xFF		8192 bytes (65536 bits)
- */
-#define		IPG_TXDMAURGENTTHRESH_VALUE	0x04
-
-/* TxDMABurstThresh specifies the minimum amount of
- * free space in the transmit FIFO before asserting a
- * transmit DMA request.
- *
- * Value	Min TxFIFO free space before TX request
- * ----------------------------------------------------
- * 0x00-0x08	256 bytes
- * 0x30		1536 bytes
- * 0xFF		8192 bytes
- */
-#define		IPG_TXDMABURSTTHRESH_VALUE	0x30
-
-/* RXDMAPollPeriod is specified in 320ns increments.
- *
- * Value	Time
- * ---------------------
- * 0x00-0x01	320ns
- * 0x03		~1us
- * 0x1F		~10us
- * 0xFF		~82us
- */
-#define		IPG_RXDMAPOLLPERIOD_VALUE	0x01
-
-/* RxDMAUrgentThresh specifies the minimum amount of
- * free space within the receive FIFO before asserting
- * an urgent receive DMA request.
- *
- * Value	Min RxFIFO free space before urgent RX request
- * ---------------------------------------------------------------
- * 0x00-0x04	128 bytes (1024 bits)
- * 0x27		1248 bytes (~10000 bits)
- * 0x30		1536 bytes (12288 bits)
- * 0xFF		8192 bytes (65536 bits)
- */
-#define		IPG_RXDMAURGENTTHRESH_VALUE	0x30
-
-/* RxDMABurstThresh specifies the minimum amount of
- * occupied space within the receive FIFO before asserting
- * a receive DMA request.
- *
- * Value	Min RxFIFO occupied space before RX request
- * ----------------------------------------------------
- * 0x00-0x08	256 bytes
- * 0x30		1536 bytes
- * 0xFF		8192 bytes
- */
-#define		IPG_RXDMABURSTTHRESH_VALUE	0x30
-
-/* FlowOnThresh specifies the maximum amount of occupied
- * space in the receive FIFO before a PAUSE frame with
- * maximum pause time is transmitted.
- *
- * Value	Max RxFIFO occupied space before PAUSE
- * ---------------------------------------------------
- * 0x0000	0 bytes
- * 0x0740	29,696 bytes
- * 0x07FF	32,752 bytes
- */
-#define		IPG_FLOWONTHRESH_VALUE	0x0740
-
-/* FlowOffThresh specifies the minimum amount of occupied
- * space in the receive FIFO before a PAUSE frame with
- * zero pause time is transmitted.
- *
- * Value	Min RxFIFO occupied space before PAUSE
- * ---------------------------------------------------
- * 0x0000	0 bytes
- * 0x00BF	3056 bytes
- * 0x07FF	32,752 bytes
- */
-#define		IPG_FLOWOFFTHRESH_VALUE	0x00BF
-
-/*
- * Miscellaneous macros.
- */
-
-/* Macros for printing debug statements. */
-#ifdef IPG_DEBUG
-#  define IPG_DEBUG_MSG(fmt, args...)			\
-do {							\
-	if (0)						\
-		printk(KERN_DEBUG "IPG: " fmt, ##args);	\
-} while (0)
-#  define IPG_DDEBUG_MSG(fmt, args...)			\
-	printk(KERN_DEBUG "IPG: " fmt, ##args)
-#  define IPG_DUMPRFDLIST(args) ipg_dump_rfdlist(args)
-#  define IPG_DUMPTFDLIST(args) ipg_dump_tfdlist(args)
-#else
-#  define IPG_DEBUG_MSG(fmt, args...)			\
-do {							\
-	if (0)						\
-		printk(KERN_DEBUG "IPG: " fmt, ##args);	\
-} while (0)
-#  define IPG_DDEBUG_MSG(fmt, args...)			\
-do {							\
-	if (0)						\
-		printk(KERN_DEBUG "IPG: " fmt, ##args);	\
-} while (0)
-#  define IPG_DUMPRFDLIST(args)
-#  define IPG_DUMPTFDLIST(args)
-#endif
-
-/*
- * End miscellaneous macros.
- */
-
-/* Transmit Frame Descriptor. The IPG supports 15 fragments;
- * however, Linux requires only a single fragment. Note, each
- * TFD field is 64 bits wide.
- */
-struct ipg_tx {
-	__le64 next_desc;
-	__le64 tfc;
-	__le64 frag_info;
-};
-
-/* Receive Frame Descriptor. Note, each RFD field is 64 bits wide.
- */
-struct ipg_rx {
-	__le64 next_desc;
-	__le64 rfs;
-	__le64 frag_info;
-};
-
-struct ipg_jumbo {
-	int found_start;
-	int current_size;
-	struct sk_buff *skb;
-};
-
-/* Structure of IPG NIC specific data. */
-struct ipg_nic_private {
-	void __iomem *ioaddr;
-	struct ipg_tx *txd;
-	struct ipg_rx *rxd;
-	dma_addr_t txd_map;
-	dma_addr_t rxd_map;
-	struct sk_buff *tx_buff[IPG_TFDLIST_LENGTH];
-	struct sk_buff *rx_buff[IPG_RFDLIST_LENGTH];
-	unsigned int tx_current;
-	unsigned int tx_dirty;
-	unsigned int rx_current;
-	unsigned int rx_dirty;
-	bool is_jumbo;
-	struct ipg_jumbo jumbo;
-	unsigned long rxfrag_size;
-	unsigned long rxsupport_size;
-	unsigned long max_rxframe_size;
-	unsigned int rx_buf_sz;
-	struct pci_dev *pdev;
-	struct net_device *dev;
-	struct net_device_stats stats;
-	spinlock_t lock;
-	int tenmbpsmode;
-
-	u16 led_mode;
-	u16 station_addr[3];	/* Station Address in EEPROM Reg 0x10..0x12 */
-
-	struct mutex		mii_mutex;
-	struct mii_if_info	mii_if;
-	int reset_current_tfd;
-#ifdef IPG_DEBUG
-	int RFDlistendCount;
-	int RFDListCheckedCount;
-	int EmptyRFDListCount;
-#endif
-	struct delayed_work task;
-};
-
-#endif				/* __LINUX_IPG_H */
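
A side note on the debug macros deleted above: both branches use the do { if (0) printk(...); } while (0) idiom, which lets the compiler keep type-checking the format arguments even when the message is compiled out, while the dead branch costs nothing at run time. A minimal userspace sketch of the same idiom (hypothetical DEBUG_MSG name; uses the GNU named-variadic extension, as the driver does):

#include <stdio.h>

#define DEBUG_MSG(fmt, args...)				\
do {							\
	if (0)						\
		printf("IPG: " fmt, ##args);		\
} while (0)

int main(void)
{
	int frames = 3;

	/* Compiles to nothing, yet a mismatched format string still warns. */
	DEBUG_MSG("processed %d frames\n", frames);
	return 0;
}
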
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 85f1b1e..31c491e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -892,9 +892,10 @@
 		dev->caps.qp1_proxy[i - 1] = func_cap.qp1_proxy_qpn;
 		dev->caps.port_mask[i] = dev->caps.port_type[i];
 		dev->caps.phys_port_id[i] = func_cap.phys_port_id;
-		if (mlx4_get_slave_pkey_gid_tbl_len(dev, i,
-						    &dev->caps.gid_table_len[i],
-						    &dev->caps.pkey_table_len[i]))
+		err = mlx4_get_slave_pkey_gid_tbl_len(dev, i,
+						      &dev->caps.gid_table_len[i],
+						      &dev->caps.pkey_table_len[i]);
+		if (err)
 			goto err_mem;
 	}
 
@@ -906,6 +907,7 @@
 			 dev->caps.uar_page_size * dev->caps.num_uars,
 			 (unsigned long long)
 			 pci_resource_len(dev->persist->pdev, 2));
+		err = -ENOMEM;
 		goto err_mem;
 	}
 
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 9813d34..6fec3e9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -4952,26 +4952,41 @@
 	struct res_counter *counter;
 	struct res_counter *tmp;
 	int err;
-	int index;
+	int *counters_arr = NULL;
+	int i, j;
 
 	err = move_all_busy(dev, slave, RES_COUNTER);
 	if (err)
 		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
 			  slave);
 
-	spin_lock_irq(mlx4_tlock(dev));
-	list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
-		if (counter->com.owner == slave) {
-			index = counter->com.res_id;
-			rb_erase(&counter->com.node,
-				 &tracker->res_tree[RES_COUNTER]);
-			list_del(&counter->com.list);
-			kfree(counter);
-			__mlx4_counter_free(dev, index);
+	counters_arr = kmalloc_array(dev->caps.max_counters,
+				     sizeof(*counters_arr), GFP_KERNEL);
+	if (!counters_arr)
+		return;
+
+	do {
+		i = 0;
+		j = 0;
+		spin_lock_irq(mlx4_tlock(dev));
+		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
+			if (counter->com.owner == slave) {
+				counters_arr[i++] = counter->com.res_id;
+				rb_erase(&counter->com.node,
+					 &tracker->res_tree[RES_COUNTER]);
+				list_del(&counter->com.list);
+				kfree(counter);
+			}
+		}
+		spin_unlock_irq(mlx4_tlock(dev));
+
+		while (j < i) {
+			__mlx4_counter_free(dev, counters_arr[j++]);
 			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
 		}
-	}
-	spin_unlock_irq(mlx4_tlock(dev));
+	} while (i);
+
+	kfree(counters_arr);
 }
 
 static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
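
The rem_slave_counters() rework above stops calling __mlx4_counter_free() while the tracker spinlock is held: resource IDs are snapshotted into counters_arr under the lock, the lock is dropped, the frees run on the snapshot, and the whole thing repeats until a pass collects nothing. A minimal userspace sketch of the same collect-then-free shape (hypothetical types; a pthread mutex stands in for the spinlock, and at most max_counters entries can match, as in the driver):

#include <pthread.h>
#include <stdlib.h>

struct counter { int id; int owner; struct counter *next; };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct counter *counter_list;

static void slow_free(int id) { (void)id; /* may sleep in the real driver */ }

static void rem_slave_counters(int slave, int max_counters)
{
	int *ids = malloc(max_counters * sizeof(*ids));
	int i, j;

	if (!ids)
		return;
	do {
		struct counter **pp, *c;

		i = 0;
		j = 0;
		pthread_mutex_lock(&lock);
		pp = &counter_list;
		while ((c = *pp) != NULL) {
			if (c->owner == slave) {
				ids[i++] = c->id;
				*pp = c->next;	/* unlink under the lock */
				free(c);
			} else {
				pp = &c->next;
			}
		}
		pthread_mutex_unlock(&lock);

		while (j < i)
			slow_free(ids[j++]);	/* lock dropped: safe to sleep */
	} while (i);	/* repeat until a pass finds no entries */
	free(ids);
}

int main(void)
{
	rem_slave_counters(1, 16);
	return 0;
}
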
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h
index f2ae62d..22e72bf 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en.h
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h
@@ -334,9 +334,15 @@
 
 #define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
 
+enum mlx5e_dma_map_type {
+	MLX5E_DMA_MAP_SINGLE,
+	MLX5E_DMA_MAP_PAGE
+};
+
 struct mlx5e_sq_dma {
-	dma_addr_t addr;
-	u32        size;
+	dma_addr_t              addr;
+	u32                     size;
+	enum mlx5e_dma_map_type type;
 };
 
 enum {
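
The new mlx5e_dma_map_type tag records how each FIFO entry was mapped, so the completion path can call the matching unmap primitive instead of unconditionally using dma_unmap_single() for page fragments as well. A minimal sketch of the tagged-unmap shape (hypothetical names; printf stands in for the DMA API):

#include <stdio.h>

enum dma_map_type { DMA_MAP_SINGLE, DMA_MAP_PAGE };

struct sq_dma {
	unsigned long addr;
	unsigned int size;
	enum dma_map_type type;
};

static void tx_dma_unmap(const struct sq_dma *dma)
{
	switch (dma->type) {
	case DMA_MAP_SINGLE:
		printf("dma_unmap_single(%#lx, %u)\n", dma->addr, dma->size);
		break;
	case DMA_MAP_PAGE:
		printf("dma_unmap_page(%#lx, %u)\n", dma->addr, dma->size);
		break;
	default:
		fprintf(stderr, "unknown DMA type!\n");
	}
}

int main(void)
{
	struct sq_dma linear = { 0x1000, 64, DMA_MAP_SINGLE };
	struct sq_dma frag   = { 0x2000, 4096, DMA_MAP_PAGE };

	tx_dma_unmap(&linear);	/* skb headlen, mapped with dma_map_single */
	tx_dma_unmap(&frag);	/* page fragment, mapped per page */
	return 0;
}
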
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 5fc4d2d..1e52db3 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -1332,6 +1332,42 @@
 	return err;
 }
 
+static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
+						  u32 tirn)
+{
+	void *in;
+	int inlen;
+	int err;
+
+	inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
+	in = mlx5_vzalloc(inlen);
+	if (!in)
+		return -ENOMEM;
+
+	MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
+
+	err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
+
+	kvfree(in);
+
+	return err;
+}
+
+static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
+{
+	int err;
+	int i;
+
+	for (i = 0; i < MLX5E_NUM_TT; i++) {
+		err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
+							     priv->tirn[i]);
+		if (err)
+			return err;
+	}
+
+	return 0;
+}
+
 static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
 {
 	struct mlx5e_priv *priv = netdev_priv(netdev);
@@ -1376,6 +1412,13 @@
 		goto err_clear_state_opened_flag;
 	}
 
+	err = mlx5e_refresh_tirs_self_loopback_enable(priv);
+	if (err) {
+		netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
+			   __func__, err);
+		goto err_close_channels;
+	}
+
 	mlx5e_update_carrier(priv);
 	mlx5e_redirect_rqts(priv);
 
@@ -1383,6 +1426,8 @@
 
 	return 0;
 
+err_close_channels:
+	mlx5e_close_channels(priv);
 err_clear_state_opened_flag:
 	clear_bit(MLX5E_STATE_OPENED, &priv->state);
 	return err;
@@ -1856,6 +1901,8 @@
 
 	mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
 
+	max_mtu = MLX5E_HW2SW_MTU(max_mtu);
+
 	if (new_mtu > max_mtu) {
 		netdev_err(netdev,
 			   "%s: Bad MTU (%d) > (%d) Max\n",
@@ -1909,6 +1956,9 @@
 			       "Not creating net device, some required device capabilities are missing\n");
 		return -ENOTSUPP;
 	}
+	if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
+		mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
+
 	return 0;
 }
 
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
index cd8f85a..1341b1d 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tx.c
@@ -61,41 +61,49 @@
 	}
 }
 
-static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
-				      u32 *size)
+static inline void mlx5e_tx_dma_unmap(struct device *pdev,
+				      struct mlx5e_sq_dma *dma)
 {
-	sq->dma_fifo_pc--;
-	*addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
-	*size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
+	switch (dma->type) {
+	case MLX5E_DMA_MAP_SINGLE:
+		dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+		break;
+	case MLX5E_DMA_MAP_PAGE:
+		dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+		break;
+	default:
+		WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
+	}
+}
+
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
+				  dma_addr_t addr,
+				  u32 size,
+				  enum mlx5e_dma_map_type map_type)
+{
+	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
+	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
+	sq->dma_fifo_pc++;
+}
+
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
+{
+	return &sq->dma_fifo[i & sq->dma_fifo_mask];
 }
 
 static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
 {
-	dma_addr_t addr;
-	u32 size;
 	int i;
 
 	for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
-		mlx5e_dma_pop_last_pushed(sq, &addr, &size);
-		dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+		struct mlx5e_sq_dma *last_pushed_dma =
+			mlx5e_dma_get(sq, --sq->dma_fifo_pc);
+
+		mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
 	}
 }
 
-static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
-				  u32 size)
-{
-	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
-	sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
-	sq->dma_fifo_pc++;
-}
-
-static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
-				 u32 *size)
-{
-	*addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
-	*size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
-}
-
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
 		       void *accel_priv, select_queue_fallback_t fallback)
 {
@@ -118,8 +126,15 @@
 	 */
 #define MLX5E_MIN_INLINE ETH_HLEN
 
-	if (bf && (skb_headlen(skb) <= sq->max_inline))
-		return skb_headlen(skb);
+	if (bf) {
+		u16 ihs = skb_headlen(skb);
+
+		if (skb_vlan_tag_present(skb))
+			ihs += VLAN_HLEN;
+
+		if (ihs <= sq->max_inline)
+			return skb_headlen(skb);
+	}
 
 	return MLX5E_MIN_INLINE;
 }
@@ -218,7 +233,7 @@
 		dseg->lkey       = sq->mkey_be;
 		dseg->byte_count = cpu_to_be32(headlen);
 
-		mlx5e_dma_push(sq, dma_addr, headlen);
+		mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
 		MLX5E_TX_SKB_CB(skb)->num_dma++;
 
 		dseg++;
@@ -237,7 +252,7 @@
 		dseg->lkey       = sq->mkey_be;
 		dseg->byte_count = cpu_to_be32(fsz);
 
-		mlx5e_dma_push(sq, dma_addr, fsz);
+		mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
 		MLX5E_TX_SKB_CB(skb)->num_dma++;
 
 		dseg++;
@@ -353,13 +368,10 @@
 			}
 
 			for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
-				dma_addr_t addr;
-				u32 size;
+				struct mlx5e_sq_dma *dma =
+					mlx5e_dma_get(sq, dma_fifo_cc++);
 
-				mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
-				dma_fifo_cc++;
-				dma_unmap_single(sq->pdev, addr, size,
-						 DMA_TO_DEVICE);
+				mlx5e_tx_dma_unmap(sq->pdev, dma);
 			}
 
 			npkts++;
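
The dma_fifo in the hunks above is indexed by free-running producer/consumer counters masked into a power-of-two ring, so the error path can "pop" simply by pre-decrementing the producer while the completion path advances the consumer. A standalone sketch of that indexing scheme (hypothetical names, assuming a power-of-two size):

#include <assert.h>

#define FIFO_SIZE 8		/* must be a power of two */
#define FIFO_MASK (FIFO_SIZE - 1)

struct fifo {
	int slot[FIFO_SIZE];
	unsigned int pc;	/* producer counter, free-running */
	unsigned int cc;	/* consumer counter, free-running */
};

static void push(struct fifo *f, int v) { f->slot[f->pc++ & FIFO_MASK] = v; }
static int  pop_last(struct fifo *f)    { return f->slot[--f->pc & FIFO_MASK]; }
static int  pull(struct fifo *f)        { return f->slot[f->cc++ & FIFO_MASK]; }

int main(void)
{
	struct fifo f = { .pc = 0, .cc = 0 };

	push(&f, 1);
	push(&f, 2);
	assert(pop_last(&f) == 2);	/* error path: unwind last pushed */
	assert(pull(&f) == 1);		/* completion path: oldest first */
	return 0;
}
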
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index b4f2123..79ef799 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -7429,15 +7429,15 @@
 
 			rtl8169_rx_vlan_tag(desc, skb);
 
+			if (skb->pkt_type == PACKET_MULTICAST)
+				dev->stats.multicast++;
+
 			napi_gro_receive(&tp->napi, skb);
 
 			u64_stats_update_begin(&tp->rx_stats.syncp);
 			tp->rx_stats.packets++;
 			tp->rx_stats.bytes += pkt_size;
 			u64_stats_update_end(&tp->rx_stats.syncp);
-
-			if (skb->pkt_type == PACKET_MULTICAST)
-				dev->stats.multicast++;
 		}
 release_descriptor:
 		desc->opts2 = 0;
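
The r8169 hunk moves the multicast accounting ahead of napi_gro_receive(), which may free or recycle the skb; reading skb->pkt_type afterwards was a potential use-after-free. The shape of the fix in a standalone sketch (free() stands in for the consuming call):

#include <stdlib.h>

struct pkt { int pkt_type; };
#define PACKET_MULTICAST 2

static unsigned long multicast_stat;

/* Stand-in for napi_gro_receive(): may free or recycle the buffer. */
static void deliver(struct pkt *p)
{
	free(p);
}

static void rx_one(struct pkt *p)
{
	/* Correct order: inspect the packet before handing it on. */
	if (p->pkt_type == PACKET_MULTICAST)
		multicast_stat++;

	deliver(p);
	/* Reading p->pkt_type here would be a use-after-free. */
}

int main(void)
{
	struct pkt *p = malloc(sizeof(*p));

	if (!p)
		return 1;
	p->pkt_type = PACKET_MULTICAST;
	rx_one(p);
	return multicast_stat == 1 ? 0 : 1;
}
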
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index aa7b208..ee8d1ec 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -408,8 +408,6 @@
 	/* Interrupt enable: */
 	/* Frame receive */
 	ravb_write(ndev, RIC0_FRE0 | RIC0_FRE1, RIC0);
-	/* Receive FIFO full warning */
-	ravb_write(ndev, RIC1_RFWE, RIC1);
 	/* Receive FIFO full error, descriptor empty */
 	ravb_write(ndev, RIC2_QFE0 | RIC2_QFE1 | RIC2_RFFE, RIC2);
 	/* Frame transmitted, timestamp FIFO updated */
@@ -733,8 +731,10 @@
 			    ((tis  & tic)  & BIT(q))) {
 				if (napi_schedule_prep(&priv->napi[q])) {
 					/* Mask RX and TX interrupts */
-					ravb_write(ndev, ric0 & ~BIT(q), RIC0);
-					ravb_write(ndev, tic  & ~BIT(q), TIC);
+					ric0 &= ~BIT(q);
+					tic &= ~BIT(q);
+					ravb_write(ndev, ric0, RIC0);
+					ravb_write(ndev, tic, TIC);
 					__napi_schedule(&priv->napi[q]);
 				} else {
 					netdev_warn(ndev,
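
The ravb hunk masks the cached ric0/tic copies before writing them back; writing "ric0 & ~BIT(q)" without updating the local would re-assert bits already cleared for another queue on a later loop iteration. A minimal demonstration of why the cache must stay in sync (hypothetical register stand-in):

#include <assert.h>

int main(void)
{
	unsigned int hw = 0x3;		/* RIC0 stand-in: both queues enabled */
	unsigned int cached = hw;	/* value read once before the loop */
	int q;

	for (q = 0; q < 2; q++) {
		cached &= ~(1u << q);	/* the fix: update the cache first */
		hw = cached;		/* ravb_write(ndev, cached, RIC0) */
	}
	/* With "hw = 0x3 & ~(1u << q)" on a stale cache, hw would end up
	 * 0x1 after the loop, re-enabling queue 0's interrupt. */
	assert(hw == 0);
	return 0;
}
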
diff --git a/drivers/net/ethernet/sfc/efx.c b/drivers/net/ethernet/sfc/efx.c
index d288f1c..a3c42a3 100644
--- a/drivers/net/ethernet/sfc/efx.c
+++ b/drivers/net/ethernet/sfc/efx.c
@@ -3422,7 +3422,7 @@
  * with our request for slot reset the mmio_enabled callback will never be
  * called, and the link_reset callback is not used by AER or EEH mechanisms.
  */
-static struct pci_error_handlers efx_err_handlers = {
+static const struct pci_error_handlers efx_err_handlers = {
 	.error_detected = efx_io_error_detected,
 	.slot_reset	= efx_io_slot_reset,
 	.resume		= efx_io_resume,
diff --git a/drivers/net/ethernet/smsc/smsc911x.c b/drivers/net/ethernet/smsc/smsc911x.c
index c860c90..219a99b 100644
--- a/drivers/net/ethernet/smsc/smsc911x.c
+++ b/drivers/net/ethernet/smsc/smsc911x.c
@@ -809,22 +809,17 @@
 
 static int smsc911x_phy_reset(struct smsc911x_data *pdata)
 {
-	struct phy_device *phy_dev = pdata->phy_dev;
 	unsigned int temp;
 	unsigned int i = 100000;
 
-	BUG_ON(!phy_dev);
-	BUG_ON(!phy_dev->bus);
-
-	SMSC_TRACE(pdata, hw, "Performing PHY BCR Reset");
-	smsc911x_mii_write(phy_dev->bus, phy_dev->addr, MII_BMCR, BMCR_RESET);
+	temp = smsc911x_reg_read(pdata, PMT_CTRL);
+	smsc911x_reg_write(pdata, PMT_CTRL, temp | PMT_CTRL_PHY_RST_);
 	do {
 		msleep(1);
-		temp = smsc911x_mii_read(phy_dev->bus, phy_dev->addr,
-			MII_BMCR);
-	} while ((i--) && (temp & BMCR_RESET));
+		temp = smsc911x_reg_read(pdata, PMT_CTRL);
+	} while ((i--) && (temp & PMT_CTRL_PHY_RST_));
 
-	if (temp & BMCR_RESET) {
+	if (unlikely(temp & PMT_CTRL_PHY_RST_)) {
 		SMSC_WARN(pdata, hw, "PHY reset failed to complete");
 		return -EIO;
 	}
@@ -2296,7 +2291,7 @@
 	}
 
 	/* Reset the LAN911x */
-	if (smsc911x_soft_reset(pdata))
+	if (smsc911x_phy_reset(pdata) || smsc911x_soft_reset(pdata))
 		return -ENODEV;
 
 	dev->flags |= IFF_MULTICAST;
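
The new smsc911x_phy_reset() drives the PHY reset through the chip's PMT_CTRL register instead of the MII BMCR, setting a self-clearing bit and polling until the hardware clears it or the budget runs out. A standalone sketch of that poll-for-self-clearing-bit pattern (hypothetical register helpers; the simulated hardware completes instantly):

#include <stdio.h>

#define PMT_CTRL_PHY_RST 0x0400u

static unsigned int pmt_ctrl;	/* stand-in for the PMT_CTRL register */

static unsigned int reg_read(void)
{
	return pmt_ctrl;
}

static void reg_write(unsigned int v)
{
	/* Simulated hardware: the self-clearing bit completes instantly. */
	pmt_ctrl = v & ~PMT_CTRL_PHY_RST;
}

static int phy_reset(void)
{
	unsigned int temp, i = 100000;

	reg_write(reg_read() | PMT_CTRL_PHY_RST);
	do {
		/* msleep(1) in the driver */
		temp = reg_read();
	} while (i-- && (temp & PMT_CTRL_PHY_RST));

	return (temp & PMT_CTRL_PHY_RST) ? -1 : 0;	/* -EIO on timeout */
}

int main(void)
{
	return phy_reset() ? 1 : 0;
}
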
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
index 9d89bdb..82de68b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-ipq806x.c
@@ -337,11 +337,11 @@
 			     QSGMII_PHY_RX_SIGNAL_DETECT_EN |
 			     QSGMII_PHY_TX_DRIVER_EN |
 			     QSGMII_PHY_QSGMII_EN |
-			     0x4 << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
-			     0x3 << QSGMII_PHY_RX_DC_BIAS_OFFSET |
-			     0x1 << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
-			     0x2 << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
-			     0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET);
+			     0x4ul << QSGMII_PHY_PHASE_LOOP_GAIN_OFFSET |
+			     0x3ul << QSGMII_PHY_RX_DC_BIAS_OFFSET |
+			     0x1ul << QSGMII_PHY_RX_INPUT_EQU_OFFSET |
+			     0x2ul << QSGMII_PHY_CDR_PI_SLEW_OFFSET |
+			     0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET);
 	}
 
 	plat_dat->has_gmac = true;
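
The ul suffixes above matter because the shift offsets reach the top bits of a 32-bit value: left-shifting a plain int constant into bit 31 is undefined behaviour in C, while an unsigned long operand keeps the whole expression well defined. A minimal illustration (the offset value 28 is an assumption mirroring QSGMII_PHY_TX_DRV_AMP_OFFSET):

#include <stdio.h>

#define QSGMII_PHY_TX_DRV_AMP_OFFSET 28	/* assumed value for illustration */

int main(void)
{
	/* unsigned long operand: well defined even into bit 31 and above */
	unsigned long ok = 0xCul << QSGMII_PHY_TX_DRV_AMP_OFFSET;

	printf("%#lx\n", ok);

	/* int bad = 0xC << QSGMII_PHY_TX_DRV_AMP_OFFSET;
	 * would overflow a 32-bit signed int: undefined behaviour. */
	return 0;
}
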
diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c
index ae68afd..f38696c 100644
--- a/drivers/net/ethernet/via/via-velocity.c
+++ b/drivers/net/ethernet/via/via-velocity.c
@@ -345,13 +345,6 @@
 */
 VELOCITY_PARAM(speed_duplex, "Setting the speed and duplex mode");
 
-#define VAL_PKT_LEN_DEF     0
-/* ValPktLen[] is used for setting the checksum offload ability of NIC.
-   0: Receive frame with invalid layer 2 length (Default)
-   1: Drop frame with invalid layer 2 length
-*/
-VELOCITY_PARAM(ValPktLen, "Receiving or Drop invalid 802.3 frame");
-
 #define WOL_OPT_DEF     0
 #define WOL_OPT_MIN     0
 #define WOL_OPT_MAX     7
@@ -494,7 +487,6 @@
 
 	velocity_set_int_opt(&opts->flow_cntl, flow_control[index], FLOW_CNTL_MIN, FLOW_CNTL_MAX, FLOW_CNTL_DEF, "flow_control", devname);
 	velocity_set_bool_opt(&opts->flags, IP_byte_align[index], IP_ALIG_DEF, VELOCITY_FLAGS_IP_ALIGN, "IP_byte_align", devname);
-	velocity_set_bool_opt(&opts->flags, ValPktLen[index], VAL_PKT_LEN_DEF, VELOCITY_FLAGS_VAL_PKT_LEN, "ValPktLen", devname);
 	velocity_set_int_opt((int *) &opts->spd_dpx, speed_duplex[index], MED_LNK_MIN, MED_LNK_MAX, MED_LNK_DEF, "Media link mode", devname);
 	velocity_set_int_opt(&opts->wol_opts, wol_opts[index], WOL_OPT_MIN, WOL_OPT_MAX, WOL_OPT_DEF, "Wake On Lan options", devname);
 	opts->numrx = (opts->numrx & ~3);
@@ -2055,8 +2047,9 @@
 	int pkt_len = le16_to_cpu(rd->rdesc0.len) & 0x3fff;
 	struct sk_buff *skb;
 
-	if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP)) {
-		VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
+	if (unlikely(rd->rdesc0.RSR & (RSR_STP | RSR_EDP | RSR_RL))) {
+		if (rd->rdesc0.RSR & (RSR_STP | RSR_EDP))
+			VELOCITY_PRT(MSG_LEVEL_VERBOSE, KERN_ERR " %s : the received frame spans multiple RDs.\n", vptr->netdev->name);
 		stats->rx_length_errors++;
 		return -EINVAL;
 	}
@@ -2069,17 +2062,6 @@
 	dma_sync_single_for_cpu(vptr->dev, rd_info->skb_dma,
 				    vptr->rx.buf_sz, DMA_FROM_DEVICE);
 
-	/*
-	 *	Drop frame not meeting IEEE 802.3
-	 */
-
-	if (vptr->flags & VELOCITY_FLAGS_VAL_PKT_LEN) {
-		if (rd->rdesc0.RSR & RSR_RL) {
-			stats->rx_length_errors++;
-			return -EINVAL;
-		}
-	}
-
 	velocity_rx_csum(rd, skb);
 
 	if (velocity_rx_copy(&skb, pkt_len, vptr) < 0) {
diff --git a/drivers/net/fjes/fjes_hw.c b/drivers/net/fjes/fjes_hw.c
index bb8b530..b103adb 100644
--- a/drivers/net/fjes/fjes_hw.c
+++ b/drivers/net/fjes/fjes_hw.c
@@ -599,7 +599,7 @@
 		FJES_CMD_REQ_RES_CODE_BUSY) &&
 	       (timeout > 0)) {
 		msleep(200 + hw->my_epid * 20);
-			timeout -= (200 + hw->my_epid * 20);
+		timeout -= (200 + hw->my_epid * 20);
 
 		res_buf->unshare_buffer.length = 0;
 		res_buf->unshare_buffer.code = 0;
diff --git a/drivers/net/ipvlan/ipvlan_core.c b/drivers/net/ipvlan/ipvlan_core.c
index d50887e..8c48bb2 100644
--- a/drivers/net/ipvlan/ipvlan_core.c
+++ b/drivers/net/ipvlan/ipvlan_core.c
@@ -254,7 +254,7 @@
 	}
 }
 
-static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff *skb,
+static int ipvlan_rcv_frame(struct ipvl_addr *addr, struct sk_buff **pskb,
 			    bool local)
 {
 	struct ipvl_dev *ipvlan = addr->master;
@@ -262,6 +262,7 @@
 	unsigned int len;
 	rx_handler_result_t ret = RX_HANDLER_CONSUMED;
 	bool success = false;
+	struct sk_buff *skb = *pskb;
 
 	len = skb->len + ETH_HLEN;
 	if (unlikely(!(dev->flags & IFF_UP))) {
@@ -273,6 +274,7 @@
 	if (!skb)
 		goto out;
 
+	*pskb = skb;
 	skb->dev = dev;
 	skb->pkt_type = PACKET_HOST;
 
@@ -486,7 +488,7 @@
 
 	addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
 	if (addr)
-		return ipvlan_rcv_frame(addr, skb, true);
+		return ipvlan_rcv_frame(addr, &skb, true);
 
 out:
 	skb->dev = ipvlan->phy_dev;
@@ -506,7 +508,7 @@
 		if (lyr3h) {
 			addr = ipvlan_addr_lookup(ipvlan->port, lyr3h, addr_type, true);
 			if (addr)
-				return ipvlan_rcv_frame(addr, skb, true);
+				return ipvlan_rcv_frame(addr, &skb, true);
 		}
 		skb = skb_share_check(skb, GFP_ATOMIC);
 		if (!skb)
@@ -589,7 +591,7 @@
 
 	addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
 	if (addr)
-		ret = ipvlan_rcv_frame(addr, skb, false);
+		ret = ipvlan_rcv_frame(addr, pskb, false);
 
 out:
 	return ret;
@@ -626,7 +628,7 @@
 
 		addr = ipvlan_addr_lookup(port, lyr3h, addr_type, true);
 		if (addr)
-			ret = ipvlan_rcv_frame(addr, skb, false);
+			ret = ipvlan_rcv_frame(addr, pskb, false);
 	}
 
 	return ret;
@@ -651,5 +653,5 @@
 	WARN_ONCE(true, "ipvlan_handle_frame() called for mode = [%hx]\n",
 			  port->mode);
 	kfree_skb(skb);
-	return NET_RX_DROP;
+	return RX_HANDLER_CONSUMED;
 }
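
The ipvlan change threads a struct sk_buff ** through ipvlan_rcv_frame() because helpers like skb_share_check() may return a different buffer; without writing the new pointer back through *pskb, the caller would keep operating on a freed skb. A standalone sketch of the double-pointer write-back pattern (hypothetical types):

#include <stdlib.h>
#include <string.h>

struct buf { char data[64]; };

/* Stand-in for skb_share_check(): may replace the buffer. */
static struct buf *share_check(struct buf *b)
{
	struct buf *copy = malloc(sizeof(*copy));

	if (!copy)
		return NULL;
	memcpy(copy, b, sizeof(*copy));
	free(b);
	return copy;
}

static int rcv_frame(struct buf **pb)
{
	struct buf *b = *pb;

	b = share_check(b);
	if (!b)
		return -1;
	*pb = b;	/* the fix: propagate the new pointer to the caller */
	return 0;
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b || rcv_frame(&b))
		return 1;
	free(b);	/* safe: b was updated through the double pointer */
	return 0;
}
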
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 86f6c62..06c8bfe 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -415,6 +415,7 @@
 		skb = ip_check_defrag(dev_net(skb->dev), skb, IP_DEFRAG_MACVLAN);
 		if (!skb)
 			return RX_HANDLER_CONSUMED;
+		*pskb = skb;
 		eth = eth_hdr(skb);
 		macvlan_forward_source(skb, port, eth->h_source);
 		src = macvlan_hash_lookup(port, eth->h_source);
@@ -456,6 +457,7 @@
 		goto out;
 	}
 
+	*pskb = skb;
 	skb->dev = dev;
 	skb->pkt_type = PACKET_HOST;
 
diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c
index fabf11d..2d020a3 100644
--- a/drivers/net/phy/at803x.c
+++ b/drivers/net/phy/at803x.c
@@ -308,6 +308,8 @@
 	.flags			= PHY_HAS_INTERRUPT,
 	.config_aneg		= genphy_config_aneg,
 	.read_status		= genphy_read_status,
+	.ack_interrupt		= at803x_ack_interrupt,
+	.config_intr		= at803x_config_intr,
 	.driver			= {
 		.owner = THIS_MODULE,
 	},
@@ -327,6 +329,8 @@
 	.flags			= PHY_HAS_INTERRUPT,
 	.config_aneg		= genphy_config_aneg,
 	.read_status		= genphy_read_status,
+	.ack_interrupt		= at803x_ack_interrupt,
+	.config_intr		= at803x_config_intr,
 	.driver			= {
 		.owner = THIS_MODULE,
 	},
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index 5de8d58..0240552 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -1154,6 +1154,21 @@
 		.driver = { .owner = THIS_MODULE },
 	},
 	{
+		.phy_id = MARVELL_PHY_ID_88E1540,
+		.phy_id_mask = MARVELL_PHY_ID_MASK,
+		.name = "Marvell 88E1540",
+		.features = PHY_GBIT_FEATURES,
+		.flags = PHY_HAS_INTERRUPT,
+		.config_aneg = &m88e1510_config_aneg,
+		.read_status = &marvell_read_status,
+		.ack_interrupt = &marvell_ack_interrupt,
+		.config_intr = &marvell_config_intr,
+		.did_interrupt = &m88e1121_did_interrupt,
+		.resume = &genphy_resume,
+		.suspend = &genphy_suspend,
+		.driver = { .owner = THIS_MODULE },
+	},
+	{
 		.phy_id = MARVELL_PHY_ID_88E3016,
 		.phy_id_mask = MARVELL_PHY_ID_MASK,
 		.name = "Marvell 88E3016",
@@ -1186,6 +1201,7 @@
 	{ MARVELL_PHY_ID_88E1318S, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E1116R, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E1510, MARVELL_PHY_ID_MASK },
+	{ MARVELL_PHY_ID_88E1540, MARVELL_PHY_ID_MASK },
 	{ MARVELL_PHY_ID_88E3016, MARVELL_PHY_ID_MASK },
 	{ }
 };
diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c
index adb48ab..48ce6ef 100644
--- a/drivers/net/phy/phy.c
+++ b/drivers/net/phy/phy.c
@@ -863,6 +863,9 @@
 			needs_aneg = true;
 		break;
 	case PHY_NOLINK:
+		if (phy_interrupt_is_valid(phydev))
+			break;
+
 		err = phy_read_status(phydev);
 		if (err)
 			break;
diff --git a/drivers/net/phy/vitesse.c b/drivers/net/phy/vitesse.c
index 76cad71..dd295db 100644
--- a/drivers/net/phy/vitesse.c
+++ b/drivers/net/phy/vitesse.c
@@ -66,6 +66,7 @@
 #define PHY_ID_VSC8244			0x000fc6c0
 #define PHY_ID_VSC8514			0x00070670
 #define PHY_ID_VSC8574			0x000704a0
+#define PHY_ID_VSC8601			0x00070420
 #define PHY_ID_VSC8662			0x00070660
 #define PHY_ID_VSC8221			0x000fc550
 #define PHY_ID_VSC8211			0x000fc4b0
@@ -133,7 +134,8 @@
 			(phydev->drv->phy_id == PHY_ID_VSC8234 ||
 			 phydev->drv->phy_id == PHY_ID_VSC8244 ||
 			 phydev->drv->phy_id == PHY_ID_VSC8514 ||
-			 phydev->drv->phy_id == PHY_ID_VSC8574) ?
+			 phydev->drv->phy_id == PHY_ID_VSC8574 ||
+			 phydev->drv->phy_id == PHY_ID_VSC8601) ?
 				MII_VSC8244_IMASK_MASK :
 				MII_VSC8221_IMASK_MASK);
 	else {
@@ -272,6 +274,18 @@
 	.config_intr    = &vsc82xx_config_intr,
 	.driver         = { .owner = THIS_MODULE,},
 }, {
+	.phy_id         = PHY_ID_VSC8601,
+	.name           = "Vitesse VSC8601",
+	.phy_id_mask    = 0x000ffff0,
+	.features       = PHY_GBIT_FEATURES,
+	.flags          = PHY_HAS_INTERRUPT,
+	.config_init    = &genphy_config_init,
+	.config_aneg    = &genphy_config_aneg,
+	.read_status    = &genphy_read_status,
+	.ack_interrupt  = &vsc824x_ack_interrupt,
+	.config_intr    = &vsc82xx_config_intr,
+	.driver         = { .owner = THIS_MODULE,},
+}, {
 	.phy_id         = PHY_ID_VSC8662,
 	.name           = "Vitesse VSC8662",
 	.phy_id_mask    = 0x000ffff0,
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index c78d3cb..3da70bf 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -696,6 +696,11 @@
 			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
 	.driver_info = (kernel_ulong_t) &wwan_info,
 }, {
+	/* Dell DW5580 modules */
+	USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, 0x81ba, USB_CLASS_COMM,
+			USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE),
+	.driver_info = (kernel_ulong_t)&wwan_info,
+}, {
 	USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET,
 			USB_CDC_PROTO_NONE),
 	.driver_info = (unsigned long) &cdc_info,
diff --git a/drivers/net/vmxnet3/vmxnet3_drv.c b/drivers/net/vmxnet3/vmxnet3_drv.c
index 46f4cad..899ea42 100644
--- a/drivers/net/vmxnet3/vmxnet3_drv.c
+++ b/drivers/net/vmxnet3/vmxnet3_drv.c
@@ -2157,12 +2157,13 @@
 		if (!netdev_mc_empty(netdev)) {
 			new_table = vmxnet3_copy_mc(netdev);
 			if (new_table) {
-				rxConf->mfTableLen = cpu_to_le16(
-					netdev_mc_count(netdev) * ETH_ALEN);
+				size_t sz = netdev_mc_count(netdev) * ETH_ALEN;
+
+				rxConf->mfTableLen = cpu_to_le16(sz);
 				new_table_pa = dma_map_single(
 							&adapter->pdev->dev,
 							new_table,
-							rxConf->mfTableLen,
+							sz,
 							PCI_DMA_TODEVICE);
 			}
 
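
The vmxnet3 hunk computes the table size once in a native size_t and converts only when storing into the little-endian mfTableLen field; reading the __le16 back as the dma_map_single() length could hand the DMA API a byte-swapped value on big-endian hosts. A minimal userspace illustration (hand-rolled cpu_to_le16 for the sketch):

#include <stdint.h>
#include <stdio.h>

#define ETH_ALEN 6

static uint16_t cpu_to_le16(uint16_t v)
{
#if defined(__BYTE_ORDER__) && __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	return (uint16_t)((v << 8) | (v >> 8));
#else
	return v;
#endif
}

int main(void)
{
	size_t mc_count = 6;			/* netdev_mc_count() stand-in */
	size_t sz = mc_count * ETH_ALEN;	/* native order: use for DMA */
	uint16_t wire = cpu_to_le16((uint16_t)sz); /* stored for the device */

	printf("map %zu bytes, store %#x in the shared struct\n", sz, wire);
	return 0;
}
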
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3f859a5..4c58c83 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
 /*
  * Version numbers
  */
-#define VMXNET3_DRIVER_VERSION_STRING   "1.4.3.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING   "1.4.4.0-k"
 
 /* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
-#define VMXNET3_DRIVER_VERSION_NUM      0x01040300
+#define VMXNET3_DRIVER_VERSION_NUM      0x01040400
 
 #if defined(CONFIG_PCI_MSI)
 	/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index e735c72..edb1984 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -1685,8 +1685,8 @@
 {
 	struct device *bridge = pci_get_host_bridge_device(dev);
 
-	if (IS_ENABLED(CONFIG_OF) && dev->dev.of_node) {
-		if (bridge->parent)
+	if (IS_ENABLED(CONFIG_OF) &&
+		bridge->parent && bridge->parent->of_node) {
 			of_dma_configure(&dev->dev, bridge->parent->of_node);
 	} else if (has_acpi_companion(bridge)) {
 		struct acpi_device *adev = to_acpi_device_node(bridge->fwnode);
diff --git a/drivers/s390/cio/chsc.c b/drivers/s390/cio/chsc.c
index 548a189..a831d18 100644
--- a/drivers/s390/cio/chsc.c
+++ b/drivers/s390/cio/chsc.c
@@ -1080,28 +1080,10 @@
 	free_page((unsigned long)sei_page);
 }
 
-int chsc_enable_facility(int operation_code)
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code)
 {
-	unsigned long flags;
 	int ret;
-	struct {
-		struct chsc_header request;
-		u8 reserved1:4;
-		u8 format:4;
-		u8 reserved2;
-		u16 operation_code;
-		u32 reserved3;
-		u32 reserved4;
-		u32 operation_data_area[252];
-		struct chsc_header response;
-		u32 reserved5:4;
-		u32 format2:4;
-		u32 reserved6:24;
-	} __attribute__ ((packed)) *sda_area;
 
-	spin_lock_irqsave(&chsc_page_lock, flags);
-	memset(chsc_page, 0, PAGE_SIZE);
-	sda_area = chsc_page;
 	sda_area->request.length = 0x0400;
 	sda_area->request.code = 0x0031;
 	sda_area->operation_code = operation_code;
@@ -1119,10 +1101,25 @@
 	default:
 		ret = chsc_error_from_response(sda_area->response.code);
 	}
+out:
+	return ret;
+}
+
+int chsc_enable_facility(int operation_code)
+{
+	struct chsc_sda_area *sda_area;
+	unsigned long flags;
+	int ret;
+
+	spin_lock_irqsave(&chsc_page_lock, flags);
+	memset(chsc_page, 0, PAGE_SIZE);
+	sda_area = chsc_page;
+
+	ret = __chsc_enable_facility(sda_area, operation_code);
 	if (ret != 0)
 		CIO_CRW_EVENT(2, "chsc: sda (oc=%x) failed (rc=%04x)\n",
 			      operation_code, sda_area->response.code);
-out:
+
 	spin_unlock_irqrestore(&chsc_page_lock, flags);
 	return ret;
 }
diff --git a/drivers/s390/cio/chsc.h b/drivers/s390/cio/chsc.h
index 76c9b50..0de134c 100644
--- a/drivers/s390/cio/chsc.h
+++ b/drivers/s390/cio/chsc.h
@@ -115,6 +115,20 @@
 	u8 data[PAGE_SIZE - 20];
 } __attribute__ ((packed));
 
+struct chsc_sda_area {
+	struct chsc_header request;
+	u8 :4;
+	u8 format:4;
+	u8 :8;
+	u16 operation_code;
+	u32 :32;
+	u32 :32;
+	u32 operation_data_area[252];
+	struct chsc_header response;
+	u32 :4;
+	u32 format2:4;
+	u32 :24;
+} __packed __aligned(PAGE_SIZE);
 
 extern int chsc_get_ssd_info(struct subchannel_id schid,
 			     struct chsc_ssd_info *ssd);
@@ -122,6 +136,7 @@
 extern int chsc_init(void);
 extern void chsc_init_cleanup(void);
 
+int __chsc_enable_facility(struct chsc_sda_area *sda_area, int operation_code);
 extern int chsc_enable_facility(int);
 struct channel_subsystem;
 extern int chsc_secm(struct channel_subsystem *, int);
diff --git a/drivers/s390/cio/cio.c b/drivers/s390/cio/cio.c
index b5620e8..690b854 100644
--- a/drivers/s390/cio/cio.c
+++ b/drivers/s390/cio/cio.c
@@ -925,18 +925,32 @@
 
 int __init cio_get_iplinfo(struct cio_iplinfo *iplinfo)
 {
+	static struct chsc_sda_area sda_area __initdata;
 	struct subchannel_id schid;
 	struct schib schib;
 
 	schid = *(struct subchannel_id *)&S390_lowcore.subchannel_id;
 	if (!schid.one)
 		return -ENODEV;
+
+	if (schid.ssid) {
+		/*
+		 * Firmware should have already enabled MSS but whoever started
+		 * the kernel might have initiated a channel subsystem reset.
+		 * Ensure that MSS is enabled.
+		 */
+		memset(&sda_area, 0, sizeof(sda_area));
+		if (__chsc_enable_facility(&sda_area, CHSC_SDA_OC_MSS))
+			return -ENODEV;
+	}
 	if (stsch_err(schid, &schib))
 		return -ENODEV;
 	if (schib.pmcw.st != SUBCHANNEL_TYPE_IO)
 		return -ENODEV;
 	if (!schib.pmcw.dnv)
 		return -ENODEV;
+
+	iplinfo->ssid = schid.ssid;
 	iplinfo->devno = schib.pmcw.dev;
 	iplinfo->is_qdio = schib.pmcw.qf;
 	return 0;
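
The chsc refactor splits the facility-enable logic so __chsc_enable_facility() operates on a caller-supplied buffer, letting cio_get_iplinfo() re-enable MSS very early in boot from a static __initdata area, before the usual chsc_page/lock machinery is usable. The shape of that split in a standalone sketch (hypothetical command structure):

#include <stdio.h>

#define PAGE_SIZE 4096

struct cmd_area {
	unsigned short code;
	unsigned char data[PAGE_SIZE - 2];
};

/* Core helper: runs the command on whatever buffer it is handed. */
static int do_command_on(struct cmd_area *a, int opcode)
{
	a->code = (unsigned short)opcode;
	/* ... issue the call and inspect the response here ... */
	return 0;
}

/* Normal path: shared buffer, guarded by a lock in the real code. */
static struct cmd_area shared_area;

static int do_command(int opcode)
{
	return do_command_on(&shared_area, opcode);
}

/* Early-boot path: a static, page-aligned scratch area of its own. */
static struct cmd_area early_area __attribute__((aligned(PAGE_SIZE)));

int main(void)
{
	if (do_command_on(&early_area, 0x31))	/* before allocators/locks */
		return 1;
	return do_command(0x31);
}
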
diff --git a/drivers/s390/cio/css.c b/drivers/s390/cio/css.c
index 2ee3053..489e703 100644
--- a/drivers/s390/cio/css.c
+++ b/drivers/s390/cio/css.c
@@ -702,17 +702,12 @@
 		css->global_pgid.pgid_high.ext_cssid.version = 0x80;
 		css->global_pgid.pgid_high.ext_cssid.cssid = css->cssid;
 	} else {
-#ifdef CONFIG_SMP
 		css->global_pgid.pgid_high.cpu_addr = stap();
-#else
-		css->global_pgid.pgid_high.cpu_addr = 0;
-#endif
 	}
 	get_cpu_id(&cpu_id);
 	css->global_pgid.cpu_id = cpu_id.ident;
 	css->global_pgid.cpu_model = cpu_id.machine;
 	css->global_pgid.tod_high = tod_high;
-
 }
 
 static void
diff --git a/drivers/s390/crypto/Makefile b/drivers/s390/crypto/Makefile
index 57f710b..b8ab186 100644
--- a/drivers/s390/crypto/Makefile
+++ b/drivers/s390/crypto/Makefile
@@ -3,6 +3,9 @@
 #
 
 ap-objs := ap_bus.o
-obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o zcrypt_pcixcc.o
-obj-$(CONFIG_ZCRYPT) += zcrypt_cex2a.o zcrypt_cex4.o
+# zcrypt_api depends on ap
+obj-$(CONFIG_ZCRYPT) += ap.o zcrypt_api.o
+# msgtype* depend on zcrypt_api
 obj-$(CONFIG_ZCRYPT) += zcrypt_msgtype6.o zcrypt_msgtype50.o
+# adapter drivers depend on ap, zcrypt_api and msgtype*
+obj-$(CONFIG_ZCRYPT) += zcrypt_pcixcc.o zcrypt_cex2a.o zcrypt_cex4.o
diff --git a/drivers/s390/crypto/ap_bus.c b/drivers/s390/crypto/ap_bus.c
index 9cb3dfb..61f7685 100644
--- a/drivers/s390/crypto/ap_bus.c
+++ b/drivers/s390/crypto/ap_bus.c
@@ -74,6 +74,7 @@
 static struct ap_config_info *ap_configuration;
 static DEFINE_SPINLOCK(ap_device_list_lock);
 static LIST_HEAD(ap_device_list);
+static bool initialised;
 
 /*
  * Workqueue timer for bus rescan.
@@ -1384,6 +1385,9 @@
 {
 	struct device_driver *drv = &ap_drv->driver;
 
+	if (!initialised)
+		return -ENODEV;
+
 	drv->bus = &ap_bus_type;
 	drv->probe = ap_device_probe;
 	drv->remove = ap_device_remove;
@@ -1808,6 +1812,7 @@
 		goto out_pm;
 
 	queue_work(system_long_wq, &ap_scan_work);
+	initialised = true;
 
 	return 0;
 
@@ -1837,6 +1842,7 @@
 {
 	int i;
 
+	initialised = false;
 	ap_reset_domain();
 	ap_poll_thread_stop();
 	del_timer_sync(&ap_config_timer);
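
The new "initialised" flag closes a window where ap_driver_register() could run before the bus core finished setting up (or after it began tearing down); registration now fails with -ENODEV outside that window. A minimal sketch of the guard (hypothetical bus/driver stand-ins):

#include <stdbool.h>
#include <stdio.h>

static bool initialised;

static int bus_init(void)  { initialised = true;  return 0; }
static void bus_exit(void) { initialised = false; /* then tear down */ }

static int driver_register(const char *name)
{
	if (!initialised)
		return -19;	/* -ENODEV: bus not ready */
	printf("registered %s\n", name);
	return 0;
}

int main(void)
{
	if (driver_register("early") == 0)	/* rejected: too early */
		return 1;
	bus_init();
	if (driver_register("ok"))
		return 1;
	bus_exit();
	return driver_register("late") == 0;	/* rejected: too late */
}
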
diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c
index a9603eb..9f8fa42 100644
--- a/drivers/s390/crypto/zcrypt_api.c
+++ b/drivers/s390/crypto/zcrypt_api.c
@@ -317,11 +317,9 @@
 
 void zcrypt_msgtype_register(struct zcrypt_ops *zops)
 {
-	if (zops->owner) {
-		spin_lock_bh(&zcrypt_ops_list_lock);
-		list_add_tail(&zops->list, &zcrypt_ops_list);
-		spin_unlock_bh(&zcrypt_ops_list_lock);
-	}
+	spin_lock_bh(&zcrypt_ops_list_lock);
+	list_add_tail(&zops->list, &zcrypt_ops_list);
+	spin_unlock_bh(&zcrypt_ops_list_lock);
 }
 EXPORT_SYMBOL(zcrypt_msgtype_register);
 
@@ -342,7 +340,7 @@
 	spin_lock_bh(&zcrypt_ops_list_lock);
 	list_for_each_entry(zops, &zcrypt_ops_list, list) {
 		if ((zops->variant == variant) &&
-		    (!strncmp(zops->owner->name, name, MODULE_NAME_LEN))) {
+		    (!strncmp(zops->name, name, sizeof(zops->name)))) {
 			found = 1;
 			break;
 		}
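
The lookup now matches on the name string embedded in struct zcrypt_ops instead of dereferencing zops->owner->name, which is a NULL dereference when the message-type code is built in (THIS_MODULE is NULL for built-in code, as the removed owner check in the register path also hints). A standalone sketch of the safer match (hypothetical structures):

#include <stdio.h>
#include <string.h>

struct module {
	char name[64];
};

struct zops {
	const struct module *owner;	/* NULL when built in */
	char name[128];			/* now carried by the ops itself */
	int variant;
};

static int match(const struct zops *z, const char *name, int variant)
{
	return z->variant == variant &&
	       !strncmp(z->name, name, sizeof(z->name));
}

int main(void)
{
	struct zops builtin = { .owner = NULL, .name = "MSGTYPE50", .variant = 0 };

	/* Safe even though builtin.owner is NULL. */
	printf("match: %d\n", match(&builtin, "MSGTYPE50", 0));
	return 0;
}
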
diff --git a/drivers/s390/crypto/zcrypt_api.h b/drivers/s390/crypto/zcrypt_api.h
index 7508768..38618f0 100644
--- a/drivers/s390/crypto/zcrypt_api.h
+++ b/drivers/s390/crypto/zcrypt_api.h
@@ -96,6 +96,7 @@
 	struct list_head list;		/* zcrypt ops list. */
 	struct module *owner;
 	int variant;
+	char name[128];
 };
 
 struct zcrypt_device {
diff --git a/drivers/s390/crypto/zcrypt_msgtype50.c b/drivers/s390/crypto/zcrypt_msgtype50.c
index 71ceee9..74edf29 100644
--- a/drivers/s390/crypto/zcrypt_msgtype50.c
+++ b/drivers/s390/crypto/zcrypt_msgtype50.c
@@ -513,6 +513,7 @@
 	.rsa_modexpo = zcrypt_cex2a_modexpo,
 	.rsa_modexpo_crt = zcrypt_cex2a_modexpo_crt,
 	.owner = THIS_MODULE,
+	.name = MSGTYPE50_NAME,
 	.variant = MSGTYPE50_VARIANT_DEFAULT,
 };
 
diff --git a/drivers/s390/crypto/zcrypt_msgtype6.c b/drivers/s390/crypto/zcrypt_msgtype6.c
index 7476221..9a2dd47 100644
--- a/drivers/s390/crypto/zcrypt_msgtype6.c
+++ b/drivers/s390/crypto/zcrypt_msgtype6.c
@@ -1119,6 +1119,7 @@
  */
 static struct zcrypt_ops zcrypt_msgtype6_norng_ops = {
 	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
 	.variant = MSGTYPE06_VARIANT_NORNG,
 	.rsa_modexpo = zcrypt_msgtype6_modexpo,
 	.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
@@ -1127,6 +1128,7 @@
 
 static struct zcrypt_ops zcrypt_msgtype6_ops = {
 	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
 	.variant = MSGTYPE06_VARIANT_DEFAULT,
 	.rsa_modexpo = zcrypt_msgtype6_modexpo,
 	.rsa_modexpo_crt = zcrypt_msgtype6_modexpo_crt,
@@ -1136,6 +1138,7 @@
 
 static struct zcrypt_ops zcrypt_msgtype6_ep11_ops = {
 	.owner = THIS_MODULE,
+	.name = MSGTYPE06_NAME,
 	.variant = MSGTYPE06_VARIANT_EP11,
 	.rsa_modexpo = NULL,
 	.rsa_modexpo_crt = NULL,
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 25abd4e..91a00301 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@
 
 static int __init sh_pm_runtime_init(void)
 {
-	if (IS_ENABLED(CONFIG_ARCH_SHMOBILE_MULTI)) {
+	if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
 		if (!of_find_compatible_node(NULL, NULL,
 					     "renesas,cpg-mstp-clocks"))
 			return 0;
diff --git a/fs/Kconfig b/fs/Kconfig
index da3f32f..6ce72d8 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -46,6 +46,12 @@
 	  or if unsure, say N.  Saying Y will increase the size of the kernel
 	  by about 5kB.
 
+config FS_DAX_PMD
+	bool
+	default FS_DAX
+	depends on FS_DAX
+	depends on BROKEN
+
 endif # BLOCK
 
 # Posix ACL utility routines
diff --git a/fs/block_dev.c b/fs/block_dev.c
index bb0dfb1..c25639e 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -390,9 +390,17 @@
 			struct page *page)
 {
 	const struct block_device_operations *ops = bdev->bd_disk->fops;
+	int result = -EOPNOTSUPP;
+
 	if (!ops->rw_page || bdev_get_integrity(bdev))
-		return -EOPNOTSUPP;
-	return ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
+		return result;
+
+	result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
+	if (result)
+		return result;
+	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, READ);
+	blk_queue_exit(bdev->bd_queue);
+	return result;
 }
 EXPORT_SYMBOL_GPL(bdev_read_page);
 
@@ -421,14 +429,20 @@
 	int result;
 	int rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE;
 	const struct block_device_operations *ops = bdev->bd_disk->fops;
+
 	if (!ops->rw_page || bdev_get_integrity(bdev))
 		return -EOPNOTSUPP;
+	result = blk_queue_enter(bdev->bd_queue, GFP_KERNEL);
+	if (result)
+		return result;
+
 	set_page_writeback(page);
 	result = ops->rw_page(bdev, sector + get_start_sect(bdev), page, rw);
 	if (result)
 		end_page_writeback(page);
 	else
 		unlock_page(page);
+	blk_queue_exit(bdev->bd_queue);
 	return result;
 }
 EXPORT_SYMBOL_GPL(bdev_write_page);
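
Both bdev_read_page() and bdev_write_page() now bracket the ->rw_page call with blk_queue_enter()/blk_queue_exit(), taking a reference that fails once the queue is being torn down, so the page I/O cannot race with queue removal. A minimal sketch of the enter/exit guard (userspace stand-ins):

#include <stdbool.h>
#include <stdio.h>

struct queue { int users; bool dying; };

static int queue_enter(struct queue *q)
{
	if (q->dying)
		return -5;	/* refuse new I/O once teardown started */
	q->users++;
	return 0;
}

static void queue_exit(struct queue *q) { q->users--; }

static int rw_page(struct queue *q)
{
	int ret = queue_enter(q);

	if (ret)
		return ret;
	/* ... issue the page I/O; the queue cannot be freed here ... */
	queue_exit(q);
	return 0;
}

int main(void)
{
	struct queue q = { 0, false };

	if (rw_page(&q))
		return 1;
	q.dying = true;
	return rw_page(&q) ? 0 : 1;	/* must fail once dying */
}
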
diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
index 7a6b02f..c0f3da3 100644
--- a/fs/cachefiles/rdwr.c
+++ b/fs/cachefiles/rdwr.c
@@ -879,7 +879,7 @@
 	loff_t pos, eof;
 	size_t len;
 	void *data;
-	int ret;
+	int ret = -ENOBUFS;
 
 	ASSERT(op != NULL);
 	ASSERT(page != NULL);
diff --git a/fs/dax.c b/fs/dax.c
index d1e5cb7..43671b6 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -541,6 +541,10 @@
 	unsigned long pfn;
 	int result = 0;
 
+	/* dax pmd mappings are broken wrt gup and fork */
+	if (!IS_ENABLED(CONFIG_FS_DAX_PMD))
+		return VM_FAULT_FALLBACK;
+
 	/* Fall back to PTEs if we're going to COW */
 	if (write && !(vma->vm_flags & VM_SHARED))
 		return VM_FAULT_FALLBACK;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 3a71cea..748d35a 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -569,6 +569,8 @@
 			/* Fall through */
 		case Opt_dax:
 #ifdef CONFIG_FS_DAX
+			ext2_msg(sb, KERN_WARNING,
+		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
 			set_opt(sbi->s_mount_opt, DAX);
 #else
 			ext2_msg(sb, KERN_INFO, "dax option not supported");
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 753f4e6..c9ab67d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1664,8 +1664,12 @@
 		}
 		sbi->s_jquota_fmt = m->mount_opt;
 #endif
-#ifndef CONFIG_FS_DAX
 	} else if (token == Opt_dax) {
+#ifdef CONFIG_FS_DAX
+		ext4_msg(sb, KERN_WARNING,
+		"DAX enabled. Warning: EXPERIMENTAL, use at your own risk");
+			sbi->s_mount_opt |= m->mount_opt;
+#else
 		ext4_msg(sb, KERN_INFO, "dax option not supported");
 		return -1;
 #endif
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index e67aeac..4b74c97 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -136,6 +136,9 @@
 
 void drm_atomic_legacy_backoff(struct drm_atomic_state *state);
 
+void
+drm_atomic_clean_old_fb(struct drm_device *dev, unsigned plane_mask, int ret);
+
 int __must_check drm_atomic_check_only(struct drm_atomic_state *state);
 int __must_check drm_atomic_commit(struct drm_atomic_state *state);
 int __must_check drm_atomic_async_commit(struct drm_atomic_state *state);
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 3fe27f8..c0d2b79 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -794,6 +794,8 @@
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 			 struct scsi_ioctl_command __user *);
 
+extern int blk_queue_enter(struct request_queue *q, gfp_t gfp);
+extern void blk_queue_exit(struct request_queue *q);
 extern void blk_start_queue(struct request_queue *q);
 extern void blk_stop_queue(struct request_queue *q);
 extern void blk_sync_queue(struct request_queue *q);
diff --git a/include/linux/marvell_phy.h b/include/linux/marvell_phy.h
index e6982ac..a57f0df 100644
--- a/include/linux/marvell_phy.h
+++ b/include/linux/marvell_phy.h
@@ -16,6 +16,7 @@
 #define MARVELL_PHY_ID_88E1318S		0x01410e90
 #define MARVELL_PHY_ID_88E1116R		0x01410e40
 #define MARVELL_PHY_ID_88E1510		0x01410dd0
+#define MARVELL_PHY_ID_88E1540		0x01410eb0
 #define MARVELL_PHY_ID_88E3016		0x01410e60
 
 /* struct phy_device dev_flags definitions */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index dd20974..1565324 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -453,26 +453,28 @@
 	u8         lro_cap[0x1];
 	u8         lro_psh_flag[0x1];
 	u8         lro_time_stamp[0x1];
-	u8         reserved_0[0x6];
+	u8         reserved_0[0x3];
+	u8         self_lb_en_modifiable[0x1];
+	u8         reserved_1[0x2];
 	u8         max_lso_cap[0x5];
-	u8         reserved_1[0x4];
+	u8         reserved_2[0x4];
 	u8         rss_ind_tbl_cap[0x4];
-	u8         reserved_2[0x3];
+	u8         reserved_3[0x3];
 	u8         tunnel_lso_const_out_ip_id[0x1];
-	u8         reserved_3[0x2];
+	u8         reserved_4[0x2];
 	u8         tunnel_statless_gre[0x1];
 	u8         tunnel_stateless_vxlan[0x1];
 
-	u8         reserved_4[0x20];
+	u8         reserved_5[0x20];
 
-	u8         reserved_5[0x10];
+	u8         reserved_6[0x10];
 	u8         lro_min_mss_size[0x10];
 
-	u8         reserved_6[0x120];
+	u8         reserved_7[0x120];
 
 	u8         lro_timer_supported_periods[4][0x20];
 
-	u8         reserved_7[0x600];
+	u8         reserved_8[0x600];
 };
 
 struct mlx5_ifc_roce_cap_bits {
@@ -4051,9 +4053,11 @@
 };
 
 struct mlx5_ifc_modify_tir_bitmask_bits {
-	u8	   reserved[0x20];
+	u8	   reserved_0[0x20];
 
-	u8         reserved1[0x1f];
+	u8         reserved_1[0x1b];
+	u8         self_lb_en[0x1];
+	u8         reserved_2[0x3];
 	u8         lro[0x1];
 };
 
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index d208914..67bfac1 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2068,20 +2068,23 @@
 	struct u64_stats_sync   syncp;
 };
 
-#define netdev_alloc_pcpu_stats(type)				\
-({								\
-	typeof(type) __percpu *pcpu_stats = alloc_percpu(type); \
-	if (pcpu_stats)	{					\
-		int __cpu;					\
-		for_each_possible_cpu(__cpu) {			\
-			typeof(type) *stat;			\
-			stat = per_cpu_ptr(pcpu_stats, __cpu);	\
-			u64_stats_init(&stat->syncp);		\
-		}						\
-	}							\
-	pcpu_stats;						\
+#define __netdev_alloc_pcpu_stats(type, gfp)				\
+({									\
+	typeof(type) __percpu *pcpu_stats = alloc_percpu_gfp(type, gfp);\
+	if (pcpu_stats)	{						\
+		int __cpu;						\
+		for_each_possible_cpu(__cpu) {				\
+			typeof(type) *stat;				\
+			stat = per_cpu_ptr(pcpu_stats, __cpu);		\
+			u64_stats_init(&stat->syncp);			\
+		}							\
+	}								\
+	pcpu_stats;							\
 })
 
+#define netdev_alloc_pcpu_stats(type)					\
+	__netdev_alloc_pcpu_stats(type, GFP_KERNEL)
+
 #include <linux/notifier.h>
 
 /* netdevice notifier chain. Please remember to update the rtnetlink
@@ -3854,6 +3857,11 @@
 	return dev->priv_flags & IFF_EBRIDGE;
 }
 
+static inline bool netif_is_bridge_port(const struct net_device *dev)
+{
+	return dev->priv_flags & IFF_BRIDGE_PORT;
+}
+
 static inline bool netif_is_ovs_master(const struct net_device *dev)
 {
 	return dev->priv_flags & IFF_OPENVSWITCH;
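
The per-cpu stats helper is split so that callers in atomic context can
pass their own gfp flags, while netdev_alloc_pcpu_stats() keeps the old
GFP_KERNEL behaviour. A sketch of typical usage (the function name is
hypothetical, the types are real):

	#include <linux/netdevice.h>

	static int my_dev_init(struct net_device *dev)
	{
		/* One pcpu_sw_netstats per possible CPU, with each
		 * u64_stats_sync already initialized by the macro. */
		dev->tstats = netdev_alloc_pcpu_stats(struct pcpu_sw_netstats);
		if (!dev->tstats)
			return -ENOMEM;
		return 0;
	}

	/* From atomic context, pass GFP_ATOMIC explicitly: */
	struct pcpu_sw_netstats __percpu *stats =
		__netdev_alloc_pcpu_stats(struct pcpu_sw_netstats, GFP_ATOMIC);
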
diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
index 48bb01e..0e1f433 100644
--- a/include/linux/netfilter/ipset/ip_set.h
+++ b/include/linux/netfilter/ipset/ip_set.h
@@ -421,7 +421,7 @@
 extern int ip_set_get_ipaddr4(struct nlattr *nla,  __be32 *ipaddr);
 extern int ip_set_get_ipaddr6(struct nlattr *nla, union nf_inet_addr *ipaddr);
 extern size_t ip_set_elem_len(struct ip_set *set, struct nlattr *tb[],
-			      size_t len);
+			      size_t len, size_t align);
 extern int ip_set_get_extensions(struct ip_set *set, struct nlattr *tb[],
 				 struct ip_set_ext *ext);
 
diff --git a/include/linux/netfilter_ingress.h b/include/linux/netfilter_ingress.h
index 187feab..5fcd375 100644
--- a/include/linux/netfilter_ingress.h
+++ b/include/linux/netfilter_ingress.h
@@ -5,10 +5,13 @@
 #include <linux/netdevice.h>
 
 #ifdef CONFIG_NETFILTER_INGRESS
-static inline int nf_hook_ingress_active(struct sk_buff *skb)
+static inline bool nf_hook_ingress_active(const struct sk_buff *skb)
 {
-	return nf_hook_list_active(&skb->dev->nf_hooks_ingress,
-				   NFPROTO_NETDEV, NF_NETDEV_INGRESS);
+#ifdef HAVE_JUMP_LABEL
+	if (!static_key_false(&nf_hooks_needed[NFPROTO_NETDEV][NF_NETDEV_INGRESS]))
+		return false;
+#endif
+	return !list_empty(&skb->dev->nf_hooks_ingress);
 }
 
 static inline int nf_hook_ingress(struct sk_buff *skb)
@@ -16,8 +19,8 @@
 	struct nf_hook_state state;
 
 	nf_hook_state_init(&state, &skb->dev->nf_hooks_ingress,
-			   NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV, NULL,
-			   skb->dev, NULL, dev_net(skb->dev), NULL);
+			   NF_NETDEV_INGRESS, INT_MIN, NFPROTO_NETDEV,
+			   skb->dev, NULL, NULL, dev_net(skb->dev), NULL);
 	return nf_hook_slow(skb, &state);
 }
 
diff --git a/include/linux/of_dma.h b/include/linux/of_dma.h
index 36112cd..b90d8ec 100644
--- a/include/linux/of_dma.h
+++ b/include/linux/of_dma.h
@@ -80,7 +80,7 @@
 static inline struct dma_chan *of_dma_request_slave_channel(struct device_node *np,
 						     const char *name)
 {
-	return NULL;
+	return ERR_PTR(-ENODEV);
 }
 
 static inline struct dma_chan *of_dma_simple_xlate(struct of_phandle_args *dma_spec,
diff --git a/include/net/ip6_fib.h b/include/net/ip6_fib.h
index aaf9700..fb961a5 100644
--- a/include/net/ip6_fib.h
+++ b/include/net/ip6_fib.h
@@ -167,7 +167,8 @@
 
 static inline u32 rt6_get_cookie(const struct rt6_info *rt)
 {
-	if (rt->rt6i_flags & RTF_PCPU || unlikely(rt->dst.flags & DST_NOCACHE))
+	if (rt->rt6i_flags & RTF_PCPU ||
+	    (unlikely(rt->dst.flags & DST_NOCACHE) && rt->dst.from))
 		rt = (struct rt6_info *)(rt->dst.from);
 
 	return rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index aaee6fa..ff788b6 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -90,11 +90,12 @@
 	err = ip6_local_out(dev_net(skb_dst(skb)->dev), sk, skb);
 
 	if (net_xmit_eval(err) == 0) {
-		struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
+		struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
 		u64_stats_update_begin(&tstats->syncp);
 		tstats->tx_bytes += pkt_len;
 		tstats->tx_packets++;
 		u64_stats_update_end(&tstats->syncp);
+		put_cpu_ptr(tstats);
 	} else {
 		stats->tx_errors++;
 		stats->tx_aborted_errors++;
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index f6dafec..62a750a 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -287,12 +287,13 @@
 				       struct pcpu_sw_netstats __percpu *stats)
 {
 	if (err > 0) {
-		struct pcpu_sw_netstats *tstats = this_cpu_ptr(stats);
+		struct pcpu_sw_netstats *tstats = get_cpu_ptr(stats);
 
 		u64_stats_update_begin(&tstats->syncp);
 		tstats->tx_bytes += err;
 		tstats->tx_packets++;
 		u64_stats_update_end(&tstats->syncp);
+		put_cpu_ptr(tstats);
 	} else if (err < 0) {
 		err_stats->tx_errors++;
 		err_stats->tx_aborted_errors++;
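
Both tunnel hunks make the same fix: this_cpu_ptr() is only safe when
preemption is already disabled, and these transmit paths can run in
process context. get_cpu_ptr() disables preemption before resolving the
per-cpu pointer, so the task cannot migrate CPUs between
u64_stats_update_begin() and u64_stats_update_end() and corrupt another
CPU's counters. The pattern in isolation:

	struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);

	u64_stats_update_begin(&tstats->syncp);
	tstats->tx_bytes += pkt_len;	/* no CPU migration possible here */
	tstats->tx_packets++;
	u64_stats_update_end(&tstats->syncp);
	put_cpu_ptr(tstats);		/* re-enables preemption */
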
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index c9149cc..4bd7508 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -618,6 +618,8 @@
 	void				(*eval)(const struct nft_expr *expr,
 						struct nft_regs *regs,
 						const struct nft_pktinfo *pkt);
+	int				(*clone)(struct nft_expr *dst,
+						 const struct nft_expr *src);
 	unsigned int			size;
 
 	int				(*init)(const struct nft_ctx *ctx,
@@ -660,10 +662,20 @@
 int nft_expr_dump(struct sk_buff *skb, unsigned int attr,
 		  const struct nft_expr *expr);
 
-static inline void nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
+static inline int nft_expr_clone(struct nft_expr *dst, struct nft_expr *src)
 {
+	int err;
+
 	__module_get(src->ops->type->owner);
-	memcpy(dst, src, src->ops->size);
+	if (src->ops->clone) {
+		dst->ops = src->ops;
+		err = src->ops->clone(dst, src);
+		if (err < 0)
+			return err;
+	} else {
+		memcpy(dst, src, src->ops->size);
+	}
+	return 0;
 }
 
 /**
diff --git a/include/net/sock.h b/include/net/sock.h
index bbf7c2c..7f89e4b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -2226,6 +2226,31 @@
 	return (1 << sk->sk_state) & (TCPF_LISTEN | TCPF_NEW_SYN_RECV);
 }
 
+/**
+ * sk_state_load - read sk->sk_state for lockless contexts
+ * @sk: socket pointer
+ *
+ * Paired with sk_state_store(). Used in places where we do not hold the socket lock:
+ * tcp_diag_get_info(), tcp_get_info(), tcp_poll(), get_tcp4_sock() ...
+ */
+static inline int sk_state_load(const struct sock *sk)
+{
+	return smp_load_acquire(&sk->sk_state);
+}
+
+/**
+ * sk_state_store - update sk->sk_state
+ * @sk: socket pointer
+ * @newstate: new state
+ *
+ * Paired with sk_state_load(). Should be used in contexts where
+ * state change might impact lockless readers.
+ */
+static inline void sk_state_store(struct sock *sk, int newstate)
+{
+	smp_store_release(&sk->sk_state, newstate);
+}
+
 void sock_enable_timestamp(struct sock *sk, int flag);
 int sock_get_timestamp(struct sock *, struct timeval __user *);
 int sock_get_timestampns(struct sock *, struct timespec __user *);
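
sk_state_load()/sk_state_store() turn sk_state into an acquire/release
pair: a lockless reader that observes TCP_LISTEN is also guaranteed to
observe every store the listener made before sk_state_store(). A sketch
of the pairing (fastopen_setup() is hypothetical):

	/* Writer, under the socket lock: */
	fastopen_setup(sk);			/* publish listener state */
	sk_state_store(sk, TCP_LISTEN);		/* release */

	/* Lockless reader, e.g. a /proc or diag dumper: */
	if (sk_state_load(sk) == TCP_LISTEN)	/* acquire */
		/* everything written before the store is visible here */
		rx_queue = sk->sk_ack_backlog;
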
diff --git a/include/net/switchdev.h b/include/net/switchdev.h
index bc865e2..1d22ce9 100644
--- a/include/net/switchdev.h
+++ b/include/net/switchdev.h
@@ -323,7 +323,7 @@
 					  struct net_device *filter_dev,
 					  int idx)
 {
-	return -EOPNOTSUPP;
+	return idx;
 }
 
 static inline void switchdev_port_fwd_mark_set(struct net_device *dev,
diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c
index 6e53441..db545cb 100644
--- a/kernel/livepatch/core.c
+++ b/kernel/livepatch/core.c
@@ -294,6 +294,12 @@
 
 	for (reloc = obj->relocs; reloc->name; reloc++) {
 		if (!klp_is_module(obj)) {
+
+#if defined(CONFIG_RANDOMIZE_BASE)
+			/* If KASLR has been enabled, adjust old value accordingly */
+			if (kaslr_enabled())
+				reloc->val += kaslr_offset();
+#endif
 			ret = klp_verify_vmlinux_symbol(reloc->name,
 							reloc->val);
 			if (ret)
diff --git a/mm/memory.c b/mm/memory.c
index deb679c..c387430 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3015,9 +3015,9 @@
 		} else {
 			/*
 			 * The fault handler has no page to lock, so it holds
-			 * i_mmap_lock for write to protect against truncate.
+			 * i_mmap_lock for read to protect against truncate.
 			 */
-			i_mmap_unlock_write(vma->vm_file->f_mapping);
+			i_mmap_unlock_read(vma->vm_file->f_mapping);
 		}
 		goto uncharge_out;
 	}
@@ -3031,9 +3031,9 @@
 	} else {
 		/*
 		 * The fault handler has no page to lock, so it holds
-		 * i_mmap_lock for write to protect against truncate.
+		 * i_mmap_lock for read to protect against truncate.
 		 */
-		i_mmap_unlock_write(vma->vm_file->f_mapping);
+		i_mmap_unlock_read(vma->vm_file->f_mapping);
 	}
 	return ret;
 uncharge_out:
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 496b275..e2ed698 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -30,7 +30,9 @@
 			skb->pkt_type = PACKET_HOST;
 	}
 
-	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR)) {
+	if (!(vlan_dev_priv(vlan_dev)->flags & VLAN_FLAG_REORDER_HDR) &&
+	    !netif_is_macvlan_port(vlan_dev) &&
+	    !netif_is_bridge_port(vlan_dev)) {
 		unsigned int offset = skb->data - skb_mac_header(skb);
 
 		/*
diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c
index f7e8dee..5f3f645 100644
--- a/net/bridge/br_stp.c
+++ b/net/bridge/br_stp.c
@@ -48,7 +48,7 @@
 
 	p->state = state;
 	err = switchdev_port_attr_set(p->dev, &attr);
-	if (err)
+	if (err && err != -EOPNOTSUPP)
 		br_warn(p->br, "error setting offload STP state on port %u(%s)\n",
 				(unsigned int) p->port_no, p->dev->name);
 }
diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c
index fa53d7a..5396ff08 100644
--- a/net/bridge/br_stp_if.c
+++ b/net/bridge/br_stp_if.c
@@ -50,7 +50,7 @@
 	p->config_pending = 0;
 
 	err = switchdev_port_attr_set(p->dev, &attr);
-	if (err)
+	if (err && err != -EOPNOTSUPP)
 		netdev_err(p->dev, "failed to set HW ageing time\n");
 }
 
diff --git a/net/core/dev.c b/net/core/dev.c
index ab9b8d0..ae00b89 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -2403,17 +2403,20 @@
 {
 	static const netdev_features_t null_features = 0;
 	struct net_device *dev = skb->dev;
-	const char *driver = "";
+	const char *name = "";
 
 	if (!net_ratelimit())
 		return;
 
-	if (dev && dev->dev.parent)
-		driver = dev_driver_string(dev->dev.parent);
-
+	if (dev) {
+		if (dev->dev.parent)
+			name = dev_driver_string(dev->dev.parent);
+		else
+			name = netdev_name(dev);
+	}
 	WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
 	     "gso_type=%d ip_summed=%d\n",
-	     driver, dev ? &dev->features : &null_features,
+	     name, dev ? &dev->features : &null_features,
 	     skb->sk ? &skb->sk->sk_route_caps : &null_features,
 	     skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
 	     skb_shinfo(skb)->gso_type, skb->ip_summed);
@@ -6426,11 +6429,16 @@
 
 	if (dev->netdev_ops->ndo_set_features)
 		err = dev->netdev_ops->ndo_set_features(dev, features);
+	else
+		err = 0;
 
 	if (unlikely(err < 0)) {
 		netdev_err(dev,
 			"set_features() failed (%d); wanted %pNF, left %pNF\n",
 			err, &features, &dev->features);
+		/* return non-0 since some features might have changed and
+		 * it's better to fire a spurious notification than miss it
+		 */
 		return -1;
 	}
 
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 1aa8437..e6af42d 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -857,7 +857,7 @@
 	struct sk_buff *skb = skb_peek_tail(&neigh->arp_queue);
 	/* keep skb alive even if arp_queue overflows */
 	if (skb)
-		skb = skb_copy(skb, GFP_ATOMIC);
+		skb = skb_clone(skb, GFP_ATOMIC);
 	write_unlock(&neigh->lock);
 	neigh->ops->solicit(neigh, skb);
 	atomic_inc(&neigh->probes);
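
skb_clone() duplicates only the sk_buff metadata and takes a reference
on the shared data buffer; skb_copy() also duplicates the payload. The
probe path merely needs to keep the queued skb alive while ->solicit()
reads its headers, so a clone is sufficient and avoids copying packet
data under GFP_ATOMIC:

	/* Cheap: new header, shared (refcounted) packet data. */
	struct sk_buff *c = skb_clone(skb, GFP_ATOMIC);

	/* Expensive: private copy of header and payload; only needed
	 * when the data itself will be written to. */
	struct sk_buff *d = skb_copy(skb, GFP_ATOMIC);
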
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 504bd17..34ba7a0 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -1045,15 +1045,156 @@
 	return 0;
 }
 
+static noinline_for_stack int rtnl_fill_stats(struct sk_buff *skb,
+					      struct net_device *dev)
+{
+	const struct rtnl_link_stats64 *stats;
+	struct rtnl_link_stats64 temp;
+	struct nlattr *attr;
+
+	stats = dev_get_stats(dev, &temp);
+
+	attr = nla_reserve(skb, IFLA_STATS,
+			   sizeof(struct rtnl_link_stats));
+	if (!attr)
+		return -EMSGSIZE;
+
+	copy_rtnl_link_stats(nla_data(attr), stats);
+
+	attr = nla_reserve(skb, IFLA_STATS64,
+			   sizeof(struct rtnl_link_stats64));
+	if (!attr)
+		return -EMSGSIZE;
+
+	copy_rtnl_link_stats64(nla_data(attr), stats);
+
+	return 0;
+}
+
+static noinline_for_stack int rtnl_fill_vfinfo(struct sk_buff *skb,
+					       struct net_device *dev,
+					       int vfs_num,
+					       struct nlattr *vfinfo)
+{
+	struct ifla_vf_rss_query_en vf_rss_query_en;
+	struct ifla_vf_link_state vf_linkstate;
+	struct ifla_vf_spoofchk vf_spoofchk;
+	struct ifla_vf_tx_rate vf_tx_rate;
+	struct ifla_vf_stats vf_stats;
+	struct ifla_vf_trust vf_trust;
+	struct ifla_vf_vlan vf_vlan;
+	struct ifla_vf_rate vf_rate;
+	struct nlattr *vf, *vfstats;
+	struct ifla_vf_mac vf_mac;
+	struct ifla_vf_info ivi;
+
+	/* Not all SR-IOV capable drivers support the
+	 * spoofcheck and "RSS query enable" query.  Preset to
+	 * -1 so the user space tool can detect that the driver
+	 * didn't report anything.
+	 */
+	ivi.spoofchk = -1;
+	ivi.rss_query_en = -1;
+	ivi.trusted = -1;
+	memset(ivi.mac, 0, sizeof(ivi.mac));
+	/* The default value for VF link state is "auto"
+	 * IFLA_VF_LINK_STATE_AUTO which equals zero
+	 */
+	ivi.linkstate = 0;
+	if (dev->netdev_ops->ndo_get_vf_config(dev, vfs_num, &ivi))
+		return 0;
+
+	vf_mac.vf =
+		vf_vlan.vf =
+		vf_rate.vf =
+		vf_tx_rate.vf =
+		vf_spoofchk.vf =
+		vf_linkstate.vf =
+		vf_rss_query_en.vf =
+		vf_trust.vf = ivi.vf;
+
+	memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
+	vf_vlan.vlan = ivi.vlan;
+	vf_vlan.qos = ivi.qos;
+	vf_tx_rate.rate = ivi.max_tx_rate;
+	vf_rate.min_tx_rate = ivi.min_tx_rate;
+	vf_rate.max_tx_rate = ivi.max_tx_rate;
+	vf_spoofchk.setting = ivi.spoofchk;
+	vf_linkstate.link_state = ivi.linkstate;
+	vf_rss_query_en.setting = ivi.rss_query_en;
+	vf_trust.setting = ivi.trusted;
+	vf = nla_nest_start(skb, IFLA_VF_INFO);
+	if (!vf) {
+		nla_nest_cancel(skb, vfinfo);
+		return -EMSGSIZE;
+	}
+	if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
+	    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
+	    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
+		    &vf_rate) ||
+	    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
+		    &vf_tx_rate) ||
+	    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
+		    &vf_spoofchk) ||
+	    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
+		    &vf_linkstate) ||
+	    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
+		    sizeof(vf_rss_query_en),
+		    &vf_rss_query_en) ||
+	    nla_put(skb, IFLA_VF_TRUST,
+		    sizeof(vf_trust), &vf_trust))
+		return -EMSGSIZE;
+	memset(&vf_stats, 0, sizeof(vf_stats));
+	if (dev->netdev_ops->ndo_get_vf_stats)
+		dev->netdev_ops->ndo_get_vf_stats(dev, vfs_num,
+						&vf_stats);
+	vfstats = nla_nest_start(skb, IFLA_VF_STATS);
+	if (!vfstats) {
+		nla_nest_cancel(skb, vf);
+		nla_nest_cancel(skb, vfinfo);
+		return -EMSGSIZE;
+	}
+	if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
+			vf_stats.rx_packets) ||
+	    nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
+			vf_stats.tx_packets) ||
+	    nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
+			vf_stats.rx_bytes) ||
+	    nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
+			vf_stats.tx_bytes) ||
+	    nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
+			vf_stats.broadcast) ||
+	    nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
+			vf_stats.multicast))
+		return -EMSGSIZE;
+	nla_nest_end(skb, vfstats);
+	nla_nest_end(skb, vf);
+	return 0;
+}
+
+static int rtnl_fill_link_ifmap(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rtnl_link_ifmap map = {
+		.mem_start   = dev->mem_start,
+		.mem_end     = dev->mem_end,
+		.base_addr   = dev->base_addr,
+		.irq         = dev->irq,
+		.dma         = dev->dma,
+		.port        = dev->if_port,
+	};
+	if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
+		return -EMSGSIZE;
+
+	return 0;
+}
+
 static int rtnl_fill_ifinfo(struct sk_buff *skb, struct net_device *dev,
 			    int type, u32 pid, u32 seq, u32 change,
 			    unsigned int flags, u32 ext_filter_mask)
 {
 	struct ifinfomsg *ifm;
 	struct nlmsghdr *nlh;
-	struct rtnl_link_stats64 temp;
-	const struct rtnl_link_stats64 *stats;
-	struct nlattr *attr, *af_spec;
+	struct nlattr *af_spec;
 	struct rtnl_af_ops *af_ops;
 	struct net_device *upper_dev = netdev_master_upper_dev_get(dev);
 
@@ -1096,18 +1237,8 @@
 	    nla_put_u8(skb, IFLA_PROTO_DOWN, dev->proto_down))
 		goto nla_put_failure;
 
-	if (1) {
-		struct rtnl_link_ifmap map = {
-			.mem_start   = dev->mem_start,
-			.mem_end     = dev->mem_end,
-			.base_addr   = dev->base_addr,
-			.irq         = dev->irq,
-			.dma         = dev->dma,
-			.port        = dev->if_port,
-		};
-		if (nla_put(skb, IFLA_MAP, sizeof(map), &map))
-			goto nla_put_failure;
-	}
+	if (rtnl_fill_link_ifmap(skb, dev))
+		goto nla_put_failure;
 
 	if (dev->addr_len) {
 		if (nla_put(skb, IFLA_ADDRESS, dev->addr_len, dev->dev_addr) ||
@@ -1124,128 +1255,27 @@
 	if (rtnl_phys_switch_id_fill(skb, dev))
 		goto nla_put_failure;
 
-	attr = nla_reserve(skb, IFLA_STATS,
-			sizeof(struct rtnl_link_stats));
-	if (attr == NULL)
+	if (rtnl_fill_stats(skb, dev))
 		goto nla_put_failure;
 
-	stats = dev_get_stats(dev, &temp);
-	copy_rtnl_link_stats(nla_data(attr), stats);
-
-	attr = nla_reserve(skb, IFLA_STATS64,
-			sizeof(struct rtnl_link_stats64));
-	if (attr == NULL)
-		goto nla_put_failure;
-	copy_rtnl_link_stats64(nla_data(attr), stats);
-
 	if (dev->dev.parent && (ext_filter_mask & RTEXT_FILTER_VF) &&
 	    nla_put_u32(skb, IFLA_NUM_VF, dev_num_vf(dev->dev.parent)))
 		goto nla_put_failure;
 
-	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent
-	    && (ext_filter_mask & RTEXT_FILTER_VF)) {
+	if (dev->netdev_ops->ndo_get_vf_config && dev->dev.parent &&
+	    ext_filter_mask & RTEXT_FILTER_VF) {
 		int i;
-
-		struct nlattr *vfinfo, *vf, *vfstats;
+		struct nlattr *vfinfo;
 		int num_vfs = dev_num_vf(dev->dev.parent);
 
 		vfinfo = nla_nest_start(skb, IFLA_VFINFO_LIST);
 		if (!vfinfo)
 			goto nla_put_failure;
 		for (i = 0; i < num_vfs; i++) {
-			struct ifla_vf_info ivi;
-			struct ifla_vf_mac vf_mac;
-			struct ifla_vf_vlan vf_vlan;
-			struct ifla_vf_rate vf_rate;
-			struct ifla_vf_tx_rate vf_tx_rate;
-			struct ifla_vf_spoofchk vf_spoofchk;
-			struct ifla_vf_link_state vf_linkstate;
-			struct ifla_vf_rss_query_en vf_rss_query_en;
-			struct ifla_vf_stats vf_stats;
-			struct ifla_vf_trust vf_trust;
-
-			/*
-			 * Not all SR-IOV capable drivers support the
-			 * spoofcheck and "RSS query enable" query.  Preset to
-			 * -1 so the user space tool can detect that the driver
-			 * didn't report anything.
-			 */
-			ivi.spoofchk = -1;
-			ivi.rss_query_en = -1;
-			ivi.trusted = -1;
-			memset(ivi.mac, 0, sizeof(ivi.mac));
-			/* The default value for VF link state is "auto"
-			 * IFLA_VF_LINK_STATE_AUTO which equals zero
-			 */
-			ivi.linkstate = 0;
-			if (dev->netdev_ops->ndo_get_vf_config(dev, i, &ivi))
-				break;
-			vf_mac.vf =
-				vf_vlan.vf =
-				vf_rate.vf =
-				vf_tx_rate.vf =
-				vf_spoofchk.vf =
-				vf_linkstate.vf =
-				vf_rss_query_en.vf =
-				vf_trust.vf = ivi.vf;
-
-			memcpy(vf_mac.mac, ivi.mac, sizeof(ivi.mac));
-			vf_vlan.vlan = ivi.vlan;
-			vf_vlan.qos = ivi.qos;
-			vf_tx_rate.rate = ivi.max_tx_rate;
-			vf_rate.min_tx_rate = ivi.min_tx_rate;
-			vf_rate.max_tx_rate = ivi.max_tx_rate;
-			vf_spoofchk.setting = ivi.spoofchk;
-			vf_linkstate.link_state = ivi.linkstate;
-			vf_rss_query_en.setting = ivi.rss_query_en;
-			vf_trust.setting = ivi.trusted;
-			vf = nla_nest_start(skb, IFLA_VF_INFO);
-			if (!vf) {
-				nla_nest_cancel(skb, vfinfo);
+			if (rtnl_fill_vfinfo(skb, dev, i, vfinfo))
 				goto nla_put_failure;
-			}
-			if (nla_put(skb, IFLA_VF_MAC, sizeof(vf_mac), &vf_mac) ||
-			    nla_put(skb, IFLA_VF_VLAN, sizeof(vf_vlan), &vf_vlan) ||
-			    nla_put(skb, IFLA_VF_RATE, sizeof(vf_rate),
-				    &vf_rate) ||
-			    nla_put(skb, IFLA_VF_TX_RATE, sizeof(vf_tx_rate),
-				    &vf_tx_rate) ||
-			    nla_put(skb, IFLA_VF_SPOOFCHK, sizeof(vf_spoofchk),
-				    &vf_spoofchk) ||
-			    nla_put(skb, IFLA_VF_LINK_STATE, sizeof(vf_linkstate),
-				    &vf_linkstate) ||
-			    nla_put(skb, IFLA_VF_RSS_QUERY_EN,
-				    sizeof(vf_rss_query_en),
-				    &vf_rss_query_en) ||
-			    nla_put(skb, IFLA_VF_TRUST,
-				    sizeof(vf_trust), &vf_trust))
-				goto nla_put_failure;
-			memset(&vf_stats, 0, sizeof(vf_stats));
-			if (dev->netdev_ops->ndo_get_vf_stats)
-				dev->netdev_ops->ndo_get_vf_stats(dev, i,
-								  &vf_stats);
-			vfstats = nla_nest_start(skb, IFLA_VF_STATS);
-			if (!vfstats) {
-				nla_nest_cancel(skb, vf);
-				nla_nest_cancel(skb, vfinfo);
-				goto nla_put_failure;
-			}
-			if (nla_put_u64(skb, IFLA_VF_STATS_RX_PACKETS,
-					vf_stats.rx_packets) ||
-			    nla_put_u64(skb, IFLA_VF_STATS_TX_PACKETS,
-					vf_stats.tx_packets) ||
-			    nla_put_u64(skb, IFLA_VF_STATS_RX_BYTES,
-					vf_stats.rx_bytes) ||
-			    nla_put_u64(skb, IFLA_VF_STATS_TX_BYTES,
-					vf_stats.tx_bytes) ||
-			    nla_put_u64(skb, IFLA_VF_STATS_BROADCAST,
-					vf_stats.broadcast) ||
-			    nla_put_u64(skb, IFLA_VF_STATS_MULTICAST,
-					vf_stats.multicast))
-				goto nla_put_failure;
-			nla_nest_end(skb, vfstats);
-			nla_nest_end(skb, vf);
 		}
+
 		nla_nest_end(skb, vfinfo);
 	}
 
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index aa41e6d..152b9c7 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -4268,7 +4268,8 @@
 		return NULL;
 	}
 
-	memmove(skb->data - ETH_HLEN, skb->data - VLAN_ETH_HLEN, 2 * ETH_ALEN);
+	memmove(skb->data - ETH_HLEN, skb->data - skb->mac_len,
+		2 * ETH_ALEN);
 	skb->mac_header += VLAN_HLEN;
 	return skb;
 }
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 1feb15f..46b9c88 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -563,7 +563,7 @@
 	int max_retries, thresh;
 	u8 defer_accept;
 
-	if (sk_listener->sk_state != TCP_LISTEN)
+	if (sk_state_load(sk_listener) != TCP_LISTEN)
 		goto drop;
 
 	max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
@@ -749,7 +749,7 @@
 	 * It is OK, because this socket enters to hash table only
 	 * after validation is complete.
 	 */
-	sk->sk_state = TCP_LISTEN;
+	sk_state_store(sk, TCP_LISTEN);
 	if (!sk->sk_prot->get_port(sk, inet->inet_num)) {
 		inet->inet_sport = htons(inet->inet_num);
 
diff --git a/net/ipv4/netfilter/nf_nat_pptp.c b/net/ipv4/netfilter/nf_nat_pptp.c
index 657d230..b3ca21b 100644
--- a/net/ipv4/netfilter/nf_nat_pptp.c
+++ b/net/ipv4/netfilter/nf_nat_pptp.c
@@ -45,7 +45,7 @@
 	struct net *net = nf_ct_net(ct);
 	const struct nf_conn *master = ct->master;
 	struct nf_conntrack_expect *other_exp;
-	struct nf_conntrack_tuple t;
+	struct nf_conntrack_tuple t = {};
 	const struct nf_ct_pptp_master *ct_pptp_info;
 	const struct nf_nat_pptp *nat_pptp_info;
 	struct nf_nat_range range;
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 8c0d0bd..63e5be0 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -406,10 +406,12 @@
 			ip_select_ident(net, skb, NULL);
 
 		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+		skb->transport_header += iphlen;
+		if (iph->protocol == IPPROTO_ICMP &&
+		    length >= iphlen + sizeof(struct icmphdr))
+			icmp_out_count(net, ((struct icmphdr *)
+				skb_transport_header(skb))->type);
 	}
-	if (iph->protocol == IPPROTO_ICMP)
-		icmp_out_count(net, ((struct icmphdr *)
-			skb_transport_header(skb))->type);
 
 	err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT,
 		      net, sk, skb, NULL, rt->dst.dev,
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 0cfa7c0..c172877 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -451,11 +451,14 @@
 	unsigned int mask;
 	struct sock *sk = sock->sk;
 	const struct tcp_sock *tp = tcp_sk(sk);
+	int state;
 
 	sock_rps_record_flow(sk);
 
 	sock_poll_wait(file, sk_sleep(sk), wait);
-	if (sk->sk_state == TCP_LISTEN)
+
+	state = sk_state_load(sk);
+	if (state == TCP_LISTEN)
 		return inet_csk_listen_poll(sk);
 
 	/* Socket is not locked. We are protected from async events
@@ -492,14 +495,14 @@
 	 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
 	 * blocking on fresh not-connected or disconnected socket. --ANK
 	 */
-	if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
+	if (sk->sk_shutdown == SHUTDOWN_MASK || state == TCP_CLOSE)
 		mask |= POLLHUP;
 	if (sk->sk_shutdown & RCV_SHUTDOWN)
 		mask |= POLLIN | POLLRDNORM | POLLRDHUP;
 
 	/* Connected or passive Fast Open socket? */
-	if (sk->sk_state != TCP_SYN_SENT &&
-	    (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk)) {
+	if (state != TCP_SYN_SENT &&
+	    (state != TCP_SYN_RECV || tp->fastopen_rsk)) {
 		int target = sock_rcvlowat(sk, 0, INT_MAX);
 
 		if (tp->urg_seq == tp->copied_seq &&
@@ -507,9 +510,6 @@
 		    tp->urg_data)
 			target++;
 
-		/* Potential race condition. If read of tp below will
-		 * escape above sk->sk_state, we can be illegally awaken
-		 * in SYN_* states. */
 		if (tp->rcv_nxt - tp->copied_seq >= target)
 			mask |= POLLIN | POLLRDNORM;
 
@@ -1934,7 +1934,7 @@
 	/* Change state AFTER socket is unhashed to avoid closed
 	 * socket sitting in hash tables.
 	 */
-	sk->sk_state = state;
+	sk_state_store(sk, state);
 
 #ifdef STATE_TRACE
 	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
@@ -2644,7 +2644,8 @@
 	if (sk->sk_type != SOCK_STREAM)
 		return;
 
-	info->tcpi_state = sk->sk_state;
+	info->tcpi_state = sk_state_load(sk);
+
 	info->tcpi_ca_state = icsk->icsk_ca_state;
 	info->tcpi_retransmits = icsk->icsk_retransmits;
 	info->tcpi_probes = icsk->icsk_probes_out;
@@ -2672,7 +2673,7 @@
 	info->tcpi_snd_mss = tp->mss_cache;
 	info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
 
-	if (sk->sk_state == TCP_LISTEN) {
+	if (info->tcpi_state == TCP_LISTEN) {
 		info->tcpi_unacked = sk->sk_ack_backlog;
 		info->tcpi_sacked = sk->sk_max_ack_backlog;
 	} else {
diff --git a/net/ipv4/tcp_diag.c b/net/ipv4/tcp_diag.c
index 479f349..b316040 100644
--- a/net/ipv4/tcp_diag.c
+++ b/net/ipv4/tcp_diag.c
@@ -21,7 +21,7 @@
 {
 	struct tcp_info *info = _info;
 
-	if (sk->sk_state == TCP_LISTEN) {
+	if (sk_state_load(sk) == TCP_LISTEN) {
 		r->idiag_rqueue = sk->sk_ack_backlog;
 		r->idiag_wqueue = sk->sk_max_ack_backlog;
 	} else if (sk->sk_type == SOCK_STREAM) {
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 950e28c0c..ba09016 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -2158,6 +2158,7 @@
 	__u16 destp = ntohs(inet->inet_dport);
 	__u16 srcp = ntohs(inet->inet_sport);
 	int rx_queue;
+	int state;
 
 	if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
 	    icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
@@ -2175,17 +2176,18 @@
 		timer_expires = jiffies;
 	}
 
-	if (sk->sk_state == TCP_LISTEN)
+	state = sk_state_load(sk);
+	if (state == TCP_LISTEN)
 		rx_queue = sk->sk_ack_backlog;
 	else
-		/*
-		 * because we dont lock socket, we might find a transient negative value
+		/* Because we don't lock the socket,
+		 * we might find a transient negative value.
 		 */
 		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
 
 	seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
 			"%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
-		i, src, srcp, dest, destp, sk->sk_state,
+		i, src, srcp, dest, destp, state,
 		tp->write_seq - tp->snd_una,
 		rx_queue,
 		timer_active,
@@ -2199,8 +2201,8 @@
 		jiffies_to_clock_t(icsk->icsk_ack.ato),
 		(icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		tp->snd_cwnd,
-		sk->sk_state == TCP_LISTEN ?
-		    (fastopenq ? fastopenq->max_qlen : 0) :
+		state == TCP_LISTEN ?
+		    fastopenq->max_qlen :
 		    (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
 }
 
diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c
index 124338a..5ee56d0 100644
--- a/net/ipv6/mcast.c
+++ b/net/ipv6/mcast.c
@@ -1651,7 +1651,6 @@
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, ICMPV6_MLD2_REPORT);
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
-		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, payload_len);
 	} else {
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 	}
@@ -2015,7 +2014,6 @@
 	if (!err) {
 		ICMP6MSGOUT_INC_STATS(net, idev, type);
 		ICMP6_INC_STATS(net, idev, ICMP6_MIB_OUTMSGS);
-		IP6_UPD_PO_STATS(net, idev, IPSTATS_MIB_OUTMCAST, full_len);
 	} else
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
 
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index c8bc9b4..6f01fe1 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -404,6 +404,14 @@
 	}
 }
 
+static bool __rt6_check_expired(const struct rt6_info *rt)
+{
+	if (rt->rt6i_flags & RTF_EXPIRES)
+		return time_after(jiffies, rt->dst.expires);
+	else
+		return false;
+}
+
 static bool rt6_check_expired(const struct rt6_info *rt)
 {
 	if (rt->rt6i_flags & RTF_EXPIRES) {
@@ -1252,7 +1260,8 @@
 
 static struct dst_entry *rt6_dst_from_check(struct rt6_info *rt, u32 cookie)
 {
-	if (rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
+	if (!__rt6_check_expired(rt) &&
+	    rt->dst.obsolete == DST_OBSOLETE_FORCE_CHK &&
 	    rt6_check((struct rt6_info *)(rt->dst.from), cookie))
 		return &rt->dst;
 	else
@@ -1272,7 +1281,8 @@
 
 	rt6_dst_from_metrics_check(rt);
 
-	if ((rt->rt6i_flags & RTF_PCPU) || unlikely(dst->flags & DST_NOCACHE))
+	if (rt->rt6i_flags & RTF_PCPU ||
+	    (unlikely(dst->flags & DST_NOCACHE) && rt->dst.from))
 		return rt6_dst_from_check(rt, cookie);
 	else
 		return rt6_check(rt, cookie);
@@ -1322,6 +1332,12 @@
 	rt6_update_expires(rt, net->ipv6.sysctl.ip6_rt_mtu_expires);
 }
 
+static bool rt6_cache_allowed_for_pmtu(const struct rt6_info *rt)
+{
+	return !(rt->rt6i_flags & RTF_CACHE) &&
+		(rt->rt6i_flags & RTF_PCPU || rt->rt6i_node);
+}
+
 static void __ip6_rt_update_pmtu(struct dst_entry *dst, const struct sock *sk,
 				 const struct ipv6hdr *iph, u32 mtu)
 {
@@ -1335,7 +1351,7 @@
 	if (mtu >= dst_mtu(dst))
 		return;
 
-	if (rt6->rt6i_flags & RTF_CACHE) {
+	if (!rt6_cache_allowed_for_pmtu(rt6)) {
 		rt6_do_update_pmtu(rt6, mtu);
 	} else {
 		const struct in6_addr *daddr, *saddr;
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 5baa8e7..c5429a6 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1690,6 +1690,8 @@
 	const struct tcp_sock *tp = tcp_sk(sp);
 	const struct inet_connection_sock *icsk = inet_csk(sp);
 	const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
+	int rx_queue;
+	int state;
 
 	dest  = &sp->sk_v6_daddr;
 	src   = &sp->sk_v6_rcv_saddr;
@@ -1710,6 +1712,15 @@
 		timer_expires = jiffies;
 	}
 
+	state = sk_state_load(sp);
+	if (state == TCP_LISTEN)
+		rx_queue = sp->sk_ack_backlog;
+	else
+		/* Because we don't lock the socket,
+		 * we might find a transient negative value.
+		 */
+		rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
+
 	seq_printf(seq,
 		   "%4d: %08X%08X%08X%08X:%04X %08X%08X%08X%08X:%04X "
 		   "%02X %08X:%08X %02X:%08lX %08X %5u %8d %lu %d %pK %lu %lu %u %u %d\n",
@@ -1718,9 +1729,9 @@
 		   src->s6_addr32[2], src->s6_addr32[3], srcp,
 		   dest->s6_addr32[0], dest->s6_addr32[1],
 		   dest->s6_addr32[2], dest->s6_addr32[3], destp,
-		   sp->sk_state,
-		   tp->write_seq-tp->snd_una,
-		   (sp->sk_state == TCP_LISTEN) ? sp->sk_ack_backlog : (tp->rcv_nxt - tp->copied_seq),
+		   state,
+		   tp->write_seq - tp->snd_una,
+		   rx_queue,
 		   timer_active,
 		   jiffies_delta_to_clock_t(timer_expires - jiffies),
 		   icsk->icsk_retransmits,
@@ -1732,7 +1743,7 @@
 		   jiffies_to_clock_t(icsk->icsk_ack.ato),
 		   (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
 		   tp->snd_cwnd,
-		   sp->sk_state == TCP_LISTEN ?
+		   state == TCP_LISTEN ?
 			fastopenq->max_qlen :
 			(tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh)
 		   );
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index e22349e..4692782 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -869,7 +869,7 @@
 	depends on IPV6 || IPV6=n
 	depends on !NF_CONNTRACK || NF_CONNTRACK
 	select NF_DUP_IPV4
-	select NF_DUP_IPV6 if IP6_NF_IPTABLES
+	select NF_DUP_IPV6 if IP6_NF_IPTABLES != n
 	---help---
 	This option adds a "TEE" target with which a packet can be cloned and
 	this clone be rerouted to another nexthop.
@@ -882,7 +882,7 @@
 	depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
 	depends on IP_NF_MANGLE
 	select NF_DEFRAG_IPV4
-	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
+	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
 	help
 	  This option adds a `TPROXY' target, which is somewhat similar to
 	  REDIRECT.  It can only be used in the mangle table and is useful
@@ -1375,7 +1375,7 @@
 	depends on IPV6 || IPV6=n
 	depends on IP6_NF_IPTABLES || IP6_NF_IPTABLES=n
 	select NF_DEFRAG_IPV4
-	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
+	select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES != n
 	help
 	  This option adds a `socket' match, which can be used to match
 	  packets for which a TCP or UDP socket lookup finds a valid socket.
diff --git a/net/netfilter/ipset/ip_set_bitmap_gen.h b/net/netfilter/ipset/ip_set_bitmap_gen.h
index d05e759..b0bc475 100644
--- a/net/netfilter/ipset/ip_set_bitmap_gen.h
+++ b/net/netfilter/ipset/ip_set_bitmap_gen.h
@@ -33,7 +33,7 @@
 #define mtype_gc		IPSET_TOKEN(MTYPE, _gc)
 #define mtype			MTYPE
 
-#define get_ext(set, map, id)	((map)->extensions + (set)->dsize * (id))
+#define get_ext(set, map, id)	((map)->extensions + ((set)->dsize * (id)))
 
 static void
 mtype_gc_init(struct ip_set *set, void (*gc)(unsigned long ul_set))
@@ -67,12 +67,9 @@
 		del_timer_sync(&map->gc);
 
 	ip_set_free(map->members);
-	if (set->dsize) {
-		if (set->extensions & IPSET_EXT_DESTROY)
-			mtype_ext_cleanup(set);
-		ip_set_free(map->extensions);
-	}
-	kfree(map);
+	if (set->dsize && set->extensions & IPSET_EXT_DESTROY)
+		mtype_ext_cleanup(set);
+	ip_set_free(map);
 
 	set->data = NULL;
 }
@@ -92,16 +89,14 @@
 {
 	const struct mtype *map = set->data;
 	struct nlattr *nested;
+	size_t memsize = sizeof(*map) + map->memsize;
 
 	nested = ipset_nest_start(skb, IPSET_ATTR_DATA);
 	if (!nested)
 		goto nla_put_failure;
 	if (mtype_do_head(skb, map) ||
 	    nla_put_net32(skb, IPSET_ATTR_REFERENCES, htonl(set->ref - 1)) ||
-	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE,
-			  htonl(sizeof(*map) +
-				map->memsize +
-				set->dsize * map->elements)))
+	    nla_put_net32(skb, IPSET_ATTR_MEMSIZE, htonl(memsize)))
 		goto nla_put_failure;
 	if (unlikely(ip_set_put_flags(skb, set)))
 		goto nla_put_failure;
diff --git a/net/netfilter/ipset/ip_set_bitmap_ip.c b/net/netfilter/ipset/ip_set_bitmap_ip.c
index 64a5643..4783eff 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ip.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ip.c
@@ -41,7 +41,6 @@
 /* Type structure */
 struct bitmap_ip {
 	void *members;		/* the set members */
-	void *extensions;	/* data extensions */
 	u32 first_ip;		/* host byte order, included in range */
 	u32 last_ip;		/* host byte order, included in range */
 	u32 elements;		/* number of max elements in the set */
@@ -49,6 +48,8 @@
 	size_t memsize;		/* members size */
 	u8 netmask;		/* subnet netmask */
 	struct timer_list gc;	/* garbage collection */
+	unsigned char extensions[0]	/* data extensions */
+		__aligned(__alignof__(u64));
 };
 
 /* ADT structure for generic function args */
@@ -224,13 +225,6 @@
 	map->members = ip_set_alloc(map->memsize);
 	if (!map->members)
 		return false;
-	if (set->dsize) {
-		map->extensions = ip_set_alloc(set->dsize * elements);
-		if (!map->extensions) {
-			kfree(map->members);
-			return false;
-		}
-	}
 	map->first_ip = first_ip;
 	map->last_ip = last_ip;
 	map->elements = elements;
@@ -316,13 +310,13 @@
 	pr_debug("hosts %u, elements %llu\n",
 		 hosts, (unsigned long long)elements);
 
-	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	set->dsize = ip_set_elem_len(set, tb, 0, 0);
+	map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
 	if (!map)
 		return -ENOMEM;
 
 	map->memsize = bitmap_bytes(0, elements - 1);
 	set->variant = &bitmap_ip;
-	set->dsize = ip_set_elem_len(set, tb, 0);
 	if (!init_map_ip(set, map, first_ip, last_ip,
 			 elements, hosts, netmask)) {
 		kfree(map);
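
The separate extensions allocation is folded into the map itself: a
flexible array member at the end of the structure, sized at create time
and allocated in the same ip_set_alloc() call, with
__aligned(__alignof__(u64)) keeping 64-bit extension fields (counters,
timeouts) naturally aligned. The pattern in miniature (the struct name
is illustrative):

	struct my_map {
		size_t memsize;
		/* ... fixed fields ... */
		unsigned char extensions[0]
			__aligned(__alignof__(u64));
	};

	/* One allocation covers the header and all per-element data. */
	struct my_map *map = kzalloc(sizeof(*map) + elements * dsize,
				     GFP_KERNEL);
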
diff --git a/net/netfilter/ipset/ip_set_bitmap_ipmac.c b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
index 1430535..29dde20 100644
--- a/net/netfilter/ipset/ip_set_bitmap_ipmac.c
+++ b/net/netfilter/ipset/ip_set_bitmap_ipmac.c
@@ -47,24 +47,26 @@
 /* Type structure */
 struct bitmap_ipmac {
 	void *members;		/* the set members */
-	void *extensions;	/* MAC + data extensions */
 	u32 first_ip;		/* host byte order, included in range */
 	u32 last_ip;		/* host byte order, included in range */
 	u32 elements;		/* number of max elements in the set */
 	size_t memsize;		/* members size */
 	struct timer_list gc;	/* garbage collector */
+	unsigned char extensions[0]	/* MAC + data extensions */
+		__aligned(__alignof__(u64));
 };
 
 /* ADT structure for generic function args */
 struct bitmap_ipmac_adt_elem {
+	unsigned char ether[ETH_ALEN] __aligned(2);
 	u16 id;
-	unsigned char *ether;
+	u16 add_mac;
 };
 
 struct bitmap_ipmac_elem {
 	unsigned char ether[ETH_ALEN];
 	unsigned char filled;
-} __attribute__ ((aligned));
+} __aligned(__alignof__(u64));
 
 static inline u32
 ip_to_id(const struct bitmap_ipmac *m, u32 ip)
@@ -72,11 +74,11 @@
 	return ip - m->first_ip;
 }
 
-static inline struct bitmap_ipmac_elem *
-get_elem(void *extensions, u16 id, size_t dsize)
-{
-	return (struct bitmap_ipmac_elem *)(extensions + id * dsize);
-}
+#define get_elem(extensions, id, dsize)		\
+	(struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
+
+#define get_const_elem(extensions, id, dsize)	\
+	(const struct bitmap_ipmac_elem *)(extensions + (id) * (dsize))
 
 /* Common functions */
 
@@ -88,10 +90,9 @@
 
 	if (!test_bit(e->id, map->members))
 		return 0;
-	elem = get_elem(map->extensions, e->id, dsize);
-	if (elem->filled == MAC_FILLED)
-		return !e->ether ||
-		       ether_addr_equal(e->ether, elem->ether);
+	elem = get_const_elem(map->extensions, e->id, dsize);
+	if (e->add_mac && elem->filled == MAC_FILLED)
+		return ether_addr_equal(e->ether, elem->ether);
 	/* Trigger kernel to fill out the ethernet address */
 	return -EAGAIN;
 }
@@ -103,7 +104,7 @@
 
 	if (!test_bit(id, map->members))
 		return 0;
-	elem = get_elem(map->extensions, id, dsize);
+	elem = get_const_elem(map->extensions, id, dsize);
 	/* Timer not started for the incomplete elements */
 	return elem->filled == MAC_FILLED;
 }
@@ -133,7 +134,7 @@
 		 * and we can reuse it later when MAC is filled out,
 		 * possibly by the kernel
 		 */
-		if (e->ether)
+		if (e->add_mac)
 			ip_set_timeout_set(timeout, t);
 		else
 			*timeout = t;
@@ -150,7 +151,7 @@
 	elem = get_elem(map->extensions, e->id, dsize);
 	if (test_bit(e->id, map->members)) {
 		if (elem->filled == MAC_FILLED) {
-			if (e->ether &&
+			if (e->add_mac &&
 			    (flags & IPSET_FLAG_EXIST) &&
 			    !ether_addr_equal(e->ether, elem->ether)) {
 				/* memcpy isn't atomic */
@@ -159,7 +160,7 @@
 				ether_addr_copy(elem->ether, e->ether);
 			}
 			return IPSET_ADD_FAILED;
-		} else if (!e->ether)
+		} else if (!e->add_mac)
 			/* Already added without ethernet address */
 			return IPSET_ADD_FAILED;
 		/* Fill the MAC address and trigger the timer activation */
@@ -168,7 +169,7 @@
 		ether_addr_copy(elem->ether, e->ether);
 		elem->filled = MAC_FILLED;
 		return IPSET_ADD_START_STORED_TIMEOUT;
-	} else if (e->ether) {
+	} else if (e->add_mac) {
 		/* We can store MAC too */
 		ether_addr_copy(elem->ether, e->ether);
 		elem->filled = MAC_FILLED;
@@ -191,7 +192,7 @@
 		     u32 id, size_t dsize)
 {
 	const struct bitmap_ipmac_elem *elem =
-		get_elem(map->extensions, id, dsize);
+		get_const_elem(map->extensions, id, dsize);
 
 	return nla_put_ipaddr4(skb, IPSET_ATTR_IP,
 			       htonl(map->first_ip + id)) ||
@@ -213,7 +214,7 @@
 {
 	struct bitmap_ipmac *map = set->data;
 	ipset_adtfn adtfn = set->variant->adt[adt];
-	struct bitmap_ipmac_adt_elem e = { .id = 0 };
+	struct bitmap_ipmac_adt_elem e = { .id = 0, .add_mac = 1 };
 	struct ip_set_ext ext = IP_SET_INIT_KEXT(skb, opt, set);
 	u32 ip;
 
@@ -231,7 +232,7 @@
 		return -EINVAL;
 
 	e.id = ip_to_id(map, ip);
-	e.ether = eth_hdr(skb)->h_source;
+	memcpy(e.ether, eth_hdr(skb)->h_source, ETH_ALEN);
 
 	return adtfn(set, &e, &ext, &opt->ext, opt->cmdflags);
 }
@@ -265,11 +266,10 @@
 		return -IPSET_ERR_BITMAP_RANGE;
 
 	e.id = ip_to_id(map, ip);
-	if (tb[IPSET_ATTR_ETHER])
-		e.ether = nla_data(tb[IPSET_ATTR_ETHER]);
-	else
-		e.ether = NULL;
-
+	if (tb[IPSET_ATTR_ETHER]) {
+		memcpy(e.ether, nla_data(tb[IPSET_ATTR_ETHER]), ETH_ALEN);
+		e.add_mac = 1;
+	}
 	ret = adtfn(set, &e, &ext, &ext, flags);
 
 	return ip_set_eexist(ret, flags) ? 0 : ret;
@@ -300,13 +300,6 @@
 	map->members = ip_set_alloc(map->memsize);
 	if (!map->members)
 		return false;
-	if (set->dsize) {
-		map->extensions = ip_set_alloc(set->dsize * elements);
-		if (!map->extensions) {
-			kfree(map->members);
-			return false;
-		}
-	}
 	map->first_ip = first_ip;
 	map->last_ip = last_ip;
 	map->elements = elements;
@@ -361,14 +354,15 @@
 	if (elements > IPSET_BITMAP_MAX_RANGE + 1)
 		return -IPSET_ERR_BITMAP_RANGE_SIZE;
 
-	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	set->dsize = ip_set_elem_len(set, tb,
+				     sizeof(struct bitmap_ipmac_elem),
+				     __alignof__(struct bitmap_ipmac_elem));
+	map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
 	if (!map)
 		return -ENOMEM;
 
 	map->memsize = bitmap_bytes(0, elements - 1);
 	set->variant = &bitmap_ipmac;
-	set->dsize = ip_set_elem_len(set, tb,
-				     sizeof(struct bitmap_ipmac_elem));
 	if (!init_map_ipmac(set, map, first_ip, last_ip, elements)) {
 		kfree(map);
 		return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_bitmap_port.c b/net/netfilter/ipset/ip_set_bitmap_port.c
index 5338ccd..7f0c733 100644
--- a/net/netfilter/ipset/ip_set_bitmap_port.c
+++ b/net/netfilter/ipset/ip_set_bitmap_port.c
@@ -35,12 +35,13 @@
 /* Type structure */
 struct bitmap_port {
 	void *members;		/* the set members */
-	void *extensions;	/* data extensions */
 	u16 first_port;		/* host byte order, included in range */
 	u16 last_port;		/* host byte order, included in range */
 	u32 elements;		/* number of max elements in the set */
 	size_t memsize;		/* members size */
 	struct timer_list gc;	/* garbage collection */
+	unsigned char extensions[0]	/* data extensions */
+		__aligned(__alignof__(u64));
 };
 
 /* ADT structure for generic function args */
@@ -209,13 +210,6 @@
 	map->members = ip_set_alloc(map->memsize);
 	if (!map->members)
 		return false;
-	if (set->dsize) {
-		map->extensions = ip_set_alloc(set->dsize * map->elements);
-		if (!map->extensions) {
-			kfree(map->members);
-			return false;
-		}
-	}
 	map->first_port = first_port;
 	map->last_port = last_port;
 	set->timeout = IPSET_NO_TIMEOUT;
@@ -232,6 +226,7 @@
 {
 	struct bitmap_port *map;
 	u16 first_port, last_port;
+	u32 elements;
 
 	if (unlikely(!ip_set_attr_netorder(tb, IPSET_ATTR_PORT) ||
 		     !ip_set_attr_netorder(tb, IPSET_ATTR_PORT_TO) ||
@@ -248,14 +243,15 @@
 		last_port = tmp;
 	}
 
-	map = kzalloc(sizeof(*map), GFP_KERNEL);
+	elements = last_port - first_port + 1;
+	set->dsize = ip_set_elem_len(set, tb, 0, 0);
+	map = ip_set_alloc(sizeof(*map) + elements * set->dsize);
 	if (!map)
 		return -ENOMEM;
 
-	map->elements = last_port - first_port + 1;
+	map->elements = elements;
 	map->memsize = bitmap_bytes(0, map->elements);
 	set->variant = &bitmap_port;
-	set->dsize = ip_set_elem_len(set, tb, 0);
 	if (!init_map_port(set, map, first_port, last_port)) {
 		kfree(map);
 		return -ENOMEM;
diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
index 69ab9c26..54f3d7c 100644
--- a/net/netfilter/ipset/ip_set_core.c
+++ b/net/netfilter/ipset/ip_set_core.c
@@ -364,25 +364,27 @@
 }
 
 size_t
-ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len)
+ip_set_elem_len(struct ip_set *set, struct nlattr *tb[], size_t len,
+		size_t align)
 {
 	enum ip_set_ext_id id;
-	size_t offset = len;
 	u32 cadt_flags = 0;
 
 	if (tb[IPSET_ATTR_CADT_FLAGS])
 		cadt_flags = ip_set_get_h32(tb[IPSET_ATTR_CADT_FLAGS]);
 	if (cadt_flags & IPSET_FLAG_WITH_FORCEADD)
 		set->flags |= IPSET_CREATE_FLAG_FORCEADD;
+	if (!align)
+		align = 1;
 	for (id = 0; id < IPSET_EXT_ID_MAX; id++) {
 		if (!add_extension(id, cadt_flags, tb))
 			continue;
-		offset = ALIGN(offset, ip_set_extensions[id].align);
-		set->offset[id] = offset;
+		len = ALIGN(len, ip_set_extensions[id].align);
+		set->offset[id] = len;
 		set->extensions |= ip_set_extensions[id].type;
-		offset += ip_set_extensions[id].len;
+		len += ip_set_extensions[id].len;
 	}
-	return offset;
+	return ALIGN(len, align);
 }
 EXPORT_SYMBOL_GPL(ip_set_elem_len);
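
The new 'align' argument rounds the per-element length up to the
element type's alignment, so the elements packed back to back in the
extensions[] arrays above stay aligned. A worked example, assuming a
14-byte element with one 8-byte extension that requires 8-byte
alignment:

	len    = ALIGN(14, 8);	/* 16: extension starts at offset 16 */
	len   += 8;		/* 24: raw length incl. extension    */
	return ALIGN(len, 8);	/* 24: element i lives at i * 24     */
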
 
diff --git a/net/netfilter/ipset/ip_set_hash_gen.h b/net/netfilter/ipset/ip_set_hash_gen.h
index 691b54f..e5336ab 100644
--- a/net/netfilter/ipset/ip_set_hash_gen.h
+++ b/net/netfilter/ipset/ip_set_hash_gen.h
@@ -72,8 +72,9 @@
 	DECLARE_BITMAP(used, AHASH_MAX_TUNED);
 	u8 size;		/* size of the array */
 	u8 pos;			/* position of the first free entry */
-	unsigned char value[0];	/* the array of the values */
-} __attribute__ ((aligned));
+	unsigned char value[0]	/* the array of the values */
+		__aligned(__alignof__(u64));
+};
 
 /* The hash table: the table size stored here in order to make resizing easy */
 struct htable {
@@ -475,7 +476,7 @@
 mtype_expire(struct ip_set *set, struct htype *h, u8 nets_length, size_t dsize)
 {
 	struct htable *t;
-	struct hbucket *n;
+	struct hbucket *n, *tmp;
 	struct mtype_elem *data;
 	u32 i, j, d;
 #ifdef IP_SET_HASH_WITH_NETS
@@ -510,9 +511,14 @@
 			}
 		}
 		if (d >= AHASH_INIT_SIZE) {
-			struct hbucket *tmp = kzalloc(sizeof(*tmp) +
-					(n->size - AHASH_INIT_SIZE) * dsize,
-					GFP_ATOMIC);
+			if (d >= n->size) {
+				rcu_assign_pointer(hbucket(t, i), NULL);
+				kfree_rcu(n, rcu);
+				continue;
+			}
+			tmp = kzalloc(sizeof(*tmp) +
+				      (n->size - AHASH_INIT_SIZE) * dsize,
+				      GFP_ATOMIC);
 			if (!tmp)
 				/* Still try to delete expired elements */
 				continue;
@@ -522,7 +528,7 @@
 					continue;
 				data = ahash_data(n, j, dsize);
 				memcpy(tmp->value + d * dsize, data, dsize);
-				set_bit(j, tmp->used);
+				set_bit(d, tmp->used);
 				d++;
 			}
 			tmp->pos = d;
@@ -1323,12 +1329,14 @@
 #endif
 		set->variant = &IPSET_TOKEN(HTYPE, 4_variant);
 		set->dsize = ip_set_elem_len(set, tb,
-				sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)));
+			sizeof(struct IPSET_TOKEN(HTYPE, 4_elem)),
+			__alignof__(struct IPSET_TOKEN(HTYPE, 4_elem)));
 #ifndef IP_SET_PROTO_UNDEF
 	} else {
 		set->variant = &IPSET_TOKEN(HTYPE, 6_variant);
 		set->dsize = ip_set_elem_len(set, tb,
-				sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)));
+			sizeof(struct IPSET_TOKEN(HTYPE, 6_elem)),
+			__alignof__(struct IPSET_TOKEN(HTYPE, 6_elem)));
 	}
 #endif
 	if (tb[IPSET_ATTR_TIMEOUT]) {
diff --git a/net/netfilter/ipset/ip_set_list_set.c b/net/netfilter/ipset/ip_set_list_set.c
index 5a30ce6..bbede95 100644
--- a/net/netfilter/ipset/ip_set_list_set.c
+++ b/net/netfilter/ipset/ip_set_list_set.c
@@ -31,7 +31,7 @@
 	struct rcu_head rcu;
 	struct list_head list;
 	ip_set_id_t id;
-};
+} __aligned(__alignof__(u64));
 
 struct set_adt_elem {
 	ip_set_id_t id;
@@ -618,7 +618,8 @@
 		size = IP_SET_LIST_MIN_SIZE;
 
 	set->variant = &set_variant;
-	set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem));
+	set->dsize = ip_set_elem_len(set, tb, sizeof(struct set_elem),
+				     __alignof__(struct set_elem));
 	if (!init_list_set(net, set, size))
 		return -ENOMEM;
 	if (tb[IPSET_ATTR_TIMEOUT]) {
diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
index 1e24fff..f57b4dc 100644
--- a/net/netfilter/ipvs/ip_vs_core.c
+++ b/net/netfilter/ipvs/ip_vs_core.c
@@ -1176,6 +1176,7 @@
 	struct ip_vs_protocol *pp;
 	struct ip_vs_proto_data *pd;
 	struct ip_vs_conn *cp;
+	struct sock *sk;
 
 	EnterFunction(11);
 
@@ -1183,13 +1184,12 @@
 	if (skb->ipvs_property)
 		return NF_ACCEPT;
 
+	sk = skb_to_full_sk(skb);
 	/* Bad... Do not break raw sockets */
-	if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
+	if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
 		     af == AF_INET)) {
-		struct sock *sk = skb->sk;
-		struct inet_sock *inet = inet_sk(skb->sk);
 
-		if (inet && sk->sk_family == PF_INET && inet->nodefrag)
+		if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
 			return NF_ACCEPT;
 	}
 
@@ -1681,6 +1681,7 @@
 	struct ip_vs_conn *cp;
 	int ret, pkts;
 	int conn_reuse_mode;
+	struct sock *sk;
 
 	/* Already marked as IPVS request or reply? */
 	if (skb->ipvs_property)
@@ -1708,12 +1709,11 @@
 	ip_vs_fill_iph_skb(af, skb, false, &iph);
 
 	/* Bad... Do not break raw sockets */
-	if (unlikely(skb->sk != NULL && hooknum == NF_INET_LOCAL_OUT &&
+	sk = skb_to_full_sk(skb);
+	if (unlikely(sk && hooknum == NF_INET_LOCAL_OUT &&
 		     af == AF_INET)) {
-		struct sock *sk = skb->sk;
-		struct inet_sock *inet = inet_sk(skb->sk);
 
-		if (inet && sk->sk_family == PF_INET && inet->nodefrag)
+		if (sk->sk_family == PF_INET && inet_sk(sk)->nodefrag)
 			return NF_ACCEPT;
 	}
 
diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
index 06eb48f..740cce4 100644
--- a/net/netfilter/nfnetlink_log.c
+++ b/net/netfilter/nfnetlink_log.c
@@ -825,7 +825,7 @@
 	struct net *net = sock_net(ctnl);
 	struct nfnl_log_net *log = nfnl_log_pernet(net);
 	int ret = 0;
-	u16 flags;
+	u16 flags = 0;
 
 	if (nfula[NFULA_CFG_CMD]) {
 		u_int8_t pf = nfmsg->nfgen_family;
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index 1067fb4..c7808fc 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -47,27 +47,34 @@
 	local_bh_enable();
 }
 
-static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static void nft_counter_fetch(const struct nft_counter_percpu __percpu *counter,
+			      struct nft_counter *total)
 {
-	struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
-	struct nft_counter_percpu *cpu_stats;
-	struct nft_counter total;
+	const struct nft_counter_percpu *cpu_stats;
 	u64 bytes, packets;
 	unsigned int seq;
 	int cpu;
 
-	memset(&total, 0, sizeof(total));
+	memset(total, 0, sizeof(*total));
 	for_each_possible_cpu(cpu) {
-		cpu_stats = per_cpu_ptr(priv->counter, cpu);
+		cpu_stats = per_cpu_ptr(counter, cpu);
 		do {
 			seq	= u64_stats_fetch_begin_irq(&cpu_stats->syncp);
 			bytes	= cpu_stats->counter.bytes;
 			packets	= cpu_stats->counter.packets;
 		} while (u64_stats_fetch_retry_irq(&cpu_stats->syncp, seq));
 
-		total.packets += packets;
-		total.bytes += bytes;
+		total->packets += packets;
+		total->bytes += bytes;
 	}
+}
+
+static int nft_counter_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+	struct nft_counter_percpu_priv *priv = nft_expr_priv(expr);
+	struct nft_counter total;
+
+	nft_counter_fetch(priv->counter, &total);
 
 	if (nla_put_be64(skb, NFTA_COUNTER_BYTES, cpu_to_be64(total.bytes)) ||
 	    nla_put_be64(skb, NFTA_COUNTER_PACKETS, cpu_to_be64(total.packets)))
@@ -118,6 +125,31 @@
 	free_percpu(priv->counter);
 }
 
+static int nft_counter_clone(struct nft_expr *dst, const struct nft_expr *src)
+{
+	struct nft_counter_percpu_priv *priv = nft_expr_priv(src);
+	struct nft_counter_percpu_priv *priv_clone = nft_expr_priv(dst);
+	struct nft_counter_percpu __percpu *cpu_stats;
+	struct nft_counter_percpu *this_cpu;
+	struct nft_counter total;
+
+	nft_counter_fetch(priv->counter, &total);
+
+	cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu,
+					      GFP_ATOMIC);
+	if (cpu_stats == NULL)
+		return -ENOMEM;
+
+	preempt_disable();
+	this_cpu = this_cpu_ptr(cpu_stats);
+	this_cpu->counter.packets = total.packets;
+	this_cpu->counter.bytes = total.bytes;
+	preempt_enable();
+
+	priv_clone->counter = cpu_stats;
+	return 0;
+}
+
 static struct nft_expr_type nft_counter_type;
 static const struct nft_expr_ops nft_counter_ops = {
 	.type		= &nft_counter_type,
@@ -126,6 +158,7 @@
 	.init		= nft_counter_init,
 	.destroy	= nft_counter_destroy,
 	.dump		= nft_counter_dump,
+	.clone		= nft_counter_clone,
 };
 
 static struct nft_expr_type nft_counter_type __read_mostly = {
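
The clone callback exists because a plain memcpy() of this expression
would share one percpu allocation between two expressions and free it
twice on destroy. Instead the clone snapshots the running totals and
seeds them into a single CPU slot of a fresh percpu allocation, which
preserves the observed counts:

	preempt_disable();		/* pin to one CPU for the seed */
	this_cpu = this_cpu_ptr(cpu_stats);
	this_cpu->counter.packets = total.packets;
	this_cpu->counter.bytes   = total.bytes;
	preempt_enable();
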
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 513a8ef..9dec3bd 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -50,8 +50,9 @@
 	}
 
 	ext = nft_set_elem_ext(set, elem);
-	if (priv->expr != NULL)
-		nft_expr_clone(nft_set_ext_expr(ext), priv->expr);
+	if (priv->expr != NULL &&
+	    nft_expr_clone(nft_set_ext_expr(ext), priv->expr) < 0)
+		return NULL;
 
 	return elem;
 }
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index af399ca..1cf928f 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -1741,6 +1741,20 @@
 		kfree_rcu(po->rollover, rcu);
 }
 
+static bool packet_extra_vlan_len_allowed(const struct net_device *dev,
+					  struct sk_buff *skb)
+{
+	/* Earlier code assumed this would be a VLAN pkt, double-check
+	 * this now that we have the actual packet in hand. We can only
+	 * do this check on Ethernet devices.
+	 */
+	if (unlikely(dev->type != ARPHRD_ETHER))
+		return false;
+
+	skb_reset_mac_header(skb);
+	return likely(eth_hdr(skb)->h_proto == htons(ETH_P_8021Q));
+}
+
 static const struct proto_ops packet_ops;
 
 static const struct proto_ops packet_ops_spkt;
@@ -1902,18 +1916,10 @@
 		goto retry;
 	}
 
-	if (len > (dev->mtu + dev->hard_header_len + extra_len)) {
-		/* Earlier code assumed this would be a VLAN pkt,
-		 * double-check this now that we have the actual
-		 * packet in hand.
-		 */
-		struct ethhdr *ehdr;
-		skb_reset_mac_header(skb);
-		ehdr = eth_hdr(skb);
-		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
-			err = -EMSGSIZE;
-			goto out_unlock;
-		}
+	if (len > (dev->mtu + dev->hard_header_len + extra_len) &&
+	    !packet_extra_vlan_len_allowed(dev, skb)) {
+		err = -EMSGSIZE;
+		goto out_unlock;
 	}
 
 	skb->protocol = proto;
@@ -2332,6 +2338,15 @@
 	return false;
 }
 
+static void tpacket_set_protocol(const struct net_device *dev,
+				 struct sk_buff *skb)
+{
+	if (dev->type == ARPHRD_ETHER) {
+		skb_reset_mac_header(skb);
+		skb->protocol = eth_hdr(skb)->h_proto;
+	}
+}
+
 static int tpacket_fill_skb(struct packet_sock *po, struct sk_buff *skb,
 		void *frame, struct net_device *dev, int size_max,
 		__be16 proto, unsigned char *addr, int hlen)
@@ -2368,8 +2383,6 @@
 	skb_reserve(skb, hlen);
 	skb_reset_network_header(skb);
 
-	if (!packet_use_direct_xmit(po))
-		skb_probe_transport_header(skb, 0);
 	if (unlikely(po->tp_tx_has_off)) {
 		int off_min, off_max, off;
 		off_min = po->tp_hdrlen - sizeof(struct sockaddr_ll);
@@ -2415,6 +2428,8 @@
 				dev->hard_header_len);
 		if (unlikely(err))
 			return err;
+		if (!skb->protocol)
+			tpacket_set_protocol(dev, skb);
 
 		data += dev->hard_header_len;
 		to_write -= dev->hard_header_len;
@@ -2449,6 +2464,8 @@
 		len = ((to_write > len_max) ? len_max : to_write);
 	}
 
+	skb_probe_transport_header(skb, 0);
+
 	return tp_len;
 }
 
@@ -2493,12 +2510,13 @@
 	if (unlikely(!(dev->flags & IFF_UP)))
 		goto out_put;
 
-	reserve = dev->hard_header_len + VLAN_HLEN;
+	if (po->sk.sk_socket->type == SOCK_RAW)
+		reserve = dev->hard_header_len;
 	size_max = po->tx_ring.frame_size
 		- (po->tp_hdrlen - sizeof(struct sockaddr_ll));
 
-	if (size_max > dev->mtu + reserve)
-		size_max = dev->mtu + reserve;
+	if (size_max > dev->mtu + reserve + VLAN_HLEN)
+		size_max = dev->mtu + reserve + VLAN_HLEN;
 
 	do {
 		ph = packet_current_frame(po, &po->tx_ring,
@@ -2525,18 +2543,10 @@
 		tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
 					  addr, hlen);
 		if (likely(tp_len >= 0) &&
-		    tp_len > dev->mtu + dev->hard_header_len) {
-			struct ethhdr *ehdr;
-			/* Earlier code assumed this would be a VLAN pkt,
-			 * double-check this now that we have the actual
-			 * packet in hand.
-			 */
+		    tp_len > dev->mtu + reserve &&
+		    !packet_extra_vlan_len_allowed(dev, skb))
+			tp_len = -EMSGSIZE;
 
-			skb_reset_mac_header(skb);
-			ehdr = eth_hdr(skb);
-			if (ehdr->h_proto != htons(ETH_P_8021Q))
-				tp_len = -EMSGSIZE;
-		}
 		if (unlikely(tp_len < 0)) {
 			if (po->tp_loss) {
 				__packet_set_status(po, ph,
@@ -2765,18 +2775,10 @@
 
 	sock_tx_timestamp(sk, &skb_shinfo(skb)->tx_flags);
 
-	if (!gso_type && (len > dev->mtu + reserve + extra_len)) {
-		/* Earlier code assumed this would be a VLAN pkt,
-		 * double-check this now that we have the actual
-		 * packet in hand.
-		 */
-		struct ethhdr *ehdr;
-		skb_reset_mac_header(skb);
-		ehdr = eth_hdr(skb);
-		if (ehdr->h_proto != htons(ETH_P_8021Q)) {
-			err = -EMSGSIZE;
-			goto out_free;
-		}
+	if (!gso_type && (len > dev->mtu + reserve + extra_len) &&
+	    !packet_extra_vlan_len_allowed(dev, skb)) {
+		err = -EMSGSIZE;
+		goto out_free;
 	}
 
 	skb->protocol = proto;
@@ -2807,8 +2809,8 @@
 		len += vnet_hdr_len;
 	}
 
-	if (!packet_use_direct_xmit(po))
-		skb_probe_transport_header(skb, reserve);
+	skb_probe_transport_header(skb, reserve);
+
 	if (unlikely(extra_len == 4))
 		skb->no_fcs = 1;
 
@@ -4107,7 +4109,7 @@
 		err = -EINVAL;
 		if (unlikely((int)req->tp_block_size <= 0))
 			goto out;
-		if (unlikely(req->tp_block_size & (PAGE_SIZE - 1)))
+		if (unlikely(!PAGE_ALIGNED(req->tp_block_size)))
 			goto out;
 		if (po->tp_version >= TPACKET_V3 &&
 		    (int)(req->tp_block_size -
@@ -4119,8 +4121,8 @@
 		if (unlikely(req->tp_frame_size & (TPACKET_ALIGNMENT - 1)))
 			goto out;
 
-		rb->frames_per_block = req->tp_block_size/req->tp_frame_size;
-		if (unlikely(rb->frames_per_block <= 0))
+		rb->frames_per_block = req->tp_block_size / req->tp_frame_size;
+		if (unlikely(rb->frames_per_block == 0))
 			goto out;
 		if (unlikely((rb->frames_per_block * req->tp_block_nr) !=
 					req->tp_frame_nr))
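
The recurring af_packet change above replaces three open-coded checks with
packet_extra_vlan_len_allowed(): an over-MTU frame is tolerated only when it
genuinely carries an 802.1Q tag, which buys VLAN_HLEN (4 bytes) of headroom.
A standalone sketch of that rule, with the frame layout simplified and all
names hypothetical:

#include <arpa/inet.h>	/* htons() */
#include <stdbool.h>
#include <stdint.h>
#include <string.h>

#define VLAN_HLEN	4
#define ETH_P_8021Q	0x8100
#define ETHERTYPE_OFF	12	/* EtherType offset in an Ethernet header */

static bool frame_len_allowed(const uint8_t *frame, size_t len,
			      size_t mtu, size_t hard_header_len)
{
	uint16_t proto;

	if (len <= mtu + hard_header_len)
		return true;	/* within the normal budget */

	/* Over budget: acceptable only for a real VLAN frame, which
	 * earns VLAN_HLEN extra bytes. */
	memcpy(&proto, frame + ETHERTYPE_OFF, sizeof(proto));
	return proto == htons(ETH_P_8021Q) &&
	       len <= mtu + hard_header_len + VLAN_HLEN;
}

int main(void)
{
	uint8_t frame[1518] = { 0 };

	frame[ETHERTYPE_OFF] = 0x81;	/* 0x8100 = 802.1Q */
	return !frame_len_allowed(frame, sizeof(frame), 1500, 14);
}

The tpacket_snd() hunk applies the same idea from the other direction:
size_max is raised by VLAN_HLEN up front, and the tag check runs only after
the frame has been built.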
diff --git a/net/sctp/auth.c b/net/sctp/auth.c
index 4f15b7d..1543e39 100644
--- a/net/sctp/auth.c
+++ b/net/sctp/auth.c
@@ -809,8 +809,8 @@
 	if (!has_sha1)
 		return -EINVAL;
 
-	memcpy(ep->auth_hmacs_list->hmac_ids, &hmacs->shmac_idents[0],
-		hmacs->shmac_num_idents * sizeof(__u16));
+	for (i = 0; i < hmacs->shmac_num_idents; i++)
+		ep->auth_hmacs_list->hmac_ids[i] = htons(hmacs->shmac_idents[i]);
 	ep->auth_hmacs_list->param_hdr.length = htons(sizeof(sctp_paramhdr_t) +
 				hmacs->shmac_num_idents * sizeof(__u16));
 	return 0;
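
The sctp/auth.c hunk is a byte-order fix: the old memcpy() shipped host-endian
u16 HMAC ids into a wire format that expects big-endian, so the list came out
wrong on little-endian machines. A small sketch contrasting the two forms,
with hypothetical helper names:

#include <arpa/inet.h>
#include <stdint.h>
#include <string.h>

static void copy_ids_wrong(uint16_t *dst, const uint16_t *src, int n)
{
	memcpy(dst, src, n * sizeof(*src));	/* host order leaks out */
}

static void copy_ids_fixed(uint16_t *dst, const uint16_t *src, int n)
{
	for (int i = 0; i < n; i++)
		dst[i] = htons(src[i]);		/* network byte order */
}

int main(void)
{
	uint16_t src[2] = { 1, 3 };	/* e.g. SHA1 and SHA256 ids */
	uint16_t wrong[2], fixed[2];

	copy_ids_wrong(wrong, src, 2);
	copy_ids_fixed(fixed, src, 2);
	/* on little-endian, wrong[] puts 0x01 0x00 on the wire where
	 * the peer expects 0x00 0x01; fixed[] is correct everywhere */
	return !(fixed[0] == htons(1) && fixed[1] == htons(3));
}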
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index aaa0b58..955ec15 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -441,6 +441,7 @@
 		if (state == TCP_LISTEN)
 			unix_release_sock(skb->sk, 1);
 		/* passed fds are erased in the kfree_skb hook	      */
+		UNIXCB(skb).consumed = skb->len;
 		kfree_skb(skb);
 	}
 
@@ -1799,6 +1800,7 @@
 		 * this - does no harm
 		 */
 		consume_skb(newskb);
+		newskb = NULL;
 	}
 
 	if (skb_append_pagefrags(skb, page, offset, size)) {
@@ -1811,8 +1813,11 @@
 	skb->truesize += size;
 	atomic_add(size, &sk->sk_wmem_alloc);
 
-	if (newskb)
+	if (newskb) {
+		spin_lock(&other->sk_receive_queue.lock);
 		__skb_queue_tail(&other->sk_receive_queue, newskb);
+		spin_unlock(&other->sk_receive_queue.lock);
+	}
 
 	unix_state_unlock(other);
 	mutex_unlock(&unix_sk(other)->readlock);
@@ -2072,6 +2077,7 @@
 
 	do {
 		int chunk;
+		bool drop_skb;
 		struct sk_buff *skb, *last;
 
 		unix_state_lock(sk);
@@ -2152,7 +2158,11 @@
 		}
 
 		chunk = min_t(unsigned int, unix_skb_len(skb) - skip, size);
+		skb_get(skb);
 		chunk = state->recv_actor(skb, skip, chunk, state);
+		drop_skb = !unix_skb_len(skb);
+		/* skb is only safe to use if !drop_skb */
+		consume_skb(skb);
 		if (chunk < 0) {
 			if (copied == 0)
 				copied = -EFAULT;
@@ -2161,6 +2171,18 @@
 		copied += chunk;
 		size -= chunk;
 
+		if (drop_skb) {
+			/* the skb was touched by a concurrent reader;
+			 * we should not expect anything from this skb
+			 * anymore and must assume it is invalid - we
+			 * can be sure it was dropped from the queue
+			 *
+			 * let's report a short read
+			 */
+			err = 0;
+			break;
+		}
+
 		/* Mark read part of skb as used */
 		if (!(flags & MSG_PEEK)) {
 			UNIXCB(skb).consumed += chunk;
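
The af_unix recv change pins the skb with skb_get() before calling
recv_actor(), then uses unix_skb_len() to detect whether a concurrent reader
consumed it while the callback ran; only the extra reference makes the skb
safe to inspect at that point. A simplified sketch of the pattern using C11
atomics in place of the kernel's refcounting, all names hypothetical:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct buf {
	atomic_int refcnt;
	int len;		/* 0 once another reader consumed it */
};

static void buf_get(struct buf *b)
{
	atomic_fetch_add(&b->refcnt, 1);
}

static void buf_put(struct buf *b)
{
	if (atomic_fetch_sub(&b->refcnt, 1) == 1)
		free(b);
}

static int read_chunk(struct buf *b, int (*actor)(struct buf *))
{
	bool dropped;
	int chunk;

	buf_get(b);		/* keep b alive across the callback */
	chunk = actor(b);	/* may sleep; a peer may consume b */
	dropped = (b->len == 0);
	buf_put(b);		/* b must not be touched after this */

	if (dropped)
		return 0;	/* report a short read, as the patch does */
	return chunk;
}

static int drain(struct buf *b)
{
	b->len = 0;		/* simulate a racing reader consuming b */
	return 50;		/* bytes copied before the race */
}

int main(void)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return 1;
	atomic_init(&b->refcnt, 1);	/* the queue's reference; leaked
					 * here for brevity */
	b->len = 100;
	return read_chunk(b, drain);	/* returns 0: short read */
}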
diff --git a/samples/bpf/Makefile b/samples/bpf/Makefile
index 79b4596..edd638b 100644
--- a/samples/bpf/Makefile
+++ b/samples/bpf/Makefile
@@ -67,10 +67,13 @@
 # point this to your LLVM backend with bpf support
 LLC=$(srctree)/tools/bpf/llvm/bld/Debug+Asserts/bin/llc
 
+# asm/sysreg.h - inline assembly used by it is incompatible with llvm.
+# But there is no easy way to fix it, so just exclude it since it is
+# useless for BPF samples.
 $(obj)/%.o: $(src)/%.c
 	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
-		-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
+		-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
 		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=obj -o $@
 	clang $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(EXTRA_CFLAGS) \
-		-D__KERNEL__ -Wno-unused-value -Wno-pointer-sign \
+		-D__KERNEL__ -D__ASM_SYSREG_H -Wno-unused-value -Wno-pointer-sign \
 		-O2 -emit-llvm -c $< -o -| $(LLC) -march=bpf -filetype=asm -o $@.s
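
The -D__ASM_SYSREG_H trick works because __ASM_SYSREG_H is asm/sysreg.h's
include guard: pre-defining it on the command line makes the preprocessor skip
the header body, inline assembly and all. A self-contained demo of the same
technique, collapsed into one file, with PROBLEM_H standing in for the real
guard:

/* Compile with: cc -DPROBLEM_H demo.c
 * Without the flag, the guarded region below is entered and the
 * build fails, mimicking llvm choking on the sysreg inline asm. */
#include <stdio.h>

#ifndef PROBLEM_H
#define PROBLEM_H
#error "guarded body reached - guard was not pre-defined"
#endif

int main(void)
{
	puts("problem header excluded by its own guard");
	return 0;
}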
diff --git a/scripts/kernel-doc b/scripts/kernel-doc
index 125b906..638a38e 100755
--- a/scripts/kernel-doc
+++ b/scripts/kernel-doc
@@ -2711,7 +2711,7 @@
 
 # generate a sequence of code that will splice in highlighting information
 # using the s// operator.
-foreach my $k (keys @highlights) {
+for (my $k = 0; $k < @highlights; $k++) {
     my $pattern = $highlights[$k][0];
     my $result = $highlights[$k][1];
 #   print STDERR "scanning pattern:$pattern, highlight:($result)\n";
diff --git a/tools/net/Makefile b/tools/net/Makefile
index ee577ea..ddf8880 100644
--- a/tools/net/Makefile
+++ b/tools/net/Makefile
@@ -4,6 +4,9 @@
 LEX = flex
 YACC = bison
 
+CFLAGS += -Wall -O2
+CFLAGS += -D__EXPORTED_HEADERS__ -I../../include/uapi -I../../include
+
 %.yacc.c: %.y
 	$(YACC) -o $@ -d $<
 
@@ -12,15 +15,13 @@
 
 all : bpf_jit_disasm bpf_dbg bpf_asm
 
-bpf_jit_disasm : CFLAGS = -Wall -O2 -DPACKAGE='bpf_jit_disasm'
+bpf_jit_disasm : CFLAGS += -DPACKAGE='bpf_jit_disasm'
 bpf_jit_disasm : LDLIBS = -lopcodes -lbfd -ldl
 bpf_jit_disasm : bpf_jit_disasm.o
 
-bpf_dbg : CFLAGS = -Wall -O2
 bpf_dbg : LDLIBS = -lreadline
 bpf_dbg : bpf_dbg.o
 
-bpf_asm : CFLAGS = -Wall -O2 -I.
 bpf_asm : LDLIBS =
 bpf_asm : bpf_asm.o bpf_exp.yacc.o bpf_exp.lex.o
 bpf_exp.lex.o : bpf_exp.yacc.c
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index d8e4b20..0dac7e0 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -1173,9 +1173,9 @@
 	unsigned long long msr;
 	unsigned int ratio;
 
-	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+	get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
 
-	fprintf(stderr, "cpu%d: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
+	fprintf(stderr, "cpu%d: MSR_PLATFORM_INFO: 0x%08llx\n", base_cpu, msr);
 
 	ratio = (msr >> 40) & 0xFF;
 	fprintf(stderr, "%d * %.0f = %.0f MHz max efficiency frequency\n",
@@ -1807,7 +1807,7 @@
  *
  * MSR_SMI_COUNT                   0x00000034
  *
- * MSR_NHM_PLATFORM_INFO           0x000000ce
+ * MSR_PLATFORM_INFO               0x000000ce
  * MSR_NHM_SNB_PKG_CST_CFG_CTL     0x000000e2
  *
  * MSR_PKG_C3_RESIDENCY            0x000003f8
@@ -1876,7 +1876,7 @@
 	get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
 	pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
 
-	get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
+	get_msr(base_cpu, MSR_PLATFORM_INFO, &msr);
 	base_ratio = (msr >> 8) & 0xFF;
 
 	base_hz = base_ratio * bclk * 1000000;
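
Both turbostat hunks read MSR 0xce, now consistently named MSR_PLATFORM_INFO,
and decode ratio fields from it: the base ratio sits in bits 15:8 and the
maximum efficiency ratio in bits 47:40, each multiplied by the bus clock to
get a frequency. A standalone sketch of the decoding, using a made-up MSR
value and an assumed 100 MHz bclk:

#include <stdio.h>

#define MSR_PLATFORM_INFO 0xce

int main(void)
{
	unsigned long long msr = 0x0000182400002400ULL;	/* example only */
	double bclk = 100.0;				/* MHz, assumed */

	unsigned int base_ratio = (msr >> 8) & 0xFF;	/* bits 15:8 */
	unsigned int min_ratio  = (msr >> 40) & 0xFF;	/* bits 47:40 */

	printf("MSR_PLATFORM_INFO (0x%x): 0x%016llx\n",
	       MSR_PLATFORM_INFO, msr);
	printf("base frequency:           %.0f MHz\n", base_ratio * bclk);
	printf("max efficiency frequency: %.0f MHz\n", min_ratio * bclk);
	return 0;
}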