Merge master.kernel.org:/pub/scm/linux/kernel/git/tglx/mtd-2.6
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index af4d1bc..94db257 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -163,15 +163,13 @@
 EXPORT_SYMBOL(giveup_spe);
 #endif /* CONFIG_SPE */
 
-#ifdef CONFIG_PPC64
-EXPORT_SYMBOL(__flush_icache_range);
-#else
+#ifndef CONFIG_PPC64
 EXPORT_SYMBOL(flush_instruction_cache);
-EXPORT_SYMBOL(flush_icache_range);
 EXPORT_SYMBOL(flush_tlb_kernel_range);
 EXPORT_SYMBOL(flush_tlb_page);
 EXPORT_SYMBOL(_tlbie);
 #endif
+EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
 
 #ifdef CONFIG_SMP
diff --git a/arch/ppc/kernel/ppc_ksyms.c b/arch/ppc/kernel/ppc_ksyms.c
index 66073f7..bb6a5c6 100644
--- a/arch/ppc/kernel/ppc_ksyms.c
+++ b/arch/ppc/kernel/ppc_ksyms.c
@@ -184,7 +184,7 @@
 
 EXPORT_SYMBOL(flush_instruction_cache);
 EXPORT_SYMBOL(giveup_fpu);
-EXPORT_SYMBOL(flush_icache_range);
+EXPORT_SYMBOL(__flush_icache_range);
 EXPORT_SYMBOL(flush_dcache_range);
 EXPORT_SYMBOL(flush_icache_user_range);
 EXPORT_SYMBOL(flush_dcache_page);
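
Both ksyms hunks converge on exporting __flush_icache_range to modules. A minimal sketch of a typical caller, assuming a hypothetical module (icache_demo) that rewrites an instruction and flushes so the CPU executes the new code; flush_icache_range() is the portable entry point that these powerpc exports back:

	/*
	 * Hypothetical sketch only: patch an instruction in a buffer,
	 * then flush the icache range covering it.
	 */
	#include <linux/module.h>
	#include <linux/init.h>
	#include <asm/cacheflush.h>

	static u32 code_buf[4];

	static int __init icache_demo_init(void)
	{
		code_buf[0] = 0x60000000;	/* powerpc nop (ori 0,0,0) */
		flush_icache_range((unsigned long)code_buf,
				   (unsigned long)code_buf + sizeof(code_buf));
		return 0;
	}
	module_init(icache_demo_init);
	MODULE_LICENSE("GPL");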
diff --git a/drivers/scsi/megaraid.c b/drivers/scsi/megaraid.c
index dfea346..f979252 100644
--- a/drivers/scsi/megaraid.c
+++ b/drivers/scsi/megaraid.c
@@ -380,23 +380,23 @@
 
 	spin_lock_irqsave(&adapter->lock, flags);
 	scb = mega_build_cmd(adapter, scmd, &busy);
+	if (!scb)
+		goto out;
 
-	if(scb) {
-		scb->state |= SCB_PENDQ;
-		list_add_tail(&scb->list, &adapter->pending_list);
+	scb->state |= SCB_PENDQ;
+	list_add_tail(&scb->list, &adapter->pending_list);
 
-		/*
-		 * Check if the HBA is in quiescent state, e.g., during a
-		 * delete logical drive opertion. If it is, don't run
-		 * the pending_list.
-		 */
-		if(atomic_read(&adapter->quiescent) == 0) {
-			mega_runpendq(adapter);
-		}
-		return 0;
-	}
+	/*
+	 * Check if the HBA is in quiescent state, e.g., during a
+	 * delete logical drive operation. If it is, don't run
+	 * the pending_list.
+	 */
+	if (atomic_read(&adapter->quiescent) == 0)
+		mega_runpendq(adapter);
+
+	busy = 0;
+ out:
 	spin_unlock_irqrestore(&adapter->lock, flags);
-
 	return busy;
 }
 
@@ -4677,7 +4677,6 @@
 
 	adapter->flag = flag;
 	spin_lock_init(&adapter->lock);
-	scsi_assign_lock(host, &adapter->lock);
 
 	host->cmd_per_lun = max_cmd_per_lun;
 	host->max_sectors = max_sectors_per_io;
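
With scsi_assign_lock() gone, adapter->lock is purely driver-internal: previously the midlayer dropped it on the driver's behalf because the host lock and adapter->lock were the same lock, so the old code could return 0 with it held. Now every exit from queuecommand must unlock, which the goto-out rewrite above guarantees. A minimal sketch of the single-exit pattern, with hypothetical demo_* names standing in for the megaraid helpers:

	#include <linux/spinlock.h>

	struct demo_adapter {
		spinlock_t lock;
	};
	struct demo_cmd;
	struct demo_scb;

	/* hypothetical stand-ins for mega_build_cmd()/mega_runpendq() */
	extern struct demo_scb *demo_build_cmd(struct demo_adapter *,
					       struct demo_cmd *, int *busy);
	extern void demo_runpendq(struct demo_adapter *);

	static int demo_queuecommand(struct demo_adapter *adapter,
				     struct demo_cmd *cmd)
	{
		unsigned long flags;
		int busy = 0;

		spin_lock_irqsave(&adapter->lock, flags);
		if (!demo_build_cmd(adapter, cmd, &busy))
			goto out;	/* nonzero busy asks the midlayer to retry */
		demo_runpendq(adapter);
		busy = 0;
	out:
		spin_unlock_irqrestore(&adapter->lock, flags);
		return busy;
	}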
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6a75a7a..74f90d7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -163,6 +163,7 @@
 #define VM_HUGETLB	0x00400000	/* Huge TLB Page VM */
 #define VM_NONLINEAR	0x00800000	/* Is non-linear (remap_file_pages) */
 #define VM_MAPPED_COPY	0x01000000	/* T if mapped copy of data (nommu mmap) */
+#define VM_INCOMPLETE	0x02000000	/* Strange partial PFN mapping marker */
 
 #ifndef VM_STACK_DEFAULT_FLAGS		/* arch can override this */
 #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
diff --git a/mm/memory.c b/mm/memory.c
index 9ab206b..990e7dc6 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -988,7 +988,7 @@
 				return i ? : -EFAULT;
 			}
 			if (pages) {
-				struct page *page = vm_normal_page(vma, start, *pte);
+				struct page *page = vm_normal_page(gate_vma, start, *pte);
 				pages[i] = page;
 				if (page)
 					get_page(page);
@@ -1147,6 +1147,95 @@
 }
 
 /*
+ * This is the old fallback for page remapping.
+ *
+ * For historical reasons, it only allows reserved pages. Only
+ * old drivers should use this, and they needed to mark their
+ * pages reserved for the old functions anyway.
+ */
+static int insert_page(struct mm_struct *mm, unsigned long addr, struct page *page, pgprot_t prot)
+{
+	int retval;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	spinlock_t *ptl;
+
+	retval = -EINVAL;
+	if (PageAnon(page) || !PageReserved(page))
+		goto out;
+	retval = -ENOMEM;
+	flush_dcache_page(page);
+	pgd = pgd_offset(mm, addr);
+	pud = pud_alloc(mm, pgd, addr);
+	if (!pud)
+		goto out;
+	pmd = pmd_alloc(mm, pud, addr);
+	if (!pmd)
+		goto out;
+	pte = pte_alloc_map_lock(mm, pmd, addr, &ptl);
+	if (!pte)
+		goto out;
+	retval = -EBUSY;
+	if (!pte_none(*pte))
+		goto out_unlock;
+
+	/* Ok, finally just insert the thing.. */
+	get_page(page);
+	inc_mm_counter(mm, file_rss);
+	page_add_file_rmap(page);
+	set_pte_at(mm, addr, pte, mk_pte(page, prot));
+
+	retval = 0;
+out_unlock:
+	pte_unmap_unlock(pte, ptl);
+out:
+	return retval;
+}
+
+/*
+ * Somebody does a pfn remapping that doesn't cover the whole vma.
+ *
+ * Do it as individual pages instead, and warn about it. It's bad form,
+ * and very inefficient.
+ */
+static int incomplete_pfn_remap(struct vm_area_struct *vma,
+		unsigned long start, unsigned long end,
+		unsigned long pfn, pgprot_t prot)
+{
+	static int warn = 10;
+	struct page *page;
+	int retval;
+
+	if (!(vma->vm_flags & VM_INCOMPLETE)) {
+		if (warn) {
+			warn--;
+			printk("%s does an incomplete pfn remapping", current->comm);
+			dump_stack();
+		}
+	}
+	vma->vm_flags |= VM_INCOMPLETE | VM_IO | VM_RESERVED;
+
+	if (start < vma->vm_start || end > vma->vm_end)
+		return -EINVAL;
+
+	if (!pfn_valid(pfn))
+		return -EINVAL;
+
+	retval = 0;
+	page = pfn_to_page(pfn);
+	while (start < end) {
+		retval = insert_page(vma->vm_mm, start, page, prot);
+		if (retval < 0)
+			break;
+		start += PAGE_SIZE;
+		page++;
+	}
+	return retval;
+}
+
+/*
  * maps a range of physical memory into the requested pages. the old
  * mappings are removed. any references to nonexistent pages results
  * in null mappings (currently treated as "copy-on-access")
@@ -1220,6 +1309,9 @@
 	struct mm_struct *mm = vma->vm_mm;
 	int err;
 
+	if (addr != vma->vm_start || end != vma->vm_end)
+		return incomplete_pfn_remap(vma, addr, end, pfn, prot);
+
 	/*
 	 * Physically remapped pages are special. Tell the
 	 * rest of the world about it:
@@ -1345,7 +1437,7 @@
 		int reuse = can_share_swap_page(old_page);
 		unlock_page(old_page);
 		if (reuse) {
-			flush_cache_page(vma, address, pfn);
+			flush_cache_page(vma, address, pte_pfn(orig_pte));
 			entry = pte_mkyoung(orig_pte);
 			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 			ptep_set_access_flags(vma, address, page_table, entry, 1);
@@ -1389,7 +1481,7 @@
 			}
 		} else
 			inc_mm_counter(mm, anon_rss);
-		flush_cache_page(vma, address, pfn);
+		flush_cache_page(vma, address, pte_pfn(orig_pte));
 		entry = mk_pte(new_page, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);
 		ptep_establish(vma, address, page_table, entry);
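
The incomplete_pfn_remap() comment above describes the offender in the abstract; concretely, it is a driver handing remap_pfn_range() a sub-range of its vma. A minimal sketch under that assumption, with hypothetical names (demo_mmap, demo_buf_pfn):

	/*
	 * Hypothetical mmap handler that would now be caught: it remaps
	 * only the first half of the vma, so end != vma->vm_end and
	 * remap_pfn_range() diverts to incomplete_pfn_remap(), inserting
	 * the (reserved) pages one at a time and warning.
	 */
	#include <linux/fs.h>
	#include <linux/mm.h>

	static unsigned long demo_buf_pfn;	/* pfn of a reserved buffer, set elsewhere */

	static int demo_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long half = (vma->vm_end - vma->vm_start) / 2;

		return remap_pfn_range(vma, vma->vm_start, demo_buf_pfn,
				       half, vma->vm_page_prot);
	}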
diff --git a/mm/rmap.c b/mm/rmap.c
index 491ac35..f853c6d 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -641,7 +641,7 @@
 			continue;
 
 		/* Nuke the page table entry. */
-		flush_cache_page(vma, address, pfn);
+		flush_cache_page(vma, address, pte_pfn(*pte));
 		pteval = ptep_clear_flush(vma, address, pte);
 
 		/* If nonlinear, store the file page offset in the pte. */
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index a16064b..76ff9f4 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -985,6 +985,8 @@
 			}
 
 			/* Rule 4: Prefer home address -- not implemented yet */
+			if (hiscore.rule < 4)
+				hiscore.rule++;
 
 			/* Rule 5: Prefer outgoing interface */
 			if (hiscore.rule < 5) {
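
The two added lines advance hiscore.rule past the unimplemented rule 4 so the per-rule bookkeeping stays aligned for rule 5 and beyond. A minimal illustration of that invariant, with hypothetical names rather than the kernel's actual structures:

	/*
	 * Illustration only: every rule, implemented or not, must advance
	 * the counter, or later rules' "scored this far yet?" checks
	 * compare against a stale rule number.
	 */
	struct demo_score {
		int rule;		/* highest rule already applied to this score */
		unsigned int attrs;	/* bitmask of rules this address won on */
	};

	static void demo_apply_rule(struct demo_score *score, int rule_no,
				    int implemented, int wins)
	{
		if (score->rule < rule_no) {
			if (implemented && wins)
				score->attrs |= 1u << rule_no;
			score->rule = rule_no;	/* advance even when unimplemented */
		}
	}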