Merge branch 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull perf tool fixes from Ingo Molnar.
* 'perf-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
perf kvm: Finding struct machine fails for PERF_RECORD_MMAP
perf annotate: Validate addr in symbol__inc_addr_samples
perf hists browser: Fix NULL deref in hists browsing code
perf hists: Catch and handle out-of-date hist entry maps.
perf annotate: Fix hist decay
perf top: Add intel_idle to the skip list
diff --git a/Documentation/ABI/testing/sysfs-block-rssd b/Documentation/ABI/testing/sysfs-block-rssd
new file mode 100644
index 0000000..d535757
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-block-rssd
@@ -0,0 +1,18 @@
+What: /sys/block/rssd*/registers
+Date: March 2012
+KernelVersion: 3.3
+Contact: Asai Thambi S P <asamymuthupa@micron.com>
+Description:	This is a read-only file. It dumps the following driver
+		information and hardware registers:
+ - S ACTive
+ - Command Issue
+ - Allocated
+ - Completed
+ - PORT IRQ STAT
+ - HOST IRQ STAT
+
+What: /sys/block/rssd*/status
+Date: April 2012
+KernelVersion: 3.4
+Contact: Asai Thambi S P <asamymuthupa@micron.com>
+Description: This is a read-only file. Indicates the status of the device.
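
Both attributes above are plain-text sysfs files and can be read with ordinary
file I/O. A minimal userspace sketch follows; the concrete device name rssd0 is
an assumption, not part of this patch, and the three possible status strings
("online", "write_protect", "thermal_shutdown") come from mtip_hw_show_status()
further down in this diff.

/* Illustrative only: read the mtip32xx "status" attribute from userspace. */
#include <stdio.h>

int main(void)
{
	char buf[64];
	FILE *f = fopen("/sys/block/rssd0/status", "r");	/* assumed device name */

	if (!f) {
		perror("open status");
		return 1;
	}
	if (fgets(buf, sizeof(buf), f))
		printf("device status: %s", buf);
	fclose(f);
	return 0;
}
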
diff --git a/Documentation/ABI/testing/sysfs-cfq-target-latency b/Documentation/ABI/testing/sysfs-cfq-target-latency
new file mode 100644
index 0000000..df0f782
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-cfq-target-latency
@@ -0,0 +1,8 @@
+What: /sys/block/<device>/iosched/target_latency
+Date: March 2012
+Contact:	Tao Ma <boyu.mt@taobao.com>
+Description:
+		The /sys/block/<device>/iosched/target_latency file only exists
+		when cfq is selected in /sys/block/<device>/scheduler.
+		It contains the target latency for cfq, which cfq uses to
+		calculate the time slice allotted to every task.
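
Since target_latency is a regular sysfs tunable, it can also be changed at
runtime. A minimal sketch, assuming a hypothetical sda disk that already uses
cfq; the value 300 is only an example, and per the conversion flag passed to
cfq_target_latency_store later in this diff the value appears to be taken in
milliseconds.

/* Illustrative only: set cfq's target_latency for one disk from userspace. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/sys/block/sda/iosched/target_latency", "w");

	if (!f) {
		perror("open target_latency");	/* cfq must be the active scheduler */
		return 1;
	}
	fprintf(f, "300\n");
	fclose(f);
	return 0;
}
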
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index f62251e..3bb7ffe 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -3,6 +3,7 @@
#include <linux/types.h>
#include <asm/barrier.h>
+#include <asm/cmpxchg.h>
/*
* Atomic operations that C can't guarantee us. Useful for
@@ -168,73 +169,6 @@
return result;
}
-/*
- * Atomic exchange routines.
- */
-
-#define __ASM__MB
-#define ____xchg(type, args...) __xchg ## type ## _local(args)
-#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
-#include <asm/xchg.h>
-
-#define xchg_local(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
- sizeof(*(ptr))); \
- })
-
-#define cmpxchg_local(ptr, o, n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, \
- sizeof(*(ptr))); \
- })
-
-#define cmpxchg64_local(ptr, o, n) \
- ({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg_local((ptr), (o), (n)); \
- })
-
-#ifdef CONFIG_SMP
-#undef __ASM__MB
-#define __ASM__MB "\tmb\n"
-#endif
-#undef ____xchg
-#undef ____cmpxchg
-#define ____xchg(type, args...) __xchg ##type(args)
-#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
-#include <asm/xchg.h>
-
-#define xchg(ptr,x) \
- ({ \
- __typeof__(*(ptr)) _x_ = (x); \
- (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \
- sizeof(*(ptr))); \
- })
-
-#define cmpxchg(ptr, o, n) \
- ({ \
- __typeof__(*(ptr)) _o_ = (o); \
- __typeof__(*(ptr)) _n_ = (n); \
- (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
- (unsigned long)_n_, sizeof(*(ptr)));\
- })
-
-#define cmpxchg64(ptr, o, n) \
- ({ \
- BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
- cmpxchg((ptr), (o), (n)); \
- })
-
-#undef __ASM__MB
-#undef ____cmpxchg
-
-#define __HAVE_ARCH_CMPXCHG 1
-
#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
diff --git a/arch/alpha/include/asm/cmpxchg.h b/arch/alpha/include/asm/cmpxchg.h
new file mode 100644
index 0000000..429e8cd
--- /dev/null
+++ b/arch/alpha/include/asm/cmpxchg.h
@@ -0,0 +1,71 @@
+#ifndef _ALPHA_CMPXCHG_H
+#define _ALPHA_CMPXCHG_H
+
+/*
+ * Atomic exchange routines.
+ */
+
+#define __ASM__MB
+#define ____xchg(type, args...) __xchg ## type ## _local(args)
+#define ____cmpxchg(type, args...) __cmpxchg ## type ## _local(args)
+#include <asm/xchg.h>
+
+#define xchg_local(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_local((ptr), (unsigned long)_x_, \
+ sizeof(*(ptr))); \
+})
+
+#define cmpxchg_local(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_local((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, \
+ sizeof(*(ptr))); \
+})
+
+#define cmpxchg64_local(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_local((ptr), (o), (n)); \
+})
+
+#ifdef CONFIG_SMP
+#undef __ASM__MB
+#define __ASM__MB "\tmb\n"
+#endif
+#undef ____xchg
+#undef ____cmpxchg
+#define ____xchg(type, args...) __xchg ##type(args)
+#define ____cmpxchg(type, args...) __cmpxchg ##type(args)
+#include <asm/xchg.h>
+
+#define xchg(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, \
+ sizeof(*(ptr))); \
+})
+
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
+ (unsigned long)_n_, sizeof(*(ptr)));\
+})
+
+#define cmpxchg64(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+})
+
+#undef __ASM__MB
+#undef ____cmpxchg
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+#endif /* _ALPHA_CMPXCHG_H */
diff --git a/arch/alpha/include/asm/xchg.h b/arch/alpha/include/asm/xchg.h
index 1d1b436..0ca9724 100644
--- a/arch/alpha/include/asm/xchg.h
+++ b/arch/alpha/include/asm/xchg.h
@@ -1,10 +1,10 @@
-#ifndef _ALPHA_ATOMIC_H
+#ifndef _ALPHA_CMPXCHG_H
#error Do not include xchg.h directly!
#else
/*
* xchg/xchg_local and cmpxchg/cmpxchg_local share the same code
* except that local version do not have the expensive memory barrier.
- * So this file is included twice from asm/system.h.
+ * So this file is included twice from asm/cmpxchg.h.
*/
/*
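
The "included twice" scheme that comment refers to works by redefining the
____xchg()/____cmpxchg() name-generating macros between the two inclusions, so
the same template text expands once into the _local variants and once into the
barrier-carrying ones. A stand-alone sketch of just that pattern (placeholder
function bodies and a single hypothetical _u32 case, GNU C as in the kernel
sources):

#include <stdio.h>

/* First "inclusion": generated names get a _local suffix. */
#define ____xchg(type, args...)		__xchg ## type ## _local(args)
static unsigned long ____xchg(_u32, unsigned long x) { return x; }
#undef ____xchg

/* Second "inclusion": plain names (the kernel adds the memory barrier here). */
#define ____xchg(type, args...)		__xchg ## type(args)
static unsigned long ____xchg(_u32, unsigned long x) { return x + 1; }
#undef ____xchg

int main(void)
{
	printf("%lu %lu\n", __xchg_u32_local(1UL), __xchg_u32(1UL));
	return 0;
}
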
diff --git a/arch/ia64/include/asm/cmpxchg.h b/arch/ia64/include/asm/cmpxchg.h
index 4c96187..4f37dbb 100644
--- a/arch/ia64/include/asm/cmpxchg.h
+++ b/arch/ia64/include/asm/cmpxchg.h
@@ -1 +1,147 @@
-#include <asm/intrinsics.h>
+#ifndef _ASM_IA64_CMPXCHG_H
+#define _ASM_IA64_CMPXCHG_H
+
+/*
+ * Compare/Exchange, forked from asm/intrinsics.h
+ * which was:
+ *
+ * Copyright (C) 2002-2003 Hewlett-Packard Co
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ */
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+/* include compiler specific intrinsics */
+#include <asm/ia64regs.h>
+#ifdef __INTEL_COMPILER
+# include <asm/intel_intrin.h>
+#else
+# include <asm/gcc_intrin.h>
+#endif
+
+/*
+ * This function doesn't exist, so you'll get a linker error if
+ * something tries to do an invalid xchg().
+ */
+extern void ia64_xchg_called_with_bad_pointer(void);
+
+#define __xchg(x, ptr, size) \
+({ \
+ unsigned long __xchg_result; \
+ \
+ switch (size) { \
+ case 1: \
+ __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
+ break; \
+ \
+ case 2: \
+ __xchg_result = ia64_xchg2((__u16 *)ptr, x); \
+ break; \
+ \
+ case 4: \
+ __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
+ break; \
+ \
+ case 8: \
+ __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
+ break; \
+ default: \
+ ia64_xchg_called_with_bad_pointer(); \
+ } \
+ __xchg_result; \
+})
+
+#define xchg(ptr, x) \
+((__typeof__(*(ptr))) __xchg((unsigned long) (x), (ptr), sizeof(*(ptr))))
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+
+#define __HAVE_ARCH_CMPXCHG 1
+
+/*
+ * This function doesn't exist, so you'll get a linker error
+ * if something tries to do an invalid cmpxchg().
+ */
+extern long ia64_cmpxchg_called_with_bad_pointer(void);
+
+#define ia64_cmpxchg(sem, ptr, old, new, size) \
+({ \
+ __u64 _o_, _r_; \
+ \
+ switch (size) { \
+ case 1: \
+ _o_ = (__u8) (long) (old); \
+ break; \
+ case 2: \
+ _o_ = (__u16) (long) (old); \
+ break; \
+ case 4: \
+ _o_ = (__u32) (long) (old); \
+ break; \
+ case 8: \
+ _o_ = (__u64) (long) (old); \
+ break; \
+ default: \
+ break; \
+ } \
+ switch (size) { \
+ case 1: \
+ _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
+ break; \
+ \
+ case 2: \
+ _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
+ break; \
+ \
+ case 4: \
+ _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
+ break; \
+ \
+ case 8: \
+ _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
+ break; \
+ \
+ default: \
+ _r_ = ia64_cmpxchg_called_with_bad_pointer(); \
+ break; \
+ } \
+ (__typeof__(old)) _r_; \
+})
+
+#define cmpxchg_acq(ptr, o, n) \
+ ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
+#define cmpxchg_rel(ptr, o, n) \
+ ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
+
+/* for compatibility with other platforms: */
+#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
+
+#define cmpxchg_local cmpxchg
+#define cmpxchg64_local cmpxchg64
+
+#ifdef CONFIG_IA64_DEBUG_CMPXCHG
+# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
+# define CMPXCHG_BUGCHECK(v) \
+do { \
+ if (_cmpxchg_bugcheck_count-- <= 0) { \
+ void *ip; \
+ extern int printk(const char *fmt, ...); \
+ ip = (void *) ia64_getreg(_IA64_REG_IP); \
+ printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v));\
+ break; \
+ } \
+} while (0)
+#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
+# define CMPXCHG_BUGCHECK_DECL
+# define CMPXCHG_BUGCHECK(v)
+#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_IA64_CMPXCHG_H */
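
The "Atomic compare and exchange" comment in the new header above spells out
the classic compare-and-swap contract: compare OLD with MEM, store NEW only if
they match, and return the initial value so the caller can tell whether the
swap happened. A minimal userspace analogue using GCC's __sync builtin rather
than the kernel's cmpxchg() (illustrative only):

#include <stdio.h>

static long mem = 5;

int main(void)
{
	long old = 5, new_val = 9;
	long ret = __sync_val_compare_and_swap(&mem, old, new_val);

	if (ret == old)		/* success: the swap happened */
		printf("swapped, mem = %ld\n", mem);
	else			/* failure: mem held something else */
		printf("not swapped, mem was %ld\n", ret);
	return 0;
}
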
diff --git a/arch/ia64/include/asm/intrinsics.h b/arch/ia64/include/asm/intrinsics.h
index e4076b5..d129e36 100644
--- a/arch/ia64/include/asm/intrinsics.h
+++ b/arch/ia64/include/asm/intrinsics.h
@@ -18,6 +18,7 @@
#else
# include <asm/gcc_intrin.h>
#endif
+#include <asm/cmpxchg.h>
#define ia64_native_get_psr_i() (ia64_native_getreg(_IA64_REG_PSR) & IA64_PSR_I)
@@ -81,119 +82,6 @@
#define ia64_fetch_and_add(i,v) (ia64_fetchadd(i, v, rel) + (i)) /* return new value */
-/*
- * This function doesn't exist, so you'll get a linker error if
- * something tries to do an invalid xchg().
- */
-extern void ia64_xchg_called_with_bad_pointer (void);
-
-#define __xchg(x,ptr,size) \
-({ \
- unsigned long __xchg_result; \
- \
- switch (size) { \
- case 1: \
- __xchg_result = ia64_xchg1((__u8 *)ptr, x); \
- break; \
- \
- case 2: \
- __xchg_result = ia64_xchg2((__u16 *)ptr, x); \
- break; \
- \
- case 4: \
- __xchg_result = ia64_xchg4((__u32 *)ptr, x); \
- break; \
- \
- case 8: \
- __xchg_result = ia64_xchg8((__u64 *)ptr, x); \
- break; \
- default: \
- ia64_xchg_called_with_bad_pointer(); \
- } \
- __xchg_result; \
-})
-
-#define xchg(ptr,x) \
- ((__typeof__(*(ptr))) __xchg ((unsigned long) (x), (ptr), sizeof(*(ptr))))
-
-/*
- * Atomic compare and exchange. Compare OLD with MEM, if identical,
- * store NEW in MEM. Return the initial value in MEM. Success is
- * indicated by comparing RETURN with OLD.
- */
-
-#define __HAVE_ARCH_CMPXCHG 1
-
-/*
- * This function doesn't exist, so you'll get a linker error
- * if something tries to do an invalid cmpxchg().
- */
-extern long ia64_cmpxchg_called_with_bad_pointer (void);
-
-#define ia64_cmpxchg(sem,ptr,old,new,size) \
-({ \
- __u64 _o_, _r_; \
- \
- switch (size) { \
- case 1: _o_ = (__u8 ) (long) (old); break; \
- case 2: _o_ = (__u16) (long) (old); break; \
- case 4: _o_ = (__u32) (long) (old); break; \
- case 8: _o_ = (__u64) (long) (old); break; \
- default: break; \
- } \
- switch (size) { \
- case 1: \
- _r_ = ia64_cmpxchg1_##sem((__u8 *) ptr, new, _o_); \
- break; \
- \
- case 2: \
- _r_ = ia64_cmpxchg2_##sem((__u16 *) ptr, new, _o_); \
- break; \
- \
- case 4: \
- _r_ = ia64_cmpxchg4_##sem((__u32 *) ptr, new, _o_); \
- break; \
- \
- case 8: \
- _r_ = ia64_cmpxchg8_##sem((__u64 *) ptr, new, _o_); \
- break; \
- \
- default: \
- _r_ = ia64_cmpxchg_called_with_bad_pointer(); \
- break; \
- } \
- (__typeof__(old)) _r_; \
-})
-
-#define cmpxchg_acq(ptr, o, n) \
- ia64_cmpxchg(acq, (ptr), (o), (n), sizeof(*(ptr)))
-#define cmpxchg_rel(ptr, o, n) \
- ia64_cmpxchg(rel, (ptr), (o), (n), sizeof(*(ptr)))
-
-/* for compatibility with other platforms: */
-#define cmpxchg(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
-#define cmpxchg64(ptr, o, n) cmpxchg_acq((ptr), (o), (n))
-
-#define cmpxchg_local cmpxchg
-#define cmpxchg64_local cmpxchg64
-
-#ifdef CONFIG_IA64_DEBUG_CMPXCHG
-# define CMPXCHG_BUGCHECK_DECL int _cmpxchg_bugcheck_count = 128;
-# define CMPXCHG_BUGCHECK(v) \
- do { \
- if (_cmpxchg_bugcheck_count-- <= 0) { \
- void *ip; \
- extern int printk(const char *fmt, ...); \
- ip = (void *) ia64_getreg(_IA64_REG_IP); \
- printk("CMPXCHG_BUGCHECK: stuck at %p on word %p\n", ip, (v)); \
- break; \
- } \
- } while (0)
-#else /* !CONFIG_IA64_DEBUG_CMPXCHG */
-# define CMPXCHG_BUGCHECK_DECL
-# define CMPXCHG_BUGCHECK(v)
-#endif /* !CONFIG_IA64_DEBUG_CMPXCHG */
-
#endif
#ifdef __KERNEL__
diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
index fea13c7..b93c2c9 100644
--- a/arch/sparc/kernel/ds.c
+++ b/arch/sparc/kernel/ds.c
@@ -1264,4 +1264,4 @@
return vio_register_driver(&ds_driver);
}
-subsys_initcall(ds_init);
+fs_initcall(ds_init);
diff --git a/arch/sparc/kernel/rtrap_64.S b/arch/sparc/kernel/rtrap_64.S
index 77f1b95..9171fc2 100644
--- a/arch/sparc/kernel/rtrap_64.S
+++ b/arch/sparc/kernel/rtrap_64.S
@@ -20,11 +20,6 @@
.text
.align 32
-__handle_softirq:
- call do_softirq
- nop
- ba,a,pt %xcc, __handle_softirq_continue
- nop
__handle_preemption:
call schedule
wrpr %g0, RTRAP_PSTATE, %pstate
@@ -89,9 +84,7 @@
cmp %l1, 0
/* mm/ultra.S:xcall_report_regs KNOWS about this load. */
- bne,pn %icc, __handle_softirq
ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %l1
-__handle_softirq_continue:
rtrap_xcall:
sethi %hi(0xf << 20), %l4
and %l1, %l4, %l4
diff --git a/block/blk-core.c b/block/blk-core.c
index 3a78b00..1f61b74 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -483,7 +483,7 @@
if (!q)
return NULL;
- q->id = ida_simple_get(&blk_queue_ida, 0, 0, GFP_KERNEL);
+ q->id = ida_simple_get(&blk_queue_ida, 0, 0, gfp_mask);
if (q->id < 0)
goto fail_q;
@@ -1277,7 +1277,8 @@
list_for_each_entry_reverse(rq, &plug->list, queuelist) {
int el_ret;
- (*request_count)++;
+ if (rq->q == q)
+ (*request_count)++;
if (rq->q != q || !blk_rq_merge_ok(rq, bio))
continue;
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 5eed6a7..f2ddb94 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -1218,7 +1218,7 @@
struct bio_list bl;
struct bio *bio;
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
bio_list_init(&bl);
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 4572952..3c38536 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -295,6 +295,7 @@
unsigned int cfq_slice_idle;
unsigned int cfq_group_idle;
unsigned int cfq_latency;
+ unsigned int cfq_target_latency;
/*
* Fallback dummy cfqq for extreme OOM conditions
@@ -604,7 +605,7 @@
{
struct cfq_rb_root *st = &cfqd->grp_service_tree;
- return cfq_target_latency * cfqg->weight / st->total_weight;
+ return cfqd->cfq_target_latency * cfqg->weight / st->total_weight;
}
static inline unsigned
@@ -2271,7 +2272,8 @@
* to have higher weight. A more accurate thing would be to
 	 * calculate system wide async/sync ratio.
*/
- tmp = cfq_target_latency * cfqg_busy_async_queues(cfqd, cfqg);
+ tmp = cfqd->cfq_target_latency *
+ cfqg_busy_async_queues(cfqd, cfqg);
tmp = tmp/cfqd->busy_queues;
slice = min_t(unsigned, slice, tmp);
@@ -3737,6 +3739,7 @@
cfqd->cfq_back_penalty = cfq_back_penalty;
cfqd->cfq_slice[0] = cfq_slice_async;
cfqd->cfq_slice[1] = cfq_slice_sync;
+ cfqd->cfq_target_latency = cfq_target_latency;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
cfqd->cfq_group_idle = cfq_group_idle;
@@ -3788,6 +3791,7 @@
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
SHOW_FUNCTION(cfq_low_latency_show, cfqd->cfq_latency, 0);
+SHOW_FUNCTION(cfq_target_latency_show, cfqd->cfq_target_latency, 1);
#undef SHOW_FUNCTION
#define STORE_FUNCTION(__FUNC, __PTR, MIN, MAX, __CONV) \
@@ -3821,6 +3825,7 @@
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
UINT_MAX, 0);
STORE_FUNCTION(cfq_low_latency_store, &cfqd->cfq_latency, 0, 1, 0);
+STORE_FUNCTION(cfq_target_latency_store, &cfqd->cfq_target_latency, 1, UINT_MAX, 1);
#undef STORE_FUNCTION
#define CFQ_ATTR(name) \
@@ -3838,6 +3843,7 @@
CFQ_ATTR(slice_idle),
CFQ_ATTR(group_idle),
CFQ_ATTR(low_latency),
+ CFQ_ATTR(target_latency),
__ATTR_NULL
};
diff --git a/drivers/block/cciss_scsi.c b/drivers/block/cciss_scsi.c
index e820b68..acda773b 100644
--- a/drivers/block/cciss_scsi.c
+++ b/drivers/block/cciss_scsi.c
@@ -866,6 +866,7 @@
sh->can_queue = cciss_tape_cmds;
sh->sg_tablesize = h->maxsgentries;
sh->max_cmd_len = MAX_COMMAND_SIZE;
+ sh->max_sectors = h->cciss_max_sectors;
((struct cciss_scsi_adapter_data_t *)
h->scsi_ctlr)->scsi_host = sh;
@@ -1410,7 +1411,7 @@
/* track how many SG entries we are using */
if (request_nsgs > h->maxSG)
h->maxSG = request_nsgs;
- c->Header.SGTotal = (__u8) request_nsgs + chained;
+ c->Header.SGTotal = (u16) request_nsgs + chained;
if (request_nsgs > h->max_cmd_sgentries)
c->Header.SGList = h->max_cmd_sgentries;
else
diff --git a/drivers/block/mtip32xx/Kconfig b/drivers/block/mtip32xx/Kconfig
index b5dd14e..0ba837f 100644
--- a/drivers/block/mtip32xx/Kconfig
+++ b/drivers/block/mtip32xx/Kconfig
@@ -4,6 +4,6 @@
config BLK_DEV_PCIESSD_MTIP32XX
tristate "Block Device Driver for Micron PCIe SSDs"
- depends on HOTPLUG_PCI_PCIE
+ depends on PCI
help
This enables the block driver for Micron PCIe SSDs.
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 8eb81c9..00f9fc9 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -36,6 +36,7 @@
#include <linux/idr.h>
#include <linux/kthread.h>
#include <../drivers/ata/ahci.h>
+#include <linux/export.h>
#include "mtip32xx.h"
#define HW_CMD_SLOT_SZ (MTIP_MAX_COMMAND_SLOTS * 32)
@@ -44,6 +45,7 @@
#define HW_PORT_PRIV_DMA_SZ \
(HW_CMD_SLOT_SZ + HW_CMD_TBL_AR_SZ + AHCI_RX_FIS_SZ)
+#define HOST_CAP_NZDMA (1 << 19)
#define HOST_HSORG 0xFC
#define HSORG_DISABLE_SLOTGRP_INTR (1<<24)
#define HSORG_DISABLE_SLOTGRP_PXIS (1<<16)
@@ -139,6 +141,12 @@
int group = 0, commandslot = 0, commandindex = 0;
struct mtip_cmd *command;
struct mtip_port *port = dd->port;
+ static int in_progress;
+
+ if (in_progress)
+ return;
+
+ in_progress = 1;
for (group = 0; group < 4; group++) {
for (commandslot = 0; commandslot < 32; commandslot++) {
@@ -165,7 +173,8 @@
up(&port->cmd_slot);
- atomic_set(&dd->drv_cleanup_done, true);
+ set_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag);
+ in_progress = 0;
}
/*
@@ -262,6 +271,9 @@
&& time_before(jiffies, timeout))
mdelay(1);
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))
+ return -1;
+
if (readl(dd->mmio + HOST_CTL) & HOST_RESET)
return -1;
@@ -294,6 +306,10 @@
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
spin_unlock_irqrestore(&port->cmd_issue_lock, flags);
+
+ /* Set the command's timeout value.*/
+ port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
+ MTIP_NCQ_COMMAND_TIMEOUT_MS);
}
/*
@@ -420,7 +436,12 @@
writel(0xFFFFFFFF, port->completed[i]);
/* Clear any pending interrupts for this port */
- writel(readl(port->mmio + PORT_IRQ_STAT), port->mmio + PORT_IRQ_STAT);
+ writel(readl(port->dd->mmio + PORT_IRQ_STAT),
+ port->dd->mmio + PORT_IRQ_STAT);
+
+ /* Clear any pending interrupts on the HBA. */
+ writel(readl(port->dd->mmio + HOST_IRQ_STAT),
+ port->dd->mmio + HOST_IRQ_STAT);
/* Enable port interrupts */
writel(DEF_PORT_IRQ, port->mmio + PORT_IRQ_MASK);
@@ -447,6 +468,9 @@
&& time_before(jiffies, timeout))
;
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+ return;
+
/*
* Chip quirk: escalate to hba reset if
* PxCMD.CR not clear after 500 ms
@@ -475,6 +499,9 @@
while (time_before(jiffies, timeout))
;
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+ return;
+
/* Clear PxSCTL.DET */
writel(readl(port->mmio + PORT_SCR_CTL) & ~1,
port->mmio + PORT_SCR_CTL);
@@ -486,15 +513,35 @@
&& time_before(jiffies, timeout))
;
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+ return;
+
if ((readl(port->mmio + PORT_SCR_STAT) & 0x01) == 0)
dev_warn(&port->dd->pdev->dev,
"COM reset failed\n");
- /* Clear SError, the PxSERR.DIAG.x should be set so clear it */
- writel(readl(port->mmio + PORT_SCR_ERR), port->mmio + PORT_SCR_ERR);
+ mtip_init_port(port);
+ mtip_start_port(port);
- /* Enable the DMA engine */
- mtip_enable_engine(port, 1);
+}
+
+/*
+ * Helper function for tag logging
+ */
+static void print_tags(struct driver_data *dd,
+ char *msg,
+ unsigned long *tagbits,
+ int cnt)
+{
+ unsigned char tagmap[128];
+ int group, tagmap_len = 0;
+
+ memset(tagmap, 0, sizeof(tagmap));
+ for (group = SLOTBITS_IN_LONGS; group > 0; group--)
+ tagmap_len = sprintf(tagmap + tagmap_len, "%016lX ",
+ tagbits[group-1]);
+ dev_warn(&dd->pdev->dev,
+ "%d command(s) %s: tagmap [%s]", cnt, msg, tagmap);
}
/*
@@ -514,15 +561,18 @@
int tag, cmdto_cnt = 0;
unsigned int bit, group;
unsigned int num_command_slots = port->dd->slot_groups * 32;
+ unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
if (unlikely(!port))
return;
- if (atomic_read(&port->dd->resumeflag) == true) {
+ if (test_bit(MTIP_DDF_RESUME_BIT, &port->dd->dd_flag)) {
mod_timer(&port->cmd_timer,
jiffies + msecs_to_jiffies(30000));
return;
}
+ /* clear the tag accumulator */
+ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
for (tag = 0; tag < num_command_slots; tag++) {
/*
@@ -540,12 +590,10 @@
command = &port->commands[tag];
fis = (struct host_to_dev_fis *) command->command;
- dev_warn(&port->dd->pdev->dev,
- "Timeout for command tag %d\n", tag);
-
+ set_bit(tag, tagaccum);
cmdto_cnt++;
if (cmdto_cnt == 1)
- set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+ set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
/*
* Clear the completed bit. This should prevent
@@ -578,15 +626,29 @@
}
}
- if (cmdto_cnt) {
- dev_warn(&port->dd->pdev->dev,
- "%d commands timed out: restarting port",
- cmdto_cnt);
+ if (cmdto_cnt && !test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+ print_tags(port->dd, "timed out", tagaccum, cmdto_cnt);
+
mtip_restart_port(port);
- clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+ clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
}
+ if (port->ic_pause_timer) {
+ to = port->ic_pause_timer + msecs_to_jiffies(1000);
+ if (time_after(jiffies, to)) {
+ if (!test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags)) {
+ port->ic_pause_timer = 0;
+ clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+ clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
+ clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+ }
+
+
+ }
+ }
+
/* Restart the timer */
mod_timer(&port->cmd_timer,
jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
@@ -681,23 +743,18 @@
complete(waiting);
}
-/*
- * Helper function for tag logging
- */
-static void print_tags(struct driver_data *dd,
- char *msg,
- unsigned long *tagbits)
+static void mtip_null_completion(struct mtip_port *port,
+ int tag,
+ void *data,
+ int status)
{
- unsigned int tag, count = 0;
-
- for (tag = 0; tag < (dd->slot_groups) * 32; tag++) {
- if (test_bit(tag, tagbits))
- count++;
- }
- if (count)
- dev_info(&dd->pdev->dev, "%s [%i tags]\n", msg, count);
+ return;
}
+static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
+ dma_addr_t buffer_dma, unsigned int sectors);
+static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
+ struct smart_attr *attrib);
/*
* Handle an error.
*
@@ -708,12 +765,16 @@
*/
static void mtip_handle_tfe(struct driver_data *dd)
{
- int group, tag, bit, reissue;
+ int group, tag, bit, reissue, rv;
struct mtip_port *port;
- struct mtip_cmd *command;
+ struct mtip_cmd *cmd;
u32 completed;
struct host_to_dev_fis *fis;
unsigned long tagaccum[SLOTBITS_IN_LONGS];
+ unsigned int cmd_cnt = 0;
+ unsigned char *buf;
+ char *fail_reason = NULL;
+ int fail_all_ncq_write = 0, fail_all_ncq_cmds = 0;
dev_warn(&dd->pdev->dev, "Taskfile error\n");
@@ -722,8 +783,11 @@
/* Stop the timer to prevent command timeouts. */
del_timer(&port->cmd_timer);
+ /* clear the tag accumulator */
+ memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+
/* Set eh_active */
- set_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+ set_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
/* Loop through all the groups */
for (group = 0; group < dd->slot_groups; group++) {
@@ -732,9 +796,6 @@
/* clear completed status register in the hardware.*/
writel(completed, port->completed[group]);
- /* clear the tag accumulator */
- memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
-
/* Process successfully completed commands */
for (bit = 0; bit < 32 && completed; bit++) {
if (!(completed & (1<<bit)))
@@ -745,13 +806,14 @@
if (tag == MTIP_TAG_INTERNAL)
continue;
- command = &port->commands[tag];
- if (likely(command->comp_func)) {
+ cmd = &port->commands[tag];
+ if (likely(cmd->comp_func)) {
set_bit(tag, tagaccum);
- atomic_set(&port->commands[tag].active, 0);
- command->comp_func(port,
+ cmd_cnt++;
+ atomic_set(&cmd->active, 0);
+ cmd->comp_func(port,
tag,
- command->comp_data,
+ cmd->comp_data,
0);
} else {
dev_err(&port->dd->pdev->dev,
@@ -765,12 +827,45 @@
}
}
}
- print_tags(dd, "TFE tags completed:", tagaccum);
+
+ print_tags(dd, "completed (TFE)", tagaccum, cmd_cnt);
/* Restart the port */
mdelay(20);
mtip_restart_port(port);
+ /* Trying to determine the cause of the error */
+ rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
+ dd->port->log_buf,
+ dd->port->log_buf_dma, 1);
+ if (rv) {
+ dev_warn(&dd->pdev->dev,
+ "Error in READ LOG EXT (10h) command\n");
+ /* non-critical error, don't fail the load */
+ } else {
+ buf = (unsigned char *)dd->port->log_buf;
+ if (buf[259] & 0x1) {
+ dev_info(&dd->pdev->dev,
+ "Write protect bit is set.\n");
+ set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
+ fail_all_ncq_write = 1;
+ fail_reason = "write protect";
+ }
+ if (buf[288] == 0xF7) {
+ dev_info(&dd->pdev->dev,
+ "Exceeded Tmax, drive in thermal shutdown.\n");
+ set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
+ fail_all_ncq_cmds = 1;
+ fail_reason = "thermal shutdown";
+ }
+ if (buf[288] == 0xBF) {
+ dev_info(&dd->pdev->dev,
+ "Drive indicates rebuild has failed.\n");
+ fail_all_ncq_cmds = 1;
+ fail_reason = "rebuild failed";
+ }
+ }
+
/* clear the tag accumulator */
memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
@@ -779,32 +874,47 @@
for (bit = 0; bit < 32; bit++) {
reissue = 1;
tag = (group << 5) + bit;
+ cmd = &port->commands[tag];
/* If the active bit is set re-issue the command */
- if (atomic_read(&port->commands[tag].active) == 0)
+ if (atomic_read(&cmd->active) == 0)
continue;
- fis = (struct host_to_dev_fis *)
- port->commands[tag].command;
+ fis = (struct host_to_dev_fis *)cmd->command;
/* Should re-issue? */
if (tag == MTIP_TAG_INTERNAL ||
fis->command == ATA_CMD_SET_FEATURES)
reissue = 0;
+ else {
+ if (fail_all_ncq_cmds ||
+ (fail_all_ncq_write &&
+ fis->command == ATA_CMD_FPDMA_WRITE)) {
+ dev_warn(&dd->pdev->dev,
+ " Fail: %s w/tag %d [%s].\n",
+ fis->command == ATA_CMD_FPDMA_WRITE ?
+ "write" : "read",
+ tag,
+ fail_reason != NULL ?
+ fail_reason : "unknown");
+ atomic_set(&cmd->active, 0);
+ if (cmd->comp_func) {
+ cmd->comp_func(port, tag,
+ cmd->comp_data,
+ -ENODATA);
+ }
+ continue;
+ }
+ }
/*
* First check if this command has
* exceeded its retries.
*/
- if (reissue &&
- (port->commands[tag].retries-- > 0)) {
+ if (reissue && (cmd->retries-- > 0)) {
set_bit(tag, tagaccum);
- /* Update the timeout value. */
- port->commands[tag].comp_time =
- jiffies + msecs_to_jiffies(
- MTIP_NCQ_COMMAND_TIMEOUT_MS);
/* Re-issue the command. */
mtip_issue_ncq_command(port, tag);
@@ -814,13 +924,13 @@
/* Retire a command that will not be reissued */
dev_warn(&port->dd->pdev->dev,
"retiring tag %d\n", tag);
- atomic_set(&port->commands[tag].active, 0);
+ atomic_set(&cmd->active, 0);
- if (port->commands[tag].comp_func)
- port->commands[tag].comp_func(
+ if (cmd->comp_func)
+ cmd->comp_func(
port,
tag,
- port->commands[tag].comp_data,
+ cmd->comp_data,
PORT_IRQ_TF_ERR);
else
dev_warn(&port->dd->pdev->dev,
@@ -828,10 +938,10 @@
tag);
}
}
- print_tags(dd, "TFE tags reissued:", tagaccum);
+ print_tags(dd, "reissued (TFE)", tagaccum, cmd_cnt);
/* clear eh_active */
- clear_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags);
+ clear_bit(MTIP_PF_EH_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
mod_timer(&port->cmd_timer,
@@ -899,7 +1009,7 @@
struct mtip_port *port = dd->port;
struct mtip_cmd *cmd = &port->commands[MTIP_TAG_INTERNAL];
- if (test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) &&
+ if (test_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags) &&
(cmd != NULL) && !(readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL))) {
if (cmd->comp_func) {
@@ -911,8 +1021,6 @@
}
}
- dev_warn(&dd->pdev->dev, "IRQ status 0x%x ignored.\n", port_stat);
-
return;
}
@@ -968,6 +1076,9 @@
/* don't proceed further */
return IRQ_HANDLED;
}
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &dd->dd_flag))
+ return rv;
mtip_process_errors(dd, port_stat & PORT_IRQ_ERR);
}
@@ -1015,6 +1126,39 @@
port->cmd_issue[MTIP_TAG_INDEX(tag)]);
}
+static bool mtip_pause_ncq(struct mtip_port *port,
+ struct host_to_dev_fis *fis)
+{
+ struct host_to_dev_fis *reply;
+ unsigned long task_file_data;
+
+ reply = port->rxfis + RX_FIS_D2H_REG;
+ task_file_data = readl(port->mmio+PORT_TFDATA);
+
+ if ((task_file_data & 1) || (fis->command == ATA_CMD_SEC_ERASE_UNIT))
+ return false;
+
+ if (fis->command == ATA_CMD_SEC_ERASE_PREP) {
+ set_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+ port->ic_pause_timer = jiffies;
+ return true;
+ } else if ((fis->command == ATA_CMD_DOWNLOAD_MICRO) &&
+ (fis->features == 0x03)) {
+ set_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
+ port->ic_pause_timer = jiffies;
+ return true;
+ } else if ((fis->command == ATA_CMD_SEC_ERASE_UNIT) ||
+ ((fis->command == 0xFC) &&
+ (fis->features == 0x27 || fis->features == 0x72 ||
+ fis->features == 0x62 || fis->features == 0x26))) {
+ /* Com reset after secure erase or lowlevel format */
+ mtip_restart_port(port);
+ return false;
+ }
+
+ return false;
+}
+
/*
* Wait for port to quiesce
*
@@ -1033,11 +1177,13 @@
to = jiffies + msecs_to_jiffies(timeout);
do {
- if (test_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags) &&
- test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) {
+ if (test_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags) &&
+ test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
msleep(20);
continue; /* svc thd is actively issuing commands */
}
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+ return -EFAULT;
/*
* Ignore s_active bit 0 of array element 0.
* This bit will always be set
@@ -1074,7 +1220,7 @@
* -EAGAIN Time out waiting for command to complete.
*/
static int mtip_exec_internal_command(struct mtip_port *port,
- void *fis,
+ struct host_to_dev_fis *fis,
int fis_len,
dma_addr_t buffer,
int buf_len,
@@ -1084,8 +1230,9 @@
{
struct mtip_cmd_sg *command_sg;
DECLARE_COMPLETION_ONSTACK(wait);
- int rv = 0;
+ int rv = 0, ready2go = 1;
struct mtip_cmd *int_cmd = &port->commands[MTIP_TAG_INTERNAL];
+ unsigned long to;
/* Make sure the buffer is 8 byte aligned. This is asic specific. */
if (buffer & 0x00000007) {
@@ -1094,23 +1241,38 @@
return -EFAULT;
}
- /* Only one internal command should be running at a time */
- if (test_and_set_bit(MTIP_TAG_INTERNAL, port->allocated)) {
+ to = jiffies + msecs_to_jiffies(timeout);
+ do {
+ ready2go = !test_and_set_bit(MTIP_TAG_INTERNAL,
+ port->allocated);
+ if (ready2go)
+ break;
+ mdelay(100);
+ } while (time_before(jiffies, to));
+ if (!ready2go) {
dev_warn(&port->dd->pdev->dev,
- "Internal command already active\n");
+ "Internal cmd active. new cmd [%02X]\n", fis->command);
return -EBUSY;
}
- set_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
+ set_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+ port->ic_pause_timer = 0;
+
+ if (fis->command == ATA_CMD_SEC_ERASE_UNIT)
+ clear_bit(MTIP_PF_SE_ACTIVE_BIT, &port->flags);
+ else if (fis->command == ATA_CMD_DOWNLOAD_MICRO)
+ clear_bit(MTIP_PF_DM_ACTIVE_BIT, &port->flags);
if (atomic == GFP_KERNEL) {
- /* wait for io to complete if non atomic */
- if (mtip_quiesce_io(port, 5000) < 0) {
- dev_warn(&port->dd->pdev->dev,
- "Failed to quiesce IO\n");
- release_slot(port, MTIP_TAG_INTERNAL);
- clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
- wake_up_interruptible(&port->svc_wait);
- return -EBUSY;
+ if (fis->command != ATA_CMD_STANDBYNOW1) {
+ /* wait for io to complete if non atomic */
+ if (mtip_quiesce_io(port, 5000) < 0) {
+ dev_warn(&port->dd->pdev->dev,
+ "Failed to quiesce IO\n");
+ release_slot(port, MTIP_TAG_INTERNAL);
+ clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
+ wake_up_interruptible(&port->svc_wait);
+ return -EBUSY;
+ }
}
/* Set the completion function and data for the command. */
@@ -1120,7 +1282,7 @@
} else {
/* Clear completion - we're going to poll */
int_cmd->comp_data = NULL;
- int_cmd->comp_func = NULL;
+ int_cmd->comp_func = mtip_null_completion;
}
/* Copy the command to the command table */
@@ -1159,6 +1321,12 @@
"Internal command did not complete [%d] "
"within timeout of %lu ms\n",
atomic, timeout);
+ if (mtip_check_surprise_removal(port->dd->pdev) ||
+ test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ rv = -ENXIO;
+ goto exec_ic_exit;
+ }
rv = -EAGAIN;
}
@@ -1166,31 +1334,59 @@
& (1 << MTIP_TAG_INTERNAL)) {
dev_warn(&port->dd->pdev->dev,
"Retiring internal command but CI is 1.\n");
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ hba_reset_nosleep(port->dd);
+ rv = -ENXIO;
+ } else {
+ mtip_restart_port(port);
+ rv = -EAGAIN;
+ }
+ goto exec_ic_exit;
}
} else {
/* Spin for <timeout> checking if command still outstanding */
timeout = jiffies + msecs_to_jiffies(timeout);
-
- while ((readl(
- port->cmd_issue[MTIP_TAG_INTERNAL])
- & (1 << MTIP_TAG_INTERNAL))
- && time_before(jiffies, timeout))
- ;
+ while ((readl(port->cmd_issue[MTIP_TAG_INTERNAL])
+ & (1 << MTIP_TAG_INTERNAL))
+ && time_before(jiffies, timeout)) {
+ if (mtip_check_surprise_removal(port->dd->pdev)) {
+ rv = -ENXIO;
+ goto exec_ic_exit;
+ }
+ if ((fis->command != ATA_CMD_STANDBYNOW1) &&
+ test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ rv = -ENXIO;
+ goto exec_ic_exit;
+ }
+ }
if (readl(port->cmd_issue[MTIP_TAG_INTERNAL])
& (1 << MTIP_TAG_INTERNAL)) {
dev_err(&port->dd->pdev->dev,
- "Internal command did not complete [%d]\n",
- atomic);
+ "Internal command did not complete [atomic]\n");
rv = -EAGAIN;
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &port->dd->dd_flag)) {
+ hba_reset_nosleep(port->dd);
+ rv = -ENXIO;
+ } else {
+ mtip_restart_port(port);
+ rv = -EAGAIN;
+ }
}
}
-
+exec_ic_exit:
/* Clear the allocated and active bits for the internal command. */
atomic_set(&int_cmd->active, 0);
release_slot(port, MTIP_TAG_INTERNAL);
- clear_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags);
+ if (rv >= 0 && mtip_pause_ncq(port, fis)) {
+ /* NCQ paused */
+ return rv;
+ }
+ clear_bit(MTIP_PF_IC_ACTIVE_BIT, &port->flags);
wake_up_interruptible(&port->svc_wait);
return rv;
@@ -1240,6 +1436,9 @@
int rv = 0;
struct host_to_dev_fis fis;
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &port->dd->dd_flag))
+ return -EFAULT;
+
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
fis.type = 0x27;
@@ -1313,6 +1512,7 @@
{
int rv;
struct host_to_dev_fis fis;
+ unsigned long start;
/* Build the FIS. */
memset(&fis, 0, sizeof(struct host_to_dev_fis));
@@ -1320,15 +1520,150 @@
fis.opts = 1 << 7;
fis.command = ATA_CMD_STANDBYNOW1;
- /* Execute the command. Use a 15-second timeout for large drives. */
+ start = jiffies;
rv = mtip_exec_internal_command(port,
&fis,
5,
0,
0,
0,
- GFP_KERNEL,
+ GFP_ATOMIC,
15000);
+ dbg_printk(MTIP_DRV_NAME "Time taken to complete standby cmd: %d ms\n",
+ jiffies_to_msecs(jiffies - start));
+ if (rv)
+ dev_warn(&port->dd->pdev->dev,
+ "STANDBY IMMEDIATE command failed.\n");
+
+ return rv;
+}
+
+/*
+ * Issue a READ LOG EXT command to the device.
+ *
+ * @port pointer to the port structure.
+ * @page page number to fetch
+ * @buffer pointer to buffer
+ * @buffer_dma dma address corresponding to @buffer
+ * @sectors page length to fetch, in sectors
+ *
+ * return value
+ * @rv return value from mtip_exec_internal_command()
+ */
+static int mtip_read_log_page(struct mtip_port *port, u8 page, u16 *buffer,
+ dma_addr_t buffer_dma, unsigned int sectors)
+{
+ struct host_to_dev_fis fis;
+
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = ATA_CMD_READ_LOG_EXT;
+ fis.sect_count = sectors & 0xFF;
+ fis.sect_cnt_ex = (sectors >> 8) & 0xFF;
+ fis.lba_low = page;
+ fis.lba_mid = 0;
+ fis.device = ATA_DEVICE_OBS;
+
+ memset(buffer, 0, sectors * ATA_SECT_SIZE);
+
+ return mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ buffer_dma,
+ sectors * ATA_SECT_SIZE,
+ 0,
+ GFP_ATOMIC,
+ MTIP_INTERNAL_COMMAND_TIMEOUT_MS);
+}
+
+/*
+ * Issue a SMART READ DATA command to the device.
+ *
+ * @port pointer to the port structure.
+ * @buffer pointer to buffer
+ * @buffer_dma dma address corresponding to @buffer
+ *
+ * return value
+ * @rv return value from mtip_exec_internal_command()
+ */
+static int mtip_get_smart_data(struct mtip_port *port, u8 *buffer,
+ dma_addr_t buffer_dma)
+{
+ struct host_to_dev_fis fis;
+
+ memset(&fis, 0, sizeof(struct host_to_dev_fis));
+ fis.type = 0x27;
+ fis.opts = 1 << 7;
+ fis.command = ATA_CMD_SMART;
+ fis.features = 0xD0;
+ fis.sect_count = 1;
+ fis.lba_mid = 0x4F;
+ fis.lba_hi = 0xC2;
+ fis.device = ATA_DEVICE_OBS;
+
+ return mtip_exec_internal_command(port,
+ &fis,
+ 5,
+ buffer_dma,
+ ATA_SECT_SIZE,
+ 0,
+ GFP_ATOMIC,
+ 15000);
+}
+
+/*
+ * Get the value of a smart attribute
+ *
+ * @port pointer to the port structure
+ * @id attribute number
+ * @attrib pointer to return attrib information corresponding to @id
+ *
+ * return value
+ * -EINVAL NULL buffer passed or unsupported attribute @id.
+ * -EPERM Identify data not valid, SMART not supported or not enabled
+ */
+static int mtip_get_smart_attr(struct mtip_port *port, unsigned int id,
+ struct smart_attr *attrib)
+{
+ int rv, i;
+ struct smart_attr *pattr;
+
+ if (!attrib)
+ return -EINVAL;
+
+ if (!port->identify_valid) {
+ dev_warn(&port->dd->pdev->dev, "IDENTIFY DATA not valid\n");
+ return -EPERM;
+ }
+ if (!(port->identify[82] & 0x1)) {
+ dev_warn(&port->dd->pdev->dev, "SMART not supported\n");
+ return -EPERM;
+ }
+ if (!(port->identify[85] & 0x1)) {
+ dev_warn(&port->dd->pdev->dev, "SMART not enabled\n");
+ return -EPERM;
+ }
+
+ memset(port->smart_buf, 0, ATA_SECT_SIZE);
+ rv = mtip_get_smart_data(port, port->smart_buf, port->smart_buf_dma);
+ if (rv) {
+		dev_warn(&port->dd->pdev->dev, "Failed to get SMART data\n");
+ return rv;
+ }
+
+ pattr = (struct smart_attr *)(port->smart_buf + 2);
+ for (i = 0; i < 29; i++, pattr++)
+ if (pattr->attr_id == id) {
+ memcpy(attrib, pattr, sizeof(struct smart_attr));
+ break;
+ }
+
+ if (i == 29) {
+ dev_warn(&port->dd->pdev->dev,
+ "Query for invalid SMART attribute ID\n");
+ rv = -EINVAL;
+ }
return rv;
}
@@ -1504,10 +1839,7 @@
fis.cyl_hi = command[5];
fis.device = command[6] & ~0x10; /* Clear the dev bit*/
-
- dbg_printk(MTIP_DRV_NAME "%s: User Command: cmd %x, feat %x, "
- "nsect %x, sect %x, lcyl %x, "
- "hcyl %x, sel %x\n",
+ dbg_printk(MTIP_DRV_NAME " %s: User Command: cmd %x, feat %x, nsect %x, sect %x, lcyl %x, hcyl %x, sel %x\n",
__func__,
command[0],
command[1],
@@ -1534,8 +1866,7 @@
command[4] = reply->cyl_low;
command[5] = reply->cyl_hi;
- dbg_printk(MTIP_DRV_NAME "%s: Completion Status: stat %x, "
- "err %x , cyl_lo %x cyl_hi %x\n",
+ dbg_printk(MTIP_DRV_NAME " %s: Completion Status: stat %x, err %x , cyl_lo %x cyl_hi %x\n",
__func__,
command[0],
command[1],
@@ -1578,7 +1909,7 @@
}
dbg_printk(MTIP_DRV_NAME
- "%s: User Command: cmd %x, sect %x, "
+ " %s: User Command: cmd %x, sect %x, "
"feat %x, sectcnt %x\n",
__func__,
command[0],
@@ -1607,7 +1938,7 @@
command[2] = command[3];
dbg_printk(MTIP_DRV_NAME
- "%s: Completion Status: stat %x, "
+ " %s: Completion Status: stat %x, "
"err %x, cmd %x\n",
__func__,
command[0],
@@ -1810,9 +2141,10 @@
}
dbg_printk(MTIP_DRV_NAME
- "taskfile: cmd %x, feat %x, nsect %x,"
+ " %s: cmd %x, feat %x, nsect %x,"
" sect/lbal %x, lcyl/lbam %x, hcyl/lbah %x,"
" head/dev %x\n",
+ __func__,
fis.command,
fis.features,
fis.sect_count,
@@ -1823,8 +2155,8 @@
switch (fis.command) {
case ATA_CMD_DOWNLOAD_MICRO:
- /* Change timeout for Download Microcode to 60 seconds.*/
- timeout = 60000;
+ /* Change timeout for Download Microcode to 2 minutes */
+ timeout = 120000;
break;
case ATA_CMD_SEC_ERASE_UNIT:
/* Change timeout for Security Erase Unit to 4 minutes.*/
@@ -1840,8 +2172,8 @@
timeout = 10000;
break;
case ATA_CMD_SMART:
- /* Change timeout for vendor unique command to 10 secs */
- timeout = 10000;
+ /* Change timeout for vendor unique command to 15 secs */
+ timeout = 15000;
break;
default:
timeout = MTIP_IOCTL_COMMAND_TIMEOUT_MS;
@@ -1903,18 +2235,8 @@
req_task->hob_ports[1] = reply->features_ex;
req_task->hob_ports[2] = reply->sect_cnt_ex;
}
-
- /* Com rest after secure erase or lowlevel format */
- if (((fis.command == ATA_CMD_SEC_ERASE_UNIT) ||
- ((fis.command == 0xFC) &&
- (fis.features == 0x27 || fis.features == 0x72 ||
- fis.features == 0x62 || fis.features == 0x26))) &&
- !(reply->command & 1)) {
- mtip_restart_port(dd->port);
- }
-
dbg_printk(MTIP_DRV_NAME
- "%s: Completion: stat %x,"
+ " %s: Completion: stat %x,"
"err %x, sect_cnt %x, lbalo %x,"
"lbamid %x, lbahi %x, dev %x\n",
__func__,
@@ -2080,14 +2402,10 @@
struct host_to_dev_fis *fis;
struct mtip_port *port = dd->port;
struct mtip_cmd *command = &port->commands[tag];
+ int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
/* Map the scatter list for DMA access */
- if (dir == READ)
- nents = dma_map_sg(&dd->pdev->dev, command->sg,
- nents, DMA_FROM_DEVICE);
- else
- nents = dma_map_sg(&dd->pdev->dev, command->sg,
- nents, DMA_TO_DEVICE);
+ nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
command->scatter_ents = nents;
@@ -2127,7 +2445,7 @@
*/
command->comp_data = dd;
command->comp_func = mtip_async_complete;
- command->direction = (dir == READ ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
+ command->direction = dma_dir;
/*
* Set the completion function and data for the command passed
@@ -2140,19 +2458,16 @@
* To prevent this command from being issued
* if an internal command is in progress or error handling is active.
*/
- if (unlikely(test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) ||
- test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags))) {
+ if (port->flags & MTIP_PF_PAUSE_IO) {
set_bit(tag, port->cmds_to_issue);
- set_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags);
+ set_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
return;
}
/* Issue the command to the hardware */
mtip_issue_ncq_command(port, tag);
- /* Set the command's timeout value.*/
- port->commands[tag].comp_time = jiffies + msecs_to_jiffies(
- MTIP_NCQ_COMMAND_TIMEOUT_MS);
+ return;
}
/*
@@ -2191,6 +2506,10 @@
down(&dd->port->cmd_slot);
*tag = get_slot(dd->port);
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
+ up(&dd->port->cmd_slot);
+ return NULL;
+ }
if (unlikely(*tag < 0))
return NULL;
@@ -2207,7 +2526,7 @@
* return value
* The size, in bytes, of the data copied into buf.
*/
-static ssize_t hw_show_registers(struct device *dev,
+static ssize_t mtip_hw_show_registers(struct device *dev,
struct device_attribute *attr,
char *buf)
{
@@ -2216,7 +2535,7 @@
int size = 0;
int n;
- size += sprintf(&buf[size], "%s:\ns_active:\n", __func__);
+ size += sprintf(&buf[size], "S ACTive:\n");
for (n = 0; n < dd->slot_groups; n++)
size += sprintf(&buf[size], "0x%08x\n",
@@ -2240,20 +2559,39 @@
group_allocated);
}
- size += sprintf(&buf[size], "completed:\n");
+ size += sprintf(&buf[size], "Completed:\n");
for (n = 0; n < dd->slot_groups; n++)
size += sprintf(&buf[size], "0x%08x\n",
readl(dd->port->completed[n]));
- size += sprintf(&buf[size], "PORT_IRQ_STAT 0x%08x\n",
+ size += sprintf(&buf[size], "PORT IRQ STAT : 0x%08x\n",
readl(dd->port->mmio + PORT_IRQ_STAT));
- size += sprintf(&buf[size], "HOST_IRQ_STAT 0x%08x\n",
+ size += sprintf(&buf[size], "HOST IRQ STAT : 0x%08x\n",
readl(dd->mmio + HOST_IRQ_STAT));
return size;
}
-static DEVICE_ATTR(registers, S_IRUGO, hw_show_registers, NULL);
+
+static ssize_t mtip_hw_show_status(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct driver_data *dd = dev_to_disk(dev)->private_data;
+ int size = 0;
+
+ if (test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))
+ size += sprintf(buf, "%s", "thermal_shutdown\n");
+ else if (test_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag))
+ size += sprintf(buf, "%s", "write_protect\n");
+ else
+ size += sprintf(buf, "%s", "online\n");
+
+ return size;
+}
+
+static DEVICE_ATTR(registers, S_IRUGO, mtip_hw_show_registers, NULL);
+static DEVICE_ATTR(status, S_IRUGO, mtip_hw_show_status, NULL);
/*
* Create the sysfs related attributes.
@@ -2272,7 +2610,10 @@
if (sysfs_create_file(kobj, &dev_attr_registers.attr))
dev_warn(&dd->pdev->dev,
- "Error creating registers sysfs entry\n");
+ "Error creating 'registers' sysfs entry\n");
+ if (sysfs_create_file(kobj, &dev_attr_status.attr))
+ dev_warn(&dd->pdev->dev,
+ "Error creating 'status' sysfs entry\n");
return 0;
}
@@ -2292,6 +2633,7 @@
return -EINVAL;
sysfs_remove_file(kobj, &dev_attr_registers.attr);
+ sysfs_remove_file(kobj, &dev_attr_status.attr);
return 0;
}
@@ -2384,10 +2726,12 @@
"FTL rebuild in progress. Polling for completion.\n");
start = jiffies;
- dd->ftlrebuildflag = 1;
timeout = jiffies + msecs_to_jiffies(MTIP_FTL_REBUILD_TIMEOUT_MS);
do {
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &dd->dd_flag)))
+ return -EFAULT;
if (mtip_check_surprise_removal(dd->pdev))
return -EFAULT;
@@ -2408,22 +2752,17 @@
dev_warn(&dd->pdev->dev,
"FTL rebuild complete (%d secs).\n",
jiffies_to_msecs(jiffies - start) / 1000);
- dd->ftlrebuildflag = 0;
mtip_block_initialize(dd);
- break;
+ return 0;
}
ssleep(10);
} while (time_before(jiffies, timeout));
/* Check for timeout */
- if (dd->ftlrebuildflag) {
- dev_err(&dd->pdev->dev,
+ dev_err(&dd->pdev->dev,
"Timed out waiting for FTL rebuild to complete (%d secs).\n",
jiffies_to_msecs(jiffies - start) / 1000);
- return -EFAULT;
- }
-
- return 0;
+ return -EFAULT;
}
/*
@@ -2448,14 +2787,17 @@
* is in progress nor error handling is active
*/
wait_event_interruptible(port->svc_wait, (port->flags) &&
- !test_bit(MTIP_FLAG_IC_ACTIVE_BIT, &port->flags) &&
- !test_bit(MTIP_FLAG_EH_ACTIVE_BIT, &port->flags));
+ !(port->flags & MTIP_PF_PAUSE_IO));
if (kthread_should_stop())
break;
- set_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags);
- if (test_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags)) {
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &dd->dd_flag)))
+ break;
+
+ set_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
+ if (test_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags)) {
slot = 1;
/* used to restrict the loop to one iteration */
slot_start = num_cmd_slots;
@@ -2480,21 +2822,19 @@
/* Issue the command to the hardware */
mtip_issue_ncq_command(port, slot);
- /* Set the command's timeout value.*/
- port->commands[slot].comp_time = jiffies +
- msecs_to_jiffies(MTIP_NCQ_COMMAND_TIMEOUT_MS);
-
clear_bit(slot, port->cmds_to_issue);
}
- clear_bit(MTIP_FLAG_ISSUE_CMDS_BIT, &port->flags);
- } else if (test_bit(MTIP_FLAG_REBUILD_BIT, &port->flags)) {
- mtip_ftl_rebuild_poll(dd);
- clear_bit(MTIP_FLAG_REBUILD_BIT, &port->flags);
+ clear_bit(MTIP_PF_ISSUE_CMDS_BIT, &port->flags);
+ } else if (test_bit(MTIP_PF_REBUILD_BIT, &port->flags)) {
+ if (!mtip_ftl_rebuild_poll(dd))
+ set_bit(MTIP_DDF_REBUILD_FAILED_BIT,
+ &dd->dd_flag);
+ clear_bit(MTIP_PF_REBUILD_BIT, &port->flags);
}
- clear_bit(MTIP_FLAG_SVC_THD_ACTIVE_BIT, &port->flags);
+ clear_bit(MTIP_PF_SVC_THD_ACTIVE_BIT, &port->flags);
- if (test_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &port->flags))
+ if (test_bit(MTIP_PF_SVC_THD_STOP_BIT, &port->flags))
break;
}
return 0;
@@ -2513,6 +2853,9 @@
int i;
int rv;
unsigned int num_command_slots;
+ unsigned long timeout, timetaken;
+ unsigned char *buf;
+ struct smart_attr attr242;
dd->mmio = pcim_iomap_table(dd->pdev)[MTIP_ABAR];
@@ -2547,7 +2890,7 @@
/* Allocate memory for the command list. */
dd->port->command_list =
dmam_alloc_coherent(&dd->pdev->dev,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
&dd->port->command_list_dma,
GFP_KERNEL);
if (!dd->port->command_list) {
@@ -2560,7 +2903,7 @@
/* Clear the memory we have allocated. */
memset(dd->port->command_list,
0,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2));
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4));
 	/* Setup the address of the RX FIS. */
dd->port->rxfis = dd->port->command_list + HW_CMD_SLOT_SZ;
@@ -2576,10 +2919,19 @@
dd->port->identify_dma = dd->port->command_tbl_dma +
HW_CMD_TBL_AR_SZ;
- /* Setup the address of the sector buffer. */
+ /* Setup the address of the sector buffer - for some non-ncq cmds */
dd->port->sector_buffer = (void *) dd->port->identify + ATA_SECT_SIZE;
dd->port->sector_buffer_dma = dd->port->identify_dma + ATA_SECT_SIZE;
+ /* Setup the address of the log buf - for read log command */
+ dd->port->log_buf = (void *)dd->port->sector_buffer + ATA_SECT_SIZE;
+ dd->port->log_buf_dma = dd->port->sector_buffer_dma + ATA_SECT_SIZE;
+
+ /* Setup the address of the smart buf - for smart read data command */
+ dd->port->smart_buf = (void *)dd->port->log_buf + ATA_SECT_SIZE;
+ dd->port->smart_buf_dma = dd->port->log_buf_dma + ATA_SECT_SIZE;
+
+
/* Point the command headers at the command tables. */
for (i = 0; i < num_command_slots; i++) {
dd->port->commands[i].command_header =
@@ -2623,14 +2975,43 @@
dd->port->mmio + i*0x80 + PORT_SDBV;
}
- /* Reset the HBA. */
- if (mtip_hba_reset(dd) < 0) {
- dev_err(&dd->pdev->dev,
- "Card did not reset within timeout\n");
- rv = -EIO;
+ timetaken = jiffies;
+ timeout = jiffies + msecs_to_jiffies(30000);
+ while (((readl(dd->port->mmio + PORT_SCR_STAT) & 0x0F) != 0x03) &&
+ time_before(jiffies, timeout)) {
+ mdelay(100);
+ }
+ if (unlikely(mtip_check_surprise_removal(dd->pdev))) {
+ timetaken = jiffies - timetaken;
+ dev_warn(&dd->pdev->dev,
+ "Surprise removal detected at %u ms\n",
+ jiffies_to_msecs(timetaken));
+ rv = -ENODEV;
+ goto out2 ;
+ }
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag))) {
+ timetaken = jiffies - timetaken;
+ dev_warn(&dd->pdev->dev,
+ "Removal detected at %u ms\n",
+ jiffies_to_msecs(timetaken));
+ rv = -EFAULT;
goto out2;
}
+ /* Conditionally reset the HBA. */
+ if (!(readl(dd->mmio + HOST_CAP) & HOST_CAP_NZDMA)) {
+ if (mtip_hba_reset(dd) < 0) {
+ dev_err(&dd->pdev->dev,
+ "Card did not reset within timeout\n");
+ rv = -EIO;
+ goto out2;
+ }
+ } else {
+ /* Clear any pending interrupts on the HBA */
+ writel(readl(dd->mmio + HOST_IRQ_STAT),
+ dd->mmio + HOST_IRQ_STAT);
+ }
+
mtip_init_port(dd->port);
mtip_start_port(dd->port);
@@ -2660,6 +3041,12 @@
mod_timer(&dd->port->cmd_timer,
jiffies + msecs_to_jiffies(MTIP_TIMEOUT_CHECK_PERIOD));
+
+ if (test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)) {
+ rv = -EFAULT;
+ goto out3;
+ }
+
if (mtip_get_identify(dd->port, NULL) < 0) {
rv = -EFAULT;
goto out3;
@@ -2667,10 +3054,47 @@
if (*(dd->port->identify + MTIP_FTL_REBUILD_OFFSET) ==
MTIP_FTL_REBUILD_MAGIC) {
- set_bit(MTIP_FLAG_REBUILD_BIT, &dd->port->flags);
+ set_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags);
return MTIP_FTL_REBUILD_MAGIC;
}
mtip_dump_identify(dd->port);
+
+ /* check write protect, over temp and rebuild statuses */
+ rv = mtip_read_log_page(dd->port, ATA_LOG_SATA_NCQ,
+ dd->port->log_buf,
+ dd->port->log_buf_dma, 1);
+ if (rv) {
+ dev_warn(&dd->pdev->dev,
+ "Error in READ LOG EXT (10h) command\n");
+ /* non-critical error, don't fail the load */
+ } else {
+ buf = (unsigned char *)dd->port->log_buf;
+ if (buf[259] & 0x1) {
+ dev_info(&dd->pdev->dev,
+ "Write protect bit is set.\n");
+ set_bit(MTIP_DDF_WRITE_PROTECT_BIT, &dd->dd_flag);
+ }
+ if (buf[288] == 0xF7) {
+ dev_info(&dd->pdev->dev,
+ "Exceeded Tmax, drive in thermal shutdown.\n");
+ set_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag);
+ }
+ if (buf[288] == 0xBF) {
+ dev_info(&dd->pdev->dev,
+ "Drive indicates rebuild has failed.\n");
+ /* TODO */
+ }
+ }
+
+	/* get write protect progress */
+ memset(&attr242, 0, sizeof(struct smart_attr));
+ if (mtip_get_smart_attr(dd->port, 242, &attr242))
+ dev_warn(&dd->pdev->dev,
+ "Unable to check write protect progress\n");
+ else
+ dev_info(&dd->pdev->dev,
+ "Write protect progress: %d%% (%d blocks)\n",
+ attr242.cur, attr242.data);
return rv;
out3:
@@ -2688,7 +3112,7 @@
/* Free the command/command header memory. */
dmam_free_coherent(&dd->pdev->dev,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
dd->port->command_list,
dd->port->command_list_dma);
out1:
@@ -2712,9 +3136,12 @@
* Send standby immediate (E0h) to the drive so that it
* saves its state.
*/
- if (atomic_read(&dd->drv_cleanup_done) != true) {
+ if (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
- mtip_standby_immediate(dd->port);
+ if (!test_bit(MTIP_PF_REBUILD_BIT, &dd->port->flags))
+ if (mtip_standby_immediate(dd->port))
+ dev_warn(&dd->pdev->dev,
+ "STANDBY IMMEDIATE failed\n");
/* de-initialize the port. */
mtip_deinit_port(dd->port);
@@ -2734,7 +3161,7 @@
/* Free the command/command header memory. */
dmam_free_coherent(&dd->pdev->dev,
- HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 2),
+ HW_PORT_PRIV_DMA_SZ + (ATA_SECT_SIZE * 4),
dd->port->command_list,
dd->port->command_list_dma);
/* Free the memory allocated for the for structure. */
@@ -2892,6 +3319,9 @@
if (!dd)
return -ENOTTY;
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
+ return -ENOTTY;
+
switch (cmd) {
case BLKFLSBUF:
return -ENOTTY;
@@ -2927,6 +3357,9 @@
if (!dd)
return -ENOTTY;
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag)))
+ return -ENOTTY;
+
switch (cmd) {
case BLKFLSBUF:
return -ENOTTY;
@@ -3049,6 +3482,24 @@
int nents = 0;
int tag = 0;
+ if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
+ if (unlikely(test_bit(MTIP_DDF_REMOVE_PENDING_BIT,
+ &dd->dd_flag))) {
+ bio_endio(bio, -ENXIO);
+ return;
+ }
+ if (unlikely(test_bit(MTIP_DDF_OVER_TEMP_BIT, &dd->dd_flag))) {
+ bio_endio(bio, -ENODATA);
+ return;
+ }
+ if (unlikely(test_bit(MTIP_DDF_WRITE_PROTECT_BIT,
+ &dd->dd_flag) &&
+ bio_data_dir(bio))) {
+ bio_endio(bio, -ENODATA);
+ return;
+ }
+ }
+
if (unlikely(!bio_has_data(bio))) {
blk_queue_flush(queue, 0);
bio_endio(bio, 0);
@@ -3061,7 +3512,7 @@
if (unlikely((bio)->bi_vcnt > MTIP_MAX_SG)) {
dev_warn(&dd->pdev->dev,
- "Maximum number of SGL entries exceeded");
+ "Maximum number of SGL entries exceeded\n");
bio_io_error(bio);
mtip_hw_release_scatterlist(dd, tag);
return;
@@ -3210,8 +3661,10 @@
kobject_put(kobj);
}
- if (dd->mtip_svc_handler)
+ if (dd->mtip_svc_handler) {
+ set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
return rv; /* service thread created for handling rebuild */
+ }
start_service_thread:
sprintf(thd_name, "mtip_svc_thd_%02d", index);
@@ -3220,12 +3673,15 @@
dd, thd_name);
if (IS_ERR(dd->mtip_svc_handler)) {
- printk(KERN_ERR "mtip32xx: service thread failed to start\n");
+ dev_err(&dd->pdev->dev, "service thread failed to start\n");
dd->mtip_svc_handler = NULL;
rv = -EFAULT;
goto kthread_run_error;
}
+ if (wait_for_rebuild == MTIP_FTL_REBUILD_MAGIC)
+ rv = wait_for_rebuild;
+
return rv;
kthread_run_error:
@@ -3266,16 +3722,18 @@
struct kobject *kobj;
if (dd->mtip_svc_handler) {
- set_bit(MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT, &dd->port->flags);
+ set_bit(MTIP_PF_SVC_THD_STOP_BIT, &dd->port->flags);
wake_up_interruptible(&dd->port->svc_wait);
kthread_stop(dd->mtip_svc_handler);
}
- /* Clean up the sysfs attributes managed by the protocol layer. */
- kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
- if (kobj) {
- mtip_hw_sysfs_exit(dd, kobj);
- kobject_put(kobj);
+ /* Clean up the sysfs attributes, if created */
+ if (test_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag)) {
+ kobj = kobject_get(&disk_to_dev(dd->disk)->kobj);
+ if (kobj) {
+ mtip_hw_sysfs_exit(dd, kobj);
+ kobject_put(kobj);
+ }
}
/*
@@ -3283,6 +3741,11 @@
* from /dev
*/
del_gendisk(dd->disk);
+
+ spin_lock(&rssd_index_lock);
+ ida_remove(&rssd_index_ida, dd->index);
+ spin_unlock(&rssd_index_lock);
+
blk_cleanup_queue(dd->queue);
dd->disk = NULL;
dd->queue = NULL;
@@ -3312,6 +3775,11 @@
/* Delete our gendisk structure, and cleanup the blk queue. */
del_gendisk(dd->disk);
+
+ spin_lock(&rssd_index_lock);
+ ida_remove(&rssd_index_ida, dd->index);
+ spin_unlock(&rssd_index_lock);
+
blk_cleanup_queue(dd->queue);
dd->disk = NULL;
dd->queue = NULL;
@@ -3359,11 +3827,6 @@
return -ENOMEM;
}
- /* Set the atomic variable as 1 in case of SRSI */
- atomic_set(&dd->drv_cleanup_done, true);
-
- atomic_set(&dd->resumeflag, false);
-
/* Attach the private data to this PCI device. */
pci_set_drvdata(pdev, dd);
@@ -3420,7 +3883,8 @@
* instance number.
*/
instance++;
-
+ if (rv != MTIP_FTL_REBUILD_MAGIC)
+ set_bit(MTIP_DDF_INIT_DONE_BIT, &dd->dd_flag);
goto done;
block_initialize_err:
@@ -3434,9 +3898,6 @@
pci_set_drvdata(pdev, NULL);
return rv;
done:
- /* Set the atomic variable as 0 in case of SRSI */
- atomic_set(&dd->drv_cleanup_done, true);
-
return rv;
}
@@ -3452,8 +3913,10 @@
struct driver_data *dd = pci_get_drvdata(pdev);
int counter = 0;
+ set_bit(MTIP_DDF_REMOVE_PENDING_BIT, &dd->dd_flag);
+
if (mtip_check_surprise_removal(pdev)) {
- while (atomic_read(&dd->drv_cleanup_done) == false) {
+ while (!test_bit(MTIP_DDF_CLEANUP_BIT, &dd->dd_flag)) {
counter++;
msleep(20);
if (counter == 10) {
@@ -3463,8 +3926,6 @@
}
}
}
- /* Set the atomic variable as 1 in case of SRSI */
- atomic_set(&dd->drv_cleanup_done, true);
/* Clean up the block layer. */
mtip_block_remove(dd);
@@ -3493,7 +3954,7 @@
return -EFAULT;
}
- atomic_set(&dd->resumeflag, true);
+ set_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
/* Disable ports & interrupts then send standby immediate */
rv = mtip_block_suspend(dd);
@@ -3559,7 +4020,7 @@
dev_err(&pdev->dev, "Unable to resume\n");
err:
- atomic_set(&dd->resumeflag, false);
+ clear_bit(MTIP_DDF_RESUME_BIT, &dd->dd_flag);
return rv;
}
@@ -3608,18 +4069,25 @@
*/
static int __init mtip_init(void)
{
+ int error;
+
printk(KERN_INFO MTIP_DRV_NAME " Version " MTIP_DRV_VERSION "\n");
/* Allocate a major block device number to use with this driver. */
- mtip_major = register_blkdev(0, MTIP_DRV_NAME);
- if (mtip_major < 0) {
+ error = register_blkdev(0, MTIP_DRV_NAME);
+ if (error <= 0) {
printk(KERN_ERR "Unable to register block device (%d)\n",
- mtip_major);
+ error);
return -EBUSY;
}
+ mtip_major = error;
/* Register our PCI operations. */
- return pci_register_driver(&mtip_pci_driver);
+ error = pci_register_driver(&mtip_pci_driver);
+ if (error)
+ unregister_blkdev(mtip_major, MTIP_DRV_NAME);
+
+ return error;
}
/*
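
The reworked mtip_init() above registers the block major first and then the PCI driver, and unwinds the first registration if the second step fails. A minimal userspace sketch of the same rollback pattern, with made-up register_a()/register_b() standing in for register_blkdev()/pci_register_driver():

    #include <stdio.h>

    /* Hypothetical stand-ins; only the error-handling shape matters here. */
    static int register_a(void) { return 259; }   /* handle > 0 on success, -errno on failure */
    static void unregister_a(int handle) { (void)handle; }
    static int register_b(void) { return -19; }   /* pretend this step fails */

    int main(void)
    {
            int handle, err;

            handle = register_a();
            if (handle <= 0) {
                    fprintf(stderr, "register_a failed (%d)\n", handle);
                    return 1;
            }

            err = register_b();
            if (err) {
                    unregister_a(handle);   /* roll back so nothing is leaked */
                    fprintf(stderr, "register_b failed (%d)\n", err);
                    return 1;
            }
            return 0;
    }
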
diff --git a/drivers/block/mtip32xx/mtip32xx.h b/drivers/block/mtip32xx/mtip32xx.h
index e0554a8..4ef5833 100644
--- a/drivers/block/mtip32xx/mtip32xx.h
+++ b/drivers/block/mtip32xx/mtip32xx.h
@@ -34,8 +34,8 @@
/* offset of Device Control register in PCIe extended capabilites space */
#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
-/* # of times to retry timed out IOs */
-#define MTIP_MAX_RETRIES 5
+/* # of times to retry timed out/failed IOs */
+#define MTIP_MAX_RETRIES 2
/* Various timeout values in ms */
#define MTIP_NCQ_COMMAND_TIMEOUT_MS 5000
@@ -114,12 +114,41 @@
#define __force_bit2int (unsigned int __force)
/* below are bit numbers in 'flags' defined in mtip_port */
-#define MTIP_FLAG_IC_ACTIVE_BIT 0
-#define MTIP_FLAG_EH_ACTIVE_BIT 1
-#define MTIP_FLAG_SVC_THD_ACTIVE_BIT 2
-#define MTIP_FLAG_ISSUE_CMDS_BIT 4
-#define MTIP_FLAG_REBUILD_BIT 5
-#define MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT 8
+#define MTIP_PF_IC_ACTIVE_BIT 0 /* pio/ioctl */
+#define MTIP_PF_EH_ACTIVE_BIT 1 /* error handling */
+#define MTIP_PF_SE_ACTIVE_BIT 2 /* secure erase */
+#define MTIP_PF_DM_ACTIVE_BIT 3 /* download microcode */
+#define MTIP_PF_PAUSE_IO ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
+ (1 << MTIP_PF_EH_ACTIVE_BIT) | \
+ (1 << MTIP_PF_SE_ACTIVE_BIT) | \
+ (1 << MTIP_PF_DM_ACTIVE_BIT))
+
+#define MTIP_PF_SVC_THD_ACTIVE_BIT 4
+#define MTIP_PF_ISSUE_CMDS_BIT 5
+#define MTIP_PF_REBUILD_BIT 6
+#define MTIP_PF_SVC_THD_STOP_BIT 8
+
+/* below are bit numbers in 'dd_flag' defined in driver_data */
+#define MTIP_DDF_REMOVE_PENDING_BIT 1
+#define MTIP_DDF_OVER_TEMP_BIT 2
+#define MTIP_DDF_WRITE_PROTECT_BIT 3
+#define MTIP_DDF_STOP_IO ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
+ (1 << MTIP_DDF_OVER_TEMP_BIT) | \
+ (1 << MTIP_DDF_WRITE_PROTECT_BIT))
+
+#define MTIP_DDF_CLEANUP_BIT 5
+#define MTIP_DDF_RESUME_BIT 6
+#define MTIP_DDF_INIT_DONE_BIT 7
+#define MTIP_DDF_REBUILD_FAILED_BIT 8
+
+__packed struct smart_attr {
+ u8 attr_id;
+ u16 flags;
+ u8 cur;
+ u8 worst;
+ u32 data;
+ u8 res[3];
+};
/* Register Frame Information Structure (FIS), host to device. */
struct host_to_dev_fis {
@@ -345,6 +374,12 @@
* when the command slot and all associated data structures
* are no longer needed.
*/
+ u16 *log_buf;
+ dma_addr_t log_buf_dma;
+
+ u8 *smart_buf;
+ dma_addr_t smart_buf_dma;
+
unsigned long allocated[SLOTBITS_IN_LONGS];
/*
* used to queue commands when an internal command is in progress
@@ -368,6 +403,7 @@
* Timer used to complete commands that have been active for too long.
*/
struct timer_list cmd_timer;
+ unsigned long ic_pause_timer;
/*
* Semaphore used to block threads if there are no
* command slots available.
@@ -404,13 +440,9 @@
unsigned slot_groups; /* number of slot groups the product supports */
- atomic_t drv_cleanup_done; /* Atomic variable for SRSI */
-
unsigned long index; /* Index to determine the disk name */
- unsigned int ftlrebuildflag; /* FTL rebuild flag */
-
- atomic_t resumeflag; /* Atomic variable to track suspend/resume */
+ unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
};
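
The new MTIP_PF_*/MTIP_DDF_* definitions replace several atomic_t fields with bit numbers inside a single flags word, and MTIP_DDF_STOP_IO groups the bits that must block I/O. A small compilable sketch of the idea in plain C (the driver itself uses the kernel's atomic set_bit()/test_bit()/clear_bit() helpers):

    #include <stdio.h>

    #define DDF_REMOVE_PENDING_BIT  1
    #define DDF_OVER_TEMP_BIT       2
    #define DDF_WRITE_PROTECT_BIT   3
    #define DDF_STOP_IO     ((1UL << DDF_REMOVE_PENDING_BIT) | \
                             (1UL << DDF_OVER_TEMP_BIT) | \
                             (1UL << DDF_WRITE_PROTECT_BIT))

    int main(void)
    {
            unsigned long dd_flag = 0;

            dd_flag |= 1UL << DDF_OVER_TEMP_BIT;    /* set_bit() analogue */

            /* One composite test covers every reason to fail incoming I/O. */
            if (dd_flag & DDF_STOP_IO)
                    printf("reject I/O, flags=0x%lx\n", dd_flag);

            dd_flag &= ~(1UL << DDF_OVER_TEMP_BIT); /* clear_bit() analogue */
            return 0;
    }
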
diff --git a/drivers/block/virtio_blk.c b/drivers/block/virtio_blk.c
index c4a60ba..0e4ef3d 100644
--- a/drivers/block/virtio_blk.c
+++ b/drivers/block/virtio_blk.c
@@ -351,6 +351,7 @@
cap_str_10, cap_str_2);
set_capacity(vblk->disk, capacity);
+ revalidate_disk(vblk->disk);
done:
mutex_unlock(&vblk->config_lock);
}
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 0088bf6..73f196c 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -321,6 +321,7 @@
static void xen_blkbk_unmap(struct pending_req *req)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
+ struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int i, invcount = 0;
grant_handle_t handle;
int ret;
@@ -332,25 +333,12 @@
gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
GNTMAP_host_map, handle);
pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
+ pages[invcount] = virt_to_page(vaddr(req, i));
invcount++;
}
- ret = HYPERVISOR_grant_table_op(
- GNTTABOP_unmap_grant_ref, unmap, invcount);
+ ret = gnttab_unmap_refs(unmap, pages, invcount, false);
BUG_ON(ret);
- /*
- * Note, we use invcount, so nr->pages, so we can't index
- * using vaddr(req, i).
- */
- for (i = 0; i < invcount; i++) {
- ret = m2p_remove_override(
- virt_to_page(unmap[i].host_addr), false);
- if (ret) {
- pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
- (unsigned long)unmap[i].host_addr);
- continue;
- }
- }
}
static int xen_blkbk_map(struct blkif_request *req,
@@ -378,7 +366,7 @@
pending_req->blkif->domid);
}
- ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
+ ret = gnttab_map_refs(map, NULL, &blkbk->pending_page(pending_req, 0), nseg);
BUG_ON(ret);
/*
@@ -398,15 +386,6 @@
if (ret)
continue;
- ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
- blkbk->pending_page(pending_req, i), NULL);
- if (ret) {
- pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
- (unsigned long)map[i].dev_bus_addr, ret);
- /* We could switch over to GNTTABOP_copy */
- continue;
- }
-
seg[i].buf = map[i].dev_bus_addr |
(req->u.rw.seg[i].first_sect << 9);
}
@@ -419,21 +398,18 @@
int err = 0;
int status = BLKIF_RSP_OKAY;
struct block_device *bdev = blkif->vbd.bdev;
+ unsigned long secure;
blkif->st_ds_req++;
xen_blkif_get(blkif);
- if (blkif->blk_backend_type == BLKIF_BACKEND_PHY ||
- blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
- unsigned long secure = (blkif->vbd.discard_secure &&
- (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
- BLKDEV_DISCARD_SECURE : 0;
- err = blkdev_issue_discard(bdev,
- req->u.discard.sector_number,
- req->u.discard.nr_sectors,
- GFP_KERNEL, secure);
- } else
- err = -EOPNOTSUPP;
+ secure = (blkif->vbd.discard_secure &&
+ (req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
+ BLKDEV_DISCARD_SECURE : 0;
+
+ err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
+ req->u.discard.nr_sectors,
+ GFP_KERNEL, secure);
if (err == -EOPNOTSUPP) {
pr_debug(DRV_PFX "discard op failed, not supported\n");
@@ -830,7 +806,7 @@
int i, mmap_pages;
int rc = 0;
- if (!xen_pv_domain())
+ if (!xen_domain())
return -ENODEV;
blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
diff --git a/drivers/block/xen-blkback/common.h b/drivers/block/xen-blkback/common.h
index d0ee7ed..773cf27 100644
--- a/drivers/block/xen-blkback/common.h
+++ b/drivers/block/xen-blkback/common.h
@@ -146,11 +146,6 @@
BLKIF_PROTOCOL_X86_64 = 3,
};
-enum blkif_backend_type {
- BLKIF_BACKEND_PHY = 1,
- BLKIF_BACKEND_FILE = 2,
-};
-
struct xen_vbd {
/* What the domain refers to this vbd as. */
blkif_vdev_t handle;
@@ -177,7 +172,6 @@
unsigned int irq;
/* Comms information. */
enum blkif_protocol blk_protocol;
- enum blkif_backend_type blk_backend_type;
union blkif_back_rings blk_rings;
void *blk_ring;
/* The VBD attached to this interface. */
diff --git a/drivers/block/xen-blkback/xenbus.c b/drivers/block/xen-blkback/xenbus.c
index 24a2fb5..89860f3 100644
--- a/drivers/block/xen-blkback/xenbus.c
+++ b/drivers/block/xen-blkback/xenbus.c
@@ -381,72 +381,49 @@
err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
"%d", state);
if (err)
- xenbus_dev_fatal(dev, err, "writing feature-flush-cache");
+ dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);
return err;
}
-int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
+static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
struct xenbus_device *dev = be->dev;
struct xen_blkif *blkif = be->blkif;
- char *type;
int err;
int state = 0;
+ struct block_device *bdev = be->blkif->vbd.bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
- type = xenbus_read(XBT_NIL, dev->nodename, "type", NULL);
- if (!IS_ERR(type)) {
- if (strncmp(type, "file", 4) == 0) {
- state = 1;
- blkif->blk_backend_type = BLKIF_BACKEND_FILE;
+ if (blk_queue_discard(q)) {
+ err = xenbus_printf(xbt, dev->nodename,
+ "discard-granularity", "%u",
+ q->limits.discard_granularity);
+ if (err) {
+ dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
+ return;
}
- if (strncmp(type, "phy", 3) == 0) {
- struct block_device *bdev = be->blkif->vbd.bdev;
- struct request_queue *q = bdev_get_queue(bdev);
- if (blk_queue_discard(q)) {
- err = xenbus_printf(xbt, dev->nodename,
- "discard-granularity", "%u",
- q->limits.discard_granularity);
- if (err) {
- xenbus_dev_fatal(dev, err,
- "writing discard-granularity");
- goto kfree;
- }
- err = xenbus_printf(xbt, dev->nodename,
- "discard-alignment", "%u",
- q->limits.discard_alignment);
- if (err) {
- xenbus_dev_fatal(dev, err,
- "writing discard-alignment");
- goto kfree;
- }
- state = 1;
- blkif->blk_backend_type = BLKIF_BACKEND_PHY;
- }
- /* Optional. */
- err = xenbus_printf(xbt, dev->nodename,
- "discard-secure", "%d",
- blkif->vbd.discard_secure);
- if (err) {
- xenbus_dev_fatal(dev, err,
- "writting discard-secure");
- goto kfree;
- }
+ err = xenbus_printf(xbt, dev->nodename,
+ "discard-alignment", "%u",
+ q->limits.discard_alignment);
+ if (err) {
+ dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
+ return;
}
- } else {
- err = PTR_ERR(type);
- xenbus_dev_fatal(dev, err, "reading type");
- goto out;
+ state = 1;
+ /* Optional. */
+ err = xenbus_printf(xbt, dev->nodename,
+ "discard-secure", "%d",
+ blkif->vbd.discard_secure);
+ if (err) {
+ dev_warn(&dev->dev, "writing discard-secure (%d)", err);
+ return;
+ }
}
-
err = xenbus_printf(xbt, dev->nodename, "feature-discard",
"%d", state);
if (err)
- xenbus_dev_fatal(dev, err, "writing feature-discard");
-kfree:
- kfree(type);
-out:
- return err;
+ dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}
int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct backend_info *be, int state)
@@ -457,7 +434,7 @@
err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
"%d", state);
if (err)
- xenbus_dev_fatal(dev, err, "writing feature-barrier");
+ dev_warn(&dev->dev, "writing feature-barrier (%d)", err);
return err;
}
@@ -689,14 +666,12 @@
return;
}
- err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
- if (err)
- goto abort;
-
- err = xen_blkbk_discard(xbt, be);
-
/* If we can't advertise it is OK. */
- err = xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
+ xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
+
+ xen_blkbk_discard(xbt, be);
+
+ xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd));
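
The rewritten xen_blkbk_discard() treats the discard keys as optional: a failed xenbus_printf() is reported with dev_warn() and the function returns early instead of calling xenbus_dev_fatal(). A rough userspace analogue of that best-effort shape, where write_key() is a made-up stand-in for xenbus_printf():

    #include <stdio.h>

    /* Hypothetical stand-in: returns 0 on success, negative on failure. */
    static int write_key(const char *node, const char *val)
    {
            printf("%s = %s\n", node, val);
            return 0;
    }

    static void advertise_discard(int queue_supports_discard)
    {
            int state = 0;

            if (queue_supports_discard) {
                    if (write_key("discard-granularity", "4096") < 0)
                            return;         /* warn and give up quietly, not fatally */
                    if (write_key("discard-alignment", "0") < 0)
                            return;
                    state = 1;
            }
            /* Advertise the final state either way; failure is non-fatal. */
            if (write_key("feature-discard", state ? "1" : "0") < 0)
                    fprintf(stderr, "writing feature-discard failed\n");
    }

    int main(void)
    {
            advertise_discard(1);
            return 0;
    }
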
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 98cbeba..4e86393 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -43,6 +43,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
+#include <linux/bitmap.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
@@ -81,6 +82,7 @@
*/
struct blkfront_info
{
+ spinlock_t io_lock;
struct mutex mutex;
struct xenbus_device *xbdev;
struct gendisk *gd;
@@ -105,8 +107,6 @@
int is_ready;
};
-static DEFINE_SPINLOCK(blkif_io_lock);
-
static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);
@@ -177,8 +177,7 @@
spin_lock(&minor_lock);
if (find_next_bit(minors, end, minor) >= end) {
- for (; minor < end; ++minor)
- __set_bit(minor, minors);
+ bitmap_set(minors, minor, nr);
rc = 0;
} else
rc = -EBUSY;
@@ -193,8 +192,7 @@
BUG_ON(end > nr_minors);
spin_lock(&minor_lock);
- for (; minor < end; ++minor)
- __clear_bit(minor, minors);
+ bitmap_clear(minors, minor, nr);
spin_unlock(&minor_lock);
}
@@ -419,7 +417,7 @@
struct request_queue *rq;
struct blkfront_info *info = gd->private_data;
- rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
+ rq = blk_init_queue(do_blkif_request, &info->io_lock);
if (rq == NULL)
return -1;
@@ -636,14 +634,14 @@
if (info->rq == NULL)
return;
- spin_lock_irqsave(&blkif_io_lock, flags);
+ spin_lock_irqsave(&info->io_lock, flags);
/* No more blkif_request(). */
blk_stop_queue(info->rq);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
- spin_unlock_irqrestore(&blkif_io_lock, flags);
+ spin_unlock_irqrestore(&info->io_lock, flags);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_work_sync(&info->work);
@@ -675,16 +673,16 @@
{
struct blkfront_info *info = container_of(work, struct blkfront_info, work);
- spin_lock_irq(&blkif_io_lock);
+ spin_lock_irq(&info->io_lock);
if (info->connected == BLKIF_STATE_CONNECTED)
kick_pending_request_queues(info);
- spin_unlock_irq(&blkif_io_lock);
+ spin_unlock_irq(&info->io_lock);
}
static void blkif_free(struct blkfront_info *info, int suspend)
{
/* Prevent new requests being issued until we fix things up. */
- spin_lock_irq(&blkif_io_lock);
+ spin_lock_irq(&info->io_lock);
info->connected = suspend ?
BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
/* No more blkif_request(). */
@@ -692,7 +690,7 @@
blk_stop_queue(info->rq);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
- spin_unlock_irq(&blkif_io_lock);
+ spin_unlock_irq(&info->io_lock);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_work_sync(&info->work);
@@ -728,10 +726,10 @@
struct blkfront_info *info = (struct blkfront_info *)dev_id;
int error;
- spin_lock_irqsave(&blkif_io_lock, flags);
+ spin_lock_irqsave(&info->io_lock, flags);
if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
- spin_unlock_irqrestore(&blkif_io_lock, flags);
+ spin_unlock_irqrestore(&info->io_lock, flags);
return IRQ_HANDLED;
}
@@ -816,7 +814,7 @@
kick_pending_request_queues(info);
- spin_unlock_irqrestore(&blkif_io_lock, flags);
+ spin_unlock_irqrestore(&info->io_lock, flags);
return IRQ_HANDLED;
}
@@ -991,6 +989,7 @@
}
mutex_init(&info->mutex);
+ spin_lock_init(&info->io_lock);
info->xbdev = dev;
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
@@ -1068,7 +1067,7 @@
xenbus_switch_state(info->xbdev, XenbusStateConnected);
- spin_lock_irq(&blkif_io_lock);
+ spin_lock_irq(&info->io_lock);
/* Now safe for us to use the shared ring */
info->connected = BLKIF_STATE_CONNECTED;
@@ -1079,7 +1078,7 @@
/* Kick any other new requests queued since we resumed */
kick_pending_request_queues(info);
- spin_unlock_irq(&blkif_io_lock);
+ spin_unlock_irq(&info->io_lock);
return 0;
}
@@ -1277,10 +1276,10 @@
xenbus_switch_state(info->xbdev, XenbusStateConnected);
/* Kick pending requests. */
- spin_lock_irq(&blkif_io_lock);
+ spin_lock_irq(&info->io_lock);
info->connected = BLKIF_STATE_CONNECTED;
kick_pending_request_queues(info);
- spin_unlock_irq(&blkif_io_lock);
+ spin_unlock_irq(&info->io_lock);
add_disk(info->gd);
@@ -1410,7 +1409,6 @@
mutex_lock(&blkfront_mutex);
bdev = bdget_disk(disk, 0);
- bdput(bdev);
if (bdev->bd_openers)
goto out;
@@ -1441,6 +1439,7 @@
}
out:
+ bdput(bdev);
mutex_unlock(&blkfront_mutex);
return 0;
}
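
The minor-reservation helpers above switch from per-bit loops to bitmap_set()/bitmap_clear(), which mark or clear a whole run of minors in one call (and do so word-at-a-time in the kernel). A compilable per-bit sketch of the effect, not the kernel implementation:

    #include <limits.h>
    #include <stdio.h>

    #define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

    static void bitmap_set_range(unsigned long *map, unsigned int start, unsigned int nr)
    {
            for (unsigned int i = start; i < start + nr; i++)
                    map[i / BITS_PER_LONG] |= 1UL << (i % BITS_PER_LONG);
    }

    static void bitmap_clear_range(unsigned long *map, unsigned int start, unsigned int nr)
    {
            for (unsigned int i = start; i < start + nr; i++)
                    map[i / BITS_PER_LONG] &= ~(1UL << (i % BITS_PER_LONG));
    }

    int main(void)
    {
            unsigned long minors[4] = { 0 };

            bitmap_set_range(minors, 16, 16);       /* reserve minors 16..31 */
            printf("word0 = 0x%lx\n", minors[0]);
            bitmap_clear_range(minors, 16, 16);     /* release them again */
            printf("word0 = 0x%lx\n", minors[0]);
            return 0;
    }
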
diff --git a/drivers/cpufreq/Kconfig.arm b/drivers/cpufreq/Kconfig.arm
index ffbb446..5961e64 100644
--- a/drivers/cpufreq/Kconfig.arm
+++ b/drivers/cpufreq/Kconfig.arm
@@ -4,6 +4,7 @@
config ARM_OMAP2PLUS_CPUFREQ
bool "TI OMAP2+"
+ depends on ARCH_OMAP2PLUS
default ARCH_OMAP2PLUS
select CPU_FREQ_TABLE
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index edadbda..e03653d 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -430,7 +430,7 @@
config GPIO_SODAVILLE
bool "Intel Sodaville GPIO support"
- depends on X86 && PCI && OF && BROKEN
+ depends on X86 && PCI && OF
select GPIO_GENERIC
select GENERIC_IRQ_CHIP
help
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index 9ad1703..ae5d7f1 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -252,7 +252,7 @@
if (ret < 0)
memset(dev->irq_stat, 0, ARRAY_SIZE(dev->irq_stat));
- for (bank = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO);
+ for (bank = 0, bit = 0; bank <= ADP5588_BANK(ADP5588_MAXGPIO);
bank++, bit = 0) {
pending = dev->irq_stat[bank] & dev->irq_mask[bank];
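
The gpio-adp5588 change above initializes bit alongside bank because the 'bank++, bit = 0' clause only runs at the end of each pass, so the first pass previously used whatever value bit already held. A generic illustration (not the driver's actual loop body):

    #include <stdio.h>

    int main(void)
    {
            int bank, bit = 5;      /* pretend 'bit' holds a stale value */

            /* Without ', bit = 0' in the init clause, bank 0 would start at bit 5. */
            for (bank = 0, bit = 0; bank < 2; bank++, bit = 0)
                    for (; bit < 3; bit++)
                            printf("bank %d bit %d\n", bank, bit);
            return 0;
    }
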
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 4627787..19d6fc0 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2382,8 +2382,8 @@
#endif
};
-static struct samsung_gpio_chip exynos5_gpios_1[] = {
#ifdef CONFIG_ARCH_EXYNOS5
+static struct samsung_gpio_chip exynos5_gpios_1[] = {
{
.chip = {
.base = EXYNOS5_GPA0(0),
@@ -2541,11 +2541,11 @@
.to_irq = samsung_gpiolib_to_irq,
},
},
-#endif
};
+#endif
-static struct samsung_gpio_chip exynos5_gpios_2[] = {
#ifdef CONFIG_ARCH_EXYNOS5
+static struct samsung_gpio_chip exynos5_gpios_2[] = {
{
.chip = {
.base = EXYNOS5_GPE0(0),
@@ -2602,11 +2602,11 @@
},
},
-#endif
};
+#endif
-static struct samsung_gpio_chip exynos5_gpios_3[] = {
#ifdef CONFIG_ARCH_EXYNOS5
+static struct samsung_gpio_chip exynos5_gpios_3[] = {
{
.chip = {
.base = EXYNOS5_GPV0(0),
@@ -2638,11 +2638,11 @@
.label = "GPV4",
},
},
-#endif
};
+#endif
-static struct samsung_gpio_chip exynos5_gpios_4[] = {
#ifdef CONFIG_ARCH_EXYNOS5
+static struct samsung_gpio_chip exynos5_gpios_4[] = {
{
.chip = {
.base = EXYNOS5_GPZ(0),
@@ -2650,8 +2650,8 @@
.label = "GPZ",
},
},
-#endif
};
+#endif
#if defined(CONFIG_ARCH_EXYNOS) && defined(CONFIG_OF)
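
The gpio-samsung hunks move each #ifdef CONFIG_ARCH_EXYNOS5 so it encloses the array's closing brace as well as its initializers; with the brace outside the guard, non-EXYNOS5 builds were left with an empty, zero-length array definition. A compile-time sketch of the corrected shape, using a made-up config symbol and chip type:

    #include <stdio.h>

    #define CONFIG_ARCH_DEMO 1      /* comment out to build the other side of the guard */

    struct chip { int base; const char *label; };

    #ifdef CONFIG_ARCH_DEMO
    static struct chip demo_gpios[] = {
            { .base = 0, .label = "GPA0" },
            { .base = 8, .label = "GPA1" },
    };
    #endif

    int main(void)
    {
    #ifdef CONFIG_ARCH_DEMO
            printf("%zu chips\n", sizeof(demo_gpios) / sizeof(demo_gpios[0]));
    #endif
            return 0;
    }
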
diff --git a/drivers/gpio/gpio-sodaville.c b/drivers/gpio/gpio-sodaville.c
index 9ba15d3..031e5d2 100644
--- a/drivers/gpio/gpio-sodaville.c
+++ b/drivers/gpio/gpio-sodaville.c
@@ -41,7 +41,7 @@
struct sdv_gpio_chip_data {
int irq_base;
void __iomem *gpio_pub_base;
- struct irq_domain id;
+ struct irq_domain *id;
struct irq_chip_generic *gc;
struct bgpio_chip bgpio;
};
@@ -51,10 +51,9 @@
struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
struct sdv_gpio_chip_data *sd = gc->private;
void __iomem *type_reg;
- u32 irq_offs = d->irq - sd->irq_base;
u32 reg;
- if (irq_offs < 8)
+ if (d->hwirq < 8)
type_reg = sd->gpio_pub_base + GPIT1R0;
else
type_reg = sd->gpio_pub_base + GPIT1R1;
@@ -63,11 +62,11 @@
switch (type) {
case IRQ_TYPE_LEVEL_HIGH:
- reg &= ~BIT(4 * (irq_offs % 8));
+ reg &= ~BIT(4 * (d->hwirq % 8));
break;
case IRQ_TYPE_LEVEL_LOW:
- reg |= BIT(4 * (irq_offs % 8));
+ reg |= BIT(4 * (d->hwirq % 8));
break;
default:
@@ -91,7 +90,7 @@
u32 irq_bit = __fls(irq_stat);
irq_stat &= ~BIT(irq_bit);
- generic_handle_irq(sd->irq_base + irq_bit);
+ generic_handle_irq(irq_find_mapping(sd->id, irq_bit));
}
return IRQ_HANDLED;
@@ -127,7 +126,7 @@
}
static struct irq_domain_ops irq_domain_sdv_ops = {
- .dt_translate = sdv_xlate,
+ .xlate = sdv_xlate,
};
static __devinit int sdv_register_irqsupport(struct sdv_gpio_chip_data *sd,
@@ -149,10 +148,6 @@
if (ret)
goto out_free_desc;
- sd->id.irq_base = sd->irq_base;
- sd->id.of_node = of_node_get(pdev->dev.of_node);
- sd->id.ops = &irq_domain_sdv_ops;
-
/*
* This gpio irq controller latches level irqs. Testing shows that if
* we unmask & ACK the IRQ before the source of the interrupt is gone
@@ -179,7 +174,10 @@
IRQ_GC_INIT_MASK_CACHE, IRQ_NOREQUEST,
IRQ_LEVEL | IRQ_NOPROBE);
- irq_domain_add(&sd->id);
+ sd->id = irq_domain_add_legacy(pdev->dev.of_node, SDV_NUM_PUB_GPIOS,
+ sd->irq_base, 0, &irq_domain_sdv_ops, sd);
+ if (!sd->id)
+ goto out_free_irq;
return 0;
out_free_irq:
free_irq(pdev->irq, sd);
@@ -260,7 +258,6 @@
{
struct sdv_gpio_chip_data *sd = pci_get_drvdata(pdev);
- irq_domain_del(&sd->id);
free_irq(pdev->irq, sd);
irq_free_descs(sd->irq_base, SDV_NUM_PUB_GPIOS);
diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
index 145f135..9140236 100644
--- a/drivers/hwmon/acpi_power_meter.c
+++ b/drivers/hwmon/acpi_power_meter.c
@@ -391,6 +391,7 @@
break;
default:
BUG();
+ val = "";
}
return sprintf(buf, "%s\n", val);
diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
index be51037..29b319d 100644
--- a/drivers/hwmon/pmbus/pmbus_core.c
+++ b/drivers/hwmon/pmbus/pmbus_core.c
@@ -710,13 +710,13 @@
* If a negative value is stored in any of the referenced registers, this value
* reflects an error code which will be returned.
*/
-static int pmbus_get_boolean(struct pmbus_data *data, int index, int *val)
+static int pmbus_get_boolean(struct pmbus_data *data, int index)
{
u8 s1 = (index >> 24) & 0xff;
u8 s2 = (index >> 16) & 0xff;
u8 reg = (index >> 8) & 0xff;
u8 mask = index & 0xff;
- int status;
+ int ret, status;
u8 regval;
status = data->status[reg];
@@ -725,7 +725,7 @@
regval = status & mask;
if (!s1 && !s2)
- *val = !!regval;
+ ret = !!regval;
else {
long v1, v2;
struct pmbus_sensor *sensor1, *sensor2;
@@ -739,9 +739,9 @@
v1 = pmbus_reg2data(data, sensor1);
v2 = pmbus_reg2data(data, sensor2);
- *val = !!(regval && v1 >= v2);
+ ret = !!(regval && v1 >= v2);
}
- return 0;
+ return ret;
}
static ssize_t pmbus_show_boolean(struct device *dev,
@@ -750,11 +750,10 @@
struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
struct pmbus_data *data = pmbus_update_device(dev);
int val;
- int err;
- err = pmbus_get_boolean(data, attr->index, &val);
- if (err)
- return err;
+ val = pmbus_get_boolean(data, attr->index);
+ if (val < 0)
+ return val;
return snprintf(buf, PAGE_SIZE, "%d\n", val);
}
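
pmbus_get_boolean() now follows the common kernel convention of returning the value directly, with a negative return reserved for error codes, instead of writing through an output parameter. A minimal userspace illustration of that calling convention (the mask and register value are arbitrary):

    #include <errno.h>
    #include <stdio.h>

    /* Returns 0 or 1 on success, or a negative errno-style code on failure. */
    static int get_boolean(int reg_status, int mask)
    {
            if (reg_status < 0)
                    return -EIO;            /* propagate the stored error */
            return !!(reg_status & mask);   /* otherwise the return *is* the value */
    }

    int main(void)
    {
            int val = get_boolean(0x05, 0x04);

            if (val < 0) {
                    fprintf(stderr, "read failed: %d\n", val);
                    return 1;
            }
            printf("boolean = %d\n", val);
            return 0;
    }
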
diff --git a/drivers/hwmon/smsc47b397.c b/drivers/hwmon/smsc47b397.c
index d3b778d..c5f6be4 100644
--- a/drivers/hwmon/smsc47b397.c
+++ b/drivers/hwmon/smsc47b397.c
@@ -343,10 +343,11 @@
return err;
}
-static int __init smsc47b397_find(unsigned short *addr)
+static int __init smsc47b397_find(void)
{
u8 id, rev;
char *name;
+ unsigned short addr;
superio_enter();
id = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
@@ -370,14 +371,14 @@
rev = superio_inb(SUPERIO_REG_DEVREV);
superio_select(SUPERIO_REG_LD8);
- *addr = (superio_inb(SUPERIO_REG_BASE_MSB) << 8)
+ addr = (superio_inb(SUPERIO_REG_BASE_MSB) << 8)
| superio_inb(SUPERIO_REG_BASE_LSB);
pr_info("found SMSC %s (base address 0x%04x, revision %u)\n",
- name, *addr, rev);
+ name, addr, rev);
superio_exit();
- return 0;
+ return addr;
}
static int __init smsc47b397_init(void)
@@ -385,9 +386,10 @@
unsigned short address;
int ret;
- ret = smsc47b397_find(&address);
- if (ret)
+ ret = smsc47b397_find();
+ if (ret < 0)
return ret;
+ address = ret;
ret = platform_driver_register(&smsc47b397_driver);
if (ret)
diff --git a/drivers/hwmon/smsc47m1.c b/drivers/hwmon/smsc47m1.c
index c590c14..b5aa38d 100644
--- a/drivers/hwmon/smsc47m1.c
+++ b/drivers/hwmon/smsc47m1.c
@@ -491,10 +491,10 @@
.attrs = smsc47m1_attributes,
};
-static int __init smsc47m1_find(unsigned short *addr,
- struct smsc47m1_sio_data *sio_data)
+static int __init smsc47m1_find(struct smsc47m1_sio_data *sio_data)
{
u8 val;
+ unsigned short addr;
superio_enter();
val = force_id ? force_id : superio_inb(SUPERIO_REG_DEVID);
@@ -546,9 +546,9 @@
}
superio_select();
- *addr = (superio_inb(SUPERIO_REG_BASE) << 8)
+ addr = (superio_inb(SUPERIO_REG_BASE) << 8)
| superio_inb(SUPERIO_REG_BASE + 1);
- if (*addr == 0) {
+ if (addr == 0) {
pr_info("Device address not set, will not use\n");
superio_exit();
return -ENODEV;
@@ -565,7 +565,7 @@
}
superio_exit();
- return 0;
+ return addr;
}
/* Restore device to its initial state */
@@ -938,13 +938,15 @@
unsigned short address;
struct smsc47m1_sio_data sio_data;
- if (smsc47m1_find(&address, &sio_data))
- return -ENODEV;
+ err = smsc47m1_find(&sio_data);
+ if (err < 0)
+ return err;
+ address = err;
/* Sets global pdev as a side effect */
err = smsc47m1_device_add(address, &sio_data);
if (err)
- goto exit;
+ return err;
err = platform_driver_probe(&smsc47m1_driver, smsc47m1_probe);
if (err)
@@ -955,7 +957,6 @@
exit_device:
platform_device_unregister(pdev);
smsc47m1_restore(&sio_data);
-exit:
return err;
}
diff --git a/drivers/of/gpio.c b/drivers/of/gpio.c
index bba8121..bf984b6 100644
--- a/drivers/of/gpio.c
+++ b/drivers/of/gpio.c
@@ -140,7 +140,7 @@
if (WARN_ON(gpiospec->args_count < gc->of_gpio_n_cells))
return -EINVAL;
- if (gpiospec->args[0] > gc->ngpio)
+ if (gpiospec->args[0] >= gc->ngpio)
return -EINVAL;
if (flags)
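
The of/gpio.c fix tightens the range check: a controller with ngpio lines exposes offsets 0..ngpio-1, so an offset equal to ngpio must already be rejected, which needs >= rather than >. For example:

    #include <stdio.h>

    static int check_offset(unsigned int offset, unsigned int ngpio)
    {
            /* 'offset > ngpio' would wrongly accept offset == ngpio. */
            return offset >= ngpio ? -1 : 0;
    }

    int main(void)
    {
            printf("%d\n", check_offset(31, 32));   /* last valid line: accepted */
            printf("%d\n", check_offset(32, 32));   /* one past the end: rejected */
            return 0;
    }
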
diff --git a/drivers/spi/spi-davinci.c b/drivers/spi/spi-davinci.c
index 31bfba8..9b2901f 100644
--- a/drivers/spi/spi-davinci.c
+++ b/drivers/spi/spi-davinci.c
@@ -653,7 +653,7 @@
dev_dbg(sdev, "Couldn't DMA map a %d bytes RX buffer\n",
rx_buf_count);
if (t->tx_buf)
- dma_unmap_single(NULL, t->tx_dma, t->len,
+ dma_unmap_single(&spi->dev, t->tx_dma, t->len,
DMA_TO_DEVICE);
return -ENOMEM;
}
@@ -692,10 +692,10 @@
if (spicfg->io_type == SPI_IO_TYPE_DMA) {
if (t->tx_buf)
- dma_unmap_single(NULL, t->tx_dma, t->len,
+ dma_unmap_single(&spi->dev, t->tx_dma, t->len,
DMA_TO_DEVICE);
- dma_unmap_single(NULL, t->rx_dma, rx_buf_count,
+ dma_unmap_single(&spi->dev, t->rx_dma, rx_buf_count,
DMA_FROM_DEVICE);
clear_io_bits(dspi->base + SPIINT, SPIINT_DMA_REQ_EN);
diff --git a/drivers/spi/spi-fsl-spi.c b/drivers/spi/spi-fsl-spi.c
index 24cacff..5f748c0 100644
--- a/drivers/spi/spi-fsl-spi.c
+++ b/drivers/spi/spi-fsl-spi.c
@@ -139,10 +139,12 @@
static void fsl_spi_chipselect(struct spi_device *spi, int value)
{
struct mpc8xxx_spi *mpc8xxx_spi = spi_master_get_devdata(spi->master);
- struct fsl_spi_platform_data *pdata = spi->dev.parent->platform_data;
+ struct fsl_spi_platform_data *pdata;
bool pol = spi->mode & SPI_CS_HIGH;
struct spi_mpc8xxx_cs *cs = spi->controller_state;
+ pdata = spi->dev.parent->parent->platform_data;
+
if (value == BITBANG_CS_INACTIVE) {
if (pdata->cs_control)
pdata->cs_control(spi, !pol);
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 31054e3..570f220 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -83,7 +83,7 @@
struct spi_bitbang bitbang;
struct completion xfer_done;
- void *base;
+ void __iomem *base;
int irq;
struct clk *clk;
unsigned long spi_clk;
@@ -766,8 +766,12 @@
}
ret = of_property_read_u32(np, "fsl,spi-num-chipselects", &num_cs);
- if (ret < 0)
- num_cs = mxc_platform_info->num_chipselect;
+ if (ret < 0) {
+ if (mxc_platform_info)
+ num_cs = mxc_platform_info->num_chipselect;
+ else
+ return ret;
+ }
master = spi_alloc_master(&pdev->dev,
sizeof(struct spi_imx_data) + sizeof(int) * num_cs);
@@ -784,7 +788,7 @@
for (i = 0; i < master->num_chipselect; i++) {
int cs_gpio = of_get_named_gpio(np, "cs-gpios", i);
- if (cs_gpio < 0)
+ if (cs_gpio < 0 && mxc_platform_info)
cs_gpio = mxc_platform_info->chipselect[i];
spi_imx->chipselect[i] = cs_gpio;
diff --git a/drivers/video/au1100fb.c b/drivers/video/au1100fb.c
index befcbd8..ffbce45 100644
--- a/drivers/video/au1100fb.c
+++ b/drivers/video/au1100fb.c
@@ -499,7 +499,8 @@
au1100fb_fix.mmio_start = regs_res->start;
au1100fb_fix.mmio_len = resource_size(regs_res);
- if (!devm_request_mem_region(au1100fb_fix.mmio_start,
+ if (!devm_request_mem_region(&dev->dev,
+ au1100fb_fix.mmio_start,
au1100fb_fix.mmio_len,
DRIVER_NAME)) {
print_err("fail to lock memory region at 0x%08lx",
@@ -516,7 +517,7 @@
fbdev->fb_len = fbdev->panel->xres * fbdev->panel->yres *
(fbdev->panel->bpp >> 3) * AU1100FB_NBR_VIDEO_BUFFERS;
- fbdev->fb_mem = dmam_alloc_coherent(&dev->dev, &dev->dev,
+ fbdev->fb_mem = dmam_alloc_coherent(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
&fbdev->fb_phys, GFP_KERNEL);
if (!fbdev->fb_mem) {
diff --git a/drivers/video/au1200fb.c b/drivers/video/au1200fb.c
index 3e9a773..7ca79f0 100644
--- a/drivers/video/au1200fb.c
+++ b/drivers/video/au1200fb.c
@@ -1724,7 +1724,7 @@
/* Allocate the framebuffer to the maximum screen size */
fbdev->fb_len = (win->w[plane].xres * win->w[plane].yres * bpp) / 8;
- fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev, &dev->dev,
+ fbdev->fb_mem = dmam_alloc_noncoherent(&dev->dev,
PAGE_ALIGN(fbdev->fb_len),
&fbdev->fb_phys, GFP_KERNEL);
if (!fbdev->fb_mem) {
diff --git a/drivers/video/kyro/STG4000Reg.h b/drivers/video/kyro/STG4000Reg.h
index 5d62698..50f4670 100644
--- a/drivers/video/kyro/STG4000Reg.h
+++ b/drivers/video/kyro/STG4000Reg.h
@@ -73,210 +73,210 @@
/* Register Table */
typedef struct {
/* 0h */
- volatile unsigned long Thread0Enable; /* 0x0000 */
- volatile unsigned long Thread1Enable; /* 0x0004 */
- volatile unsigned long Thread0Recover; /* 0x0008 */
- volatile unsigned long Thread1Recover; /* 0x000C */
- volatile unsigned long Thread0Step; /* 0x0010 */
- volatile unsigned long Thread1Step; /* 0x0014 */
- volatile unsigned long VideoInStatus; /* 0x0018 */
- volatile unsigned long Core2InSignStart; /* 0x001C */
- volatile unsigned long Core1ResetVector; /* 0x0020 */
- volatile unsigned long Core1ROMOffset; /* 0x0024 */
- volatile unsigned long Core1ArbiterPriority; /* 0x0028 */
- volatile unsigned long VideoInControl; /* 0x002C */
- volatile unsigned long VideoInReg0CtrlA; /* 0x0030 */
- volatile unsigned long VideoInReg0CtrlB; /* 0x0034 */
- volatile unsigned long VideoInReg1CtrlA; /* 0x0038 */
- volatile unsigned long VideoInReg1CtrlB; /* 0x003C */
- volatile unsigned long Thread0Kicker; /* 0x0040 */
- volatile unsigned long Core2InputSign; /* 0x0044 */
- volatile unsigned long Thread0ProgCtr; /* 0x0048 */
- volatile unsigned long Thread1ProgCtr; /* 0x004C */
- volatile unsigned long Thread1Kicker; /* 0x0050 */
- volatile unsigned long GPRegister1; /* 0x0054 */
- volatile unsigned long GPRegister2; /* 0x0058 */
- volatile unsigned long GPRegister3; /* 0x005C */
- volatile unsigned long GPRegister4; /* 0x0060 */
- volatile unsigned long SerialIntA; /* 0x0064 */
+ volatile u32 Thread0Enable; /* 0x0000 */
+ volatile u32 Thread1Enable; /* 0x0004 */
+ volatile u32 Thread0Recover; /* 0x0008 */
+ volatile u32 Thread1Recover; /* 0x000C */
+ volatile u32 Thread0Step; /* 0x0010 */
+ volatile u32 Thread1Step; /* 0x0014 */
+ volatile u32 VideoInStatus; /* 0x0018 */
+ volatile u32 Core2InSignStart; /* 0x001C */
+ volatile u32 Core1ResetVector; /* 0x0020 */
+ volatile u32 Core1ROMOffset; /* 0x0024 */
+ volatile u32 Core1ArbiterPriority; /* 0x0028 */
+ volatile u32 VideoInControl; /* 0x002C */
+ volatile u32 VideoInReg0CtrlA; /* 0x0030 */
+ volatile u32 VideoInReg0CtrlB; /* 0x0034 */
+ volatile u32 VideoInReg1CtrlA; /* 0x0038 */
+ volatile u32 VideoInReg1CtrlB; /* 0x003C */
+ volatile u32 Thread0Kicker; /* 0x0040 */
+ volatile u32 Core2InputSign; /* 0x0044 */
+ volatile u32 Thread0ProgCtr; /* 0x0048 */
+ volatile u32 Thread1ProgCtr; /* 0x004C */
+ volatile u32 Thread1Kicker; /* 0x0050 */
+ volatile u32 GPRegister1; /* 0x0054 */
+ volatile u32 GPRegister2; /* 0x0058 */
+ volatile u32 GPRegister3; /* 0x005C */
+ volatile u32 GPRegister4; /* 0x0060 */
+ volatile u32 SerialIntA; /* 0x0064 */
- volatile unsigned long Fill0[6]; /* GAP 0x0068 - 0x007C */
+ volatile u32 Fill0[6]; /* GAP 0x0068 - 0x007C */
- volatile unsigned long SoftwareReset; /* 0x0080 */
- volatile unsigned long SerialIntB; /* 0x0084 */
+ volatile u32 SoftwareReset; /* 0x0080 */
+ volatile u32 SerialIntB; /* 0x0084 */
- volatile unsigned long Fill1[37]; /* GAP 0x0088 - 0x011C */
+ volatile u32 Fill1[37]; /* GAP 0x0088 - 0x011C */
- volatile unsigned long ROMELQV; /* 0x011C */
- volatile unsigned long WLWH; /* 0x0120 */
- volatile unsigned long ROMELWL; /* 0x0124 */
+ volatile u32 ROMELQV; /* 0x011C */
+ volatile u32 WLWH; /* 0x0120 */
+ volatile u32 ROMELWL; /* 0x0124 */
- volatile unsigned long dwFill_1; /* GAP 0x0128 */
+ volatile u32 dwFill_1; /* GAP 0x0128 */
- volatile unsigned long IntStatus; /* 0x012C */
- volatile unsigned long IntMask; /* 0x0130 */
- volatile unsigned long IntClear; /* 0x0134 */
+ volatile u32 IntStatus; /* 0x012C */
+ volatile u32 IntMask; /* 0x0130 */
+ volatile u32 IntClear; /* 0x0134 */
- volatile unsigned long Fill2[6]; /* GAP 0x0138 - 0x014C */
+ volatile u32 Fill2[6]; /* GAP 0x0138 - 0x014C */
- volatile unsigned long ROMGPIOA; /* 0x0150 */
- volatile unsigned long ROMGPIOB; /* 0x0154 */
- volatile unsigned long ROMGPIOC; /* 0x0158 */
- volatile unsigned long ROMGPIOD; /* 0x015C */
+ volatile u32 ROMGPIOA; /* 0x0150 */
+ volatile u32 ROMGPIOB; /* 0x0154 */
+ volatile u32 ROMGPIOC; /* 0x0158 */
+ volatile u32 ROMGPIOD; /* 0x015C */
- volatile unsigned long Fill3[2]; /* GAP 0x0160 - 0x0168 */
+ volatile u32 Fill3[2]; /* GAP 0x0160 - 0x0168 */
- volatile unsigned long AGPIntID; /* 0x0168 */
- volatile unsigned long AGPIntClassCode; /* 0x016C */
- volatile unsigned long AGPIntBIST; /* 0x0170 */
- volatile unsigned long AGPIntSSID; /* 0x0174 */
- volatile unsigned long AGPIntPMCSR; /* 0x0178 */
- volatile unsigned long VGAFrameBufBase; /* 0x017C */
- volatile unsigned long VGANotify; /* 0x0180 */
- volatile unsigned long DACPLLMode; /* 0x0184 */
- volatile unsigned long Core1VideoClockDiv; /* 0x0188 */
- volatile unsigned long AGPIntStat; /* 0x018C */
+ volatile u32 AGPIntID; /* 0x0168 */
+ volatile u32 AGPIntClassCode; /* 0x016C */
+ volatile u32 AGPIntBIST; /* 0x0170 */
+ volatile u32 AGPIntSSID; /* 0x0174 */
+ volatile u32 AGPIntPMCSR; /* 0x0178 */
+ volatile u32 VGAFrameBufBase; /* 0x017C */
+ volatile u32 VGANotify; /* 0x0180 */
+ volatile u32 DACPLLMode; /* 0x0184 */
+ volatile u32 Core1VideoClockDiv; /* 0x0188 */
+ volatile u32 AGPIntStat; /* 0x018C */
/*
- volatile unsigned long Fill4[0x0400/4 - 0x0190/4]; //GAP 0x0190 - 0x0400
- volatile unsigned long Fill5[0x05FC/4 - 0x0400/4]; //GAP 0x0400 - 0x05FC Fog Table
- volatile unsigned long Fill6[0x0604/4 - 0x0600/4]; //GAP 0x0600 - 0x0604
- volatile unsigned long Fill7[0x0680/4 - 0x0608/4]; //GAP 0x0608 - 0x0680
- volatile unsigned long Fill8[0x07FC/4 - 0x0684/4]; //GAP 0x0684 - 0x07FC
+ volatile u32 Fill4[0x0400/4 - 0x0190/4]; //GAP 0x0190 - 0x0400
+ volatile u32 Fill5[0x05FC/4 - 0x0400/4]; //GAP 0x0400 - 0x05FC Fog Table
+ volatile u32 Fill6[0x0604/4 - 0x0600/4]; //GAP 0x0600 - 0x0604
+ volatile u32 Fill7[0x0680/4 - 0x0608/4]; //GAP 0x0608 - 0x0680
+ volatile u32 Fill8[0x07FC/4 - 0x0684/4]; //GAP 0x0684 - 0x07FC
*/
- volatile unsigned long Fill4[412]; /* 0x0190 - 0x07FC */
+ volatile u32 Fill4[412]; /* 0x0190 - 0x07FC */
- volatile unsigned long TACtrlStreamBase; /* 0x0800 */
- volatile unsigned long TAObjDataBase; /* 0x0804 */
- volatile unsigned long TAPtrDataBase; /* 0x0808 */
- volatile unsigned long TARegionDataBase; /* 0x080C */
- volatile unsigned long TATailPtrBase; /* 0x0810 */
- volatile unsigned long TAPtrRegionSize; /* 0x0814 */
- volatile unsigned long TAConfiguration; /* 0x0818 */
- volatile unsigned long TAObjDataStartAddr; /* 0x081C */
- volatile unsigned long TAObjDataEndAddr; /* 0x0820 */
- volatile unsigned long TAXScreenClip; /* 0x0824 */
- volatile unsigned long TAYScreenClip; /* 0x0828 */
- volatile unsigned long TARHWClamp; /* 0x082C */
- volatile unsigned long TARHWCompare; /* 0x0830 */
- volatile unsigned long TAStart; /* 0x0834 */
- volatile unsigned long TAObjReStart; /* 0x0838 */
- volatile unsigned long TAPtrReStart; /* 0x083C */
- volatile unsigned long TAStatus1; /* 0x0840 */
- volatile unsigned long TAStatus2; /* 0x0844 */
- volatile unsigned long TAIntStatus; /* 0x0848 */
- volatile unsigned long TAIntMask; /* 0x084C */
+ volatile u32 TACtrlStreamBase; /* 0x0800 */
+ volatile u32 TAObjDataBase; /* 0x0804 */
+ volatile u32 TAPtrDataBase; /* 0x0808 */
+ volatile u32 TARegionDataBase; /* 0x080C */
+ volatile u32 TATailPtrBase; /* 0x0810 */
+ volatile u32 TAPtrRegionSize; /* 0x0814 */
+ volatile u32 TAConfiguration; /* 0x0818 */
+ volatile u32 TAObjDataStartAddr; /* 0x081C */
+ volatile u32 TAObjDataEndAddr; /* 0x0820 */
+ volatile u32 TAXScreenClip; /* 0x0824 */
+ volatile u32 TAYScreenClip; /* 0x0828 */
+ volatile u32 TARHWClamp; /* 0x082C */
+ volatile u32 TARHWCompare; /* 0x0830 */
+ volatile u32 TAStart; /* 0x0834 */
+ volatile u32 TAObjReStart; /* 0x0838 */
+ volatile u32 TAPtrReStart; /* 0x083C */
+ volatile u32 TAStatus1; /* 0x0840 */
+ volatile u32 TAStatus2; /* 0x0844 */
+ volatile u32 TAIntStatus; /* 0x0848 */
+ volatile u32 TAIntMask; /* 0x084C */
- volatile unsigned long Fill5[235]; /* GAP 0x0850 - 0x0BF8 */
+ volatile u32 Fill5[235]; /* GAP 0x0850 - 0x0BF8 */
- volatile unsigned long TextureAddrThresh; /* 0x0BFC */
- volatile unsigned long Core1Translation; /* 0x0C00 */
- volatile unsigned long TextureAddrReMap; /* 0x0C04 */
- volatile unsigned long RenderOutAGPRemap; /* 0x0C08 */
- volatile unsigned long _3DRegionReadTrans; /* 0x0C0C */
- volatile unsigned long _3DPtrReadTrans; /* 0x0C10 */
- volatile unsigned long _3DParamReadTrans; /* 0x0C14 */
- volatile unsigned long _3DRegionReadThresh; /* 0x0C18 */
- volatile unsigned long _3DPtrReadThresh; /* 0x0C1C */
- volatile unsigned long _3DParamReadThresh; /* 0x0C20 */
- volatile unsigned long _3DRegionReadAGPRemap; /* 0x0C24 */
- volatile unsigned long _3DPtrReadAGPRemap; /* 0x0C28 */
- volatile unsigned long _3DParamReadAGPRemap; /* 0x0C2C */
- volatile unsigned long ZBufferAGPRemap; /* 0x0C30 */
- volatile unsigned long TAIndexAGPRemap; /* 0x0C34 */
- volatile unsigned long TAVertexAGPRemap; /* 0x0C38 */
- volatile unsigned long TAUVAddrTrans; /* 0x0C3C */
- volatile unsigned long TATailPtrCacheTrans; /* 0x0C40 */
- volatile unsigned long TAParamWriteTrans; /* 0x0C44 */
- volatile unsigned long TAPtrWriteTrans; /* 0x0C48 */
- volatile unsigned long TAParamWriteThresh; /* 0x0C4C */
- volatile unsigned long TAPtrWriteThresh; /* 0x0C50 */
- volatile unsigned long TATailPtrCacheAGPRe; /* 0x0C54 */
- volatile unsigned long TAParamWriteAGPRe; /* 0x0C58 */
- volatile unsigned long TAPtrWriteAGPRe; /* 0x0C5C */
- volatile unsigned long SDRAMArbiterConf; /* 0x0C60 */
- volatile unsigned long SDRAMConf0; /* 0x0C64 */
- volatile unsigned long SDRAMConf1; /* 0x0C68 */
- volatile unsigned long SDRAMConf2; /* 0x0C6C */
- volatile unsigned long SDRAMRefresh; /* 0x0C70 */
- volatile unsigned long SDRAMPowerStat; /* 0x0C74 */
+ volatile u32 TextureAddrThresh; /* 0x0BFC */
+ volatile u32 Core1Translation; /* 0x0C00 */
+ volatile u32 TextureAddrReMap; /* 0x0C04 */
+ volatile u32 RenderOutAGPRemap; /* 0x0C08 */
+ volatile u32 _3DRegionReadTrans; /* 0x0C0C */
+ volatile u32 _3DPtrReadTrans; /* 0x0C10 */
+ volatile u32 _3DParamReadTrans; /* 0x0C14 */
+ volatile u32 _3DRegionReadThresh; /* 0x0C18 */
+ volatile u32 _3DPtrReadThresh; /* 0x0C1C */
+ volatile u32 _3DParamReadThresh; /* 0x0C20 */
+ volatile u32 _3DRegionReadAGPRemap; /* 0x0C24 */
+ volatile u32 _3DPtrReadAGPRemap; /* 0x0C28 */
+ volatile u32 _3DParamReadAGPRemap; /* 0x0C2C */
+ volatile u32 ZBufferAGPRemap; /* 0x0C30 */
+ volatile u32 TAIndexAGPRemap; /* 0x0C34 */
+ volatile u32 TAVertexAGPRemap; /* 0x0C38 */
+ volatile u32 TAUVAddrTrans; /* 0x0C3C */
+ volatile u32 TATailPtrCacheTrans; /* 0x0C40 */
+ volatile u32 TAParamWriteTrans; /* 0x0C44 */
+ volatile u32 TAPtrWriteTrans; /* 0x0C48 */
+ volatile u32 TAParamWriteThresh; /* 0x0C4C */
+ volatile u32 TAPtrWriteThresh; /* 0x0C50 */
+ volatile u32 TATailPtrCacheAGPRe; /* 0x0C54 */
+ volatile u32 TAParamWriteAGPRe; /* 0x0C58 */
+ volatile u32 TAPtrWriteAGPRe; /* 0x0C5C */
+ volatile u32 SDRAMArbiterConf; /* 0x0C60 */
+ volatile u32 SDRAMConf0; /* 0x0C64 */
+ volatile u32 SDRAMConf1; /* 0x0C68 */
+ volatile u32 SDRAMConf2; /* 0x0C6C */
+ volatile u32 SDRAMRefresh; /* 0x0C70 */
+ volatile u32 SDRAMPowerStat; /* 0x0C74 */
- volatile unsigned long Fill6[2]; /* GAP 0x0C78 - 0x0C7C */
+ volatile u32 Fill6[2]; /* GAP 0x0C78 - 0x0C7C */
- volatile unsigned long RAMBistData; /* 0x0C80 */
- volatile unsigned long RAMBistCtrl; /* 0x0C84 */
- volatile unsigned long FIFOBistKey; /* 0x0C88 */
- volatile unsigned long RAMBistResult; /* 0x0C8C */
- volatile unsigned long FIFOBistResult; /* 0x0C90 */
+ volatile u32 RAMBistData; /* 0x0C80 */
+ volatile u32 RAMBistCtrl; /* 0x0C84 */
+ volatile u32 FIFOBistKey; /* 0x0C88 */
+ volatile u32 RAMBistResult; /* 0x0C8C */
+ volatile u32 FIFOBistResult; /* 0x0C90 */
/*
- volatile unsigned long Fill11[0x0CBC/4 - 0x0C94/4]; //GAP 0x0C94 - 0x0CBC
- volatile unsigned long Fill12[0x0CD0/4 - 0x0CC0/4]; //GAP 0x0CC0 - 0x0CD0 3DRegisters
+ volatile u32 Fill11[0x0CBC/4 - 0x0C94/4]; //GAP 0x0C94 - 0x0CBC
+ volatile u32 Fill12[0x0CD0/4 - 0x0CC0/4]; //GAP 0x0CC0 - 0x0CD0 3DRegisters
*/
- volatile unsigned long Fill7[16]; /* 0x0c94 - 0x0cd0 */
+ volatile u32 Fill7[16]; /* 0x0c94 - 0x0cd0 */
- volatile unsigned long SDRAMAddrSign; /* 0x0CD4 */
- volatile unsigned long SDRAMDataSign; /* 0x0CD8 */
- volatile unsigned long SDRAMSignConf; /* 0x0CDC */
+ volatile u32 SDRAMAddrSign; /* 0x0CD4 */
+ volatile u32 SDRAMDataSign; /* 0x0CD8 */
+ volatile u32 SDRAMSignConf; /* 0x0CDC */
/* DWFILL; //GAP 0x0CE0 */
- volatile unsigned long dwFill_2;
+ volatile u32 dwFill_2;
- volatile unsigned long ISPSignature; /* 0x0CE4 */
+ volatile u32 ISPSignature; /* 0x0CE4 */
- volatile unsigned long Fill8[454]; /*GAP 0x0CE8 - 0x13FC */
+ volatile u32 Fill8[454]; /*GAP 0x0CE8 - 0x13FC */
- volatile unsigned long DACPrimAddress; /* 0x1400 */
- volatile unsigned long DACPrimSize; /* 0x1404 */
- volatile unsigned long DACCursorAddr; /* 0x1408 */
- volatile unsigned long DACCursorCtrl; /* 0x140C */
- volatile unsigned long DACOverlayAddr; /* 0x1410 */
- volatile unsigned long DACOverlayUAddr; /* 0x1414 */
- volatile unsigned long DACOverlayVAddr; /* 0x1418 */
- volatile unsigned long DACOverlaySize; /* 0x141C */
- volatile unsigned long DACOverlayVtDec; /* 0x1420 */
+ volatile u32 DACPrimAddress; /* 0x1400 */
+ volatile u32 DACPrimSize; /* 0x1404 */
+ volatile u32 DACCursorAddr; /* 0x1408 */
+ volatile u32 DACCursorCtrl; /* 0x140C */
+ volatile u32 DACOverlayAddr; /* 0x1410 */
+ volatile u32 DACOverlayUAddr; /* 0x1414 */
+ volatile u32 DACOverlayVAddr; /* 0x1418 */
+ volatile u32 DACOverlaySize; /* 0x141C */
+ volatile u32 DACOverlayVtDec; /* 0x1420 */
- volatile unsigned long Fill9[9]; /* GAP 0x1424 - 0x1444 */
+ volatile u32 Fill9[9]; /* GAP 0x1424 - 0x1444 */
- volatile unsigned long DACVerticalScal; /* 0x1448 */
- volatile unsigned long DACPixelFormat; /* 0x144C */
- volatile unsigned long DACHorizontalScal; /* 0x1450 */
- volatile unsigned long DACVidWinStart; /* 0x1454 */
- volatile unsigned long DACVidWinEnd; /* 0x1458 */
- volatile unsigned long DACBlendCtrl; /* 0x145C */
- volatile unsigned long DACHorTim1; /* 0x1460 */
- volatile unsigned long DACHorTim2; /* 0x1464 */
- volatile unsigned long DACHorTim3; /* 0x1468 */
- volatile unsigned long DACVerTim1; /* 0x146C */
- volatile unsigned long DACVerTim2; /* 0x1470 */
- volatile unsigned long DACVerTim3; /* 0x1474 */
- volatile unsigned long DACBorderColor; /* 0x1478 */
- volatile unsigned long DACSyncCtrl; /* 0x147C */
- volatile unsigned long DACStreamCtrl; /* 0x1480 */
- volatile unsigned long DACLUTAddress; /* 0x1484 */
- volatile unsigned long DACLUTData; /* 0x1488 */
- volatile unsigned long DACBurstCtrl; /* 0x148C */
- volatile unsigned long DACCrcTrigger; /* 0x1490 */
- volatile unsigned long DACCrcDone; /* 0x1494 */
- volatile unsigned long DACCrcResult1; /* 0x1498 */
- volatile unsigned long DACCrcResult2; /* 0x149C */
- volatile unsigned long DACLinecount; /* 0x14A0 */
+ volatile u32 DACVerticalScal; /* 0x1448 */
+ volatile u32 DACPixelFormat; /* 0x144C */
+ volatile u32 DACHorizontalScal; /* 0x1450 */
+ volatile u32 DACVidWinStart; /* 0x1454 */
+ volatile u32 DACVidWinEnd; /* 0x1458 */
+ volatile u32 DACBlendCtrl; /* 0x145C */
+ volatile u32 DACHorTim1; /* 0x1460 */
+ volatile u32 DACHorTim2; /* 0x1464 */
+ volatile u32 DACHorTim3; /* 0x1468 */
+ volatile u32 DACVerTim1; /* 0x146C */
+ volatile u32 DACVerTim2; /* 0x1470 */
+ volatile u32 DACVerTim3; /* 0x1474 */
+ volatile u32 DACBorderColor; /* 0x1478 */
+ volatile u32 DACSyncCtrl; /* 0x147C */
+ volatile u32 DACStreamCtrl; /* 0x1480 */
+ volatile u32 DACLUTAddress; /* 0x1484 */
+ volatile u32 DACLUTData; /* 0x1488 */
+ volatile u32 DACBurstCtrl; /* 0x148C */
+ volatile u32 DACCrcTrigger; /* 0x1490 */
+ volatile u32 DACCrcDone; /* 0x1494 */
+ volatile u32 DACCrcResult1; /* 0x1498 */
+ volatile u32 DACCrcResult2; /* 0x149C */
+ volatile u32 DACLinecount; /* 0x14A0 */
- volatile unsigned long Fill10[151]; /*GAP 0x14A4 - 0x16FC */
+ volatile u32 Fill10[151]; /*GAP 0x14A4 - 0x16FC */
- volatile unsigned long DigVidPortCtrl; /* 0x1700 */
- volatile unsigned long DigVidPortStat; /* 0x1704 */
+ volatile u32 DigVidPortCtrl; /* 0x1700 */
+ volatile u32 DigVidPortStat; /* 0x1704 */
/*
- volatile unsigned long Fill11[0x1FFC/4 - 0x1708/4]; //GAP 0x1708 - 0x1FFC
- volatile unsigned long Fill17[0x3000/4 - 0x2FFC/4]; //GAP 0x2000 - 0x2FFC ALUT
+ volatile u32 Fill11[0x1FFC/4 - 0x1708/4]; //GAP 0x1708 - 0x1FFC
+ volatile u32 Fill17[0x3000/4 - 0x2FFC/4]; //GAP 0x2000 - 0x2FFC ALUT
*/
- volatile unsigned long Fill11[1598];
+ volatile u32 Fill11[1598];
/* DWFILL; //GAP 0x3000 ALUT 256MB offset */
- volatile unsigned long Fill_3;
+ volatile u32 Fill_3;
} STG4000REG;
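
The STG4000 block is a memory-mapped register file, so its fields move from unsigned long, which is 8 bytes on 64-bit kernels and would shift every later offset, to the fixed 4-byte u32. A small compile-time check of the same idea with standard C types (demo_regs is an invented cut-down map, not the real layout):

    #include <assert.h>
    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct demo_regs {
            volatile uint32_t thread0_enable;       /* 0x0000 */
            volatile uint32_t thread1_enable;       /* 0x0004 */
            volatile uint32_t thread0_recover;      /* 0x0008 */
    };

    /* With 'unsigned long' members on LP64, the second field would land at 0x0008. */
    static_assert(offsetof(struct demo_regs, thread1_enable) == 0x0004,
                  "register offsets require fixed-width fields");

    int main(void)
    {
            printf("sizeof(struct demo_regs) = %zu\n", sizeof(struct demo_regs));
            return 0;
    }
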
diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
index 260cca7..26e83d7 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -815,8 +815,15 @@
par->pmi_setpal = pmi_setpal;
par->ypan = ypan;
- if (par->pmi_setpal || par->ypan)
- uvesafb_vbe_getpmi(task, par);
+ if (par->pmi_setpal || par->ypan) {
+ if (__supported_pte_mask & _PAGE_NX) {
+ par->pmi_setpal = par->ypan = 0;
+ printk(KERN_WARNING "uvesafb: NX protection is active, "
+ "better not use the PMI.\n");
+ } else {
+ uvesafb_vbe_getpmi(task, par);
+ }
+ }
#else
/* The protected mode interface is not available on non-x86. */
par->pmi_setpal = par->ypan = 0;
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index d286b40..86eff48 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -405,6 +405,7 @@
bio_put(bio);
bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
+ BUG_ON(!bio);
bio->bi_private = cb;
bio->bi_end_io = end_compressed_bio_write;
bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
@@ -687,6 +688,7 @@
comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
GFP_NOFS);
+ BUG_ON(!comp_bio);
comp_bio->bi_private = cb;
comp_bio->bi_end_io = end_compressed_bio_read;
diff --git a/fs/btrfs/extent-tree.c b/fs/btrfs/extent-tree.c
index a844204..2b35f8d 100644
--- a/fs/btrfs/extent-tree.c
+++ b/fs/btrfs/extent-tree.c
@@ -529,9 +529,7 @@
* allocate blocks for the tree root we can't do the fast caching since
* we likely hold important locks.
*/
- if (trans && (!trans->transaction->in_commit) &&
- (root && root != root->fs_info->tree_root) &&
- btrfs_test_opt(root, SPACE_CACHE)) {
+ if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
ret = load_free_space_cache(fs_info, cache);
spin_lock(&cache->lock);
@@ -3152,15 +3150,14 @@
/*
* returns target flags in extended format or 0 if restripe for this
* chunk_type is not in progress
+ *
+ * should be called with either volume_mutex or balance_lock held
*/
static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
{
struct btrfs_balance_control *bctl = fs_info->balance_ctl;
u64 target = 0;
- BUG_ON(!mutex_is_locked(&fs_info->volume_mutex) &&
- !spin_is_locked(&fs_info->balance_lock));
-
if (!bctl)
return 0;
@@ -4205,7 +4202,7 @@
num_bytes += div64_u64(data_used + meta_used, 50);
if (num_bytes * 3 > meta_used)
- num_bytes = div64_u64(meta_used, 3) * 2;
+ num_bytes = div64_u64(meta_used, 3);
return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
}
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8d904dd..cd4b5e4 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1937,7 +1937,7 @@
struct btrfs_mapping_tree *map_tree = &root->fs_info->mapping_tree;
u64 start = eb->start;
unsigned long i, num_pages = num_extent_pages(eb->start, eb->len);
- int ret;
+ int ret = 0;
for (i = 0; i < num_pages; i++) {
struct page *p = extent_buffer_page(eb, i);
@@ -2180,6 +2180,10 @@
}
bio = bio_alloc(GFP_NOFS, 1);
+ if (!bio) {
+ free_io_failure(inode, failrec, 0);
+ return -EIO;
+ }
bio->bi_private = state;
bio->bi_end_io = failed_bio->bi_end_io;
bio->bi_sector = failrec->logical >> 9;
diff --git a/fs/btrfs/free-space-cache.c b/fs/btrfs/free-space-cache.c
index e88330d..202008e 100644
--- a/fs/btrfs/free-space-cache.c
+++ b/fs/btrfs/free-space-cache.c
@@ -748,13 +748,6 @@
u64 used = btrfs_block_group_used(&block_group->item);
/*
- * If we're unmounting then just return, since this does a search on the
- * normal root and not the commit root and we could deadlock.
- */
- if (btrfs_fs_closing(fs_info))
- return 0;
-
- /*
* If this block group has been marked to be cleared for one reason or
* another then we can't trust the on disk cache, so just return.
*/
@@ -768,6 +761,8 @@
path = btrfs_alloc_path();
if (!path)
return 0;
+ path->search_commit_root = 1;
+ path->skip_locking = 1;
inode = lookup_free_space_inode(root, block_group, path);
if (IS_ERR(inode)) {
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 90acc82..bc015f7 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1044,6 +1044,8 @@
BUG_ON(!page->page);
bio = bio_alloc(GFP_NOFS, 1);
+ if (!bio)
+ return -EIO;
bio->bi_bdev = page->bdev;
bio->bi_sector = page->physical >> 9;
bio->bi_end_io = scrub_complete_bio_end_io;
@@ -1171,6 +1173,8 @@
DECLARE_COMPLETION_ONSTACK(complete);
bio = bio_alloc(GFP_NOFS, 1);
+ if (!bio)
+ return -EIO;
bio->bi_bdev = page_bad->bdev;
bio->bi_sector = page_bad->physical >> 9;
bio->bi_end_io = scrub_complete_bio_end_io;
diff --git a/fs/btrfs/transaction.c b/fs/btrfs/transaction.c
index 8da29e8..11b77a5 100644
--- a/fs/btrfs/transaction.c
+++ b/fs/btrfs/transaction.c
@@ -480,6 +480,7 @@
struct btrfs_transaction *cur_trans = trans->transaction;
struct btrfs_fs_info *info = root->fs_info;
int count = 0;
+ int err = 0;
if (--trans->use_count) {
trans->block_rsv = trans->orig_rsv;
@@ -532,18 +533,18 @@
if (current->journal_info == trans)
current->journal_info = NULL;
- memset(trans, 0, sizeof(*trans));
- kmem_cache_free(btrfs_trans_handle_cachep, trans);
if (throttle)
btrfs_run_delayed_iputs(root);
if (trans->aborted ||
root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
- return -EIO;
+ err = -EIO;
}
- return 0;
+ memset(trans, 0, sizeof(*trans));
+ kmem_cache_free(btrfs_trans_handle_cachep, trans);
+ return err;
}
int btrfs_end_transaction(struct btrfs_trans_handle *trans,
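
The transaction.c hunk reorders the cleanup so trans->aborted is examined before the handle is memset and handed back to the slab cache; the old order read freed memory to decide the return value. The same rule in miniature:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct handle { int aborted; };

    static int end_transaction(struct handle *h)
    {
            int err = 0;

            /* Read everything still needed from *h ... */
            if (h->aborted)
                    err = -5;               /* stands in for -EIO */

            /* ... and only then poison and free it. */
            memset(h, 0, sizeof(*h));
            free(h);
            return err;
    }

    int main(void)
    {
            struct handle *h = calloc(1, sizeof(*h));

            if (!h)
                    return 1;
            h->aborted = 1;
            printf("end_transaction() = %d\n", end_transaction(h));
            return 0;
    }
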
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index a872b48..759d024 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -3833,6 +3833,7 @@
int sub_stripes = 0;
u64 stripes_per_dev = 0;
u32 remaining_stripes = 0;
+ u32 last_stripe = 0;
if (map->type &
(BTRFS_BLOCK_GROUP_RAID0 | BTRFS_BLOCK_GROUP_RAID10)) {
@@ -3846,6 +3847,8 @@
stripe_nr_orig,
factor,
&remaining_stripes);
+ div_u64_rem(stripe_nr_end - 1, factor, &last_stripe);
+ last_stripe *= sub_stripes;
}
for (i = 0; i < num_stripes; i++) {
@@ -3858,16 +3861,29 @@
BTRFS_BLOCK_GROUP_RAID10)) {
bbio->stripes[i].length = stripes_per_dev *
map->stripe_len;
+
if (i / sub_stripes < remaining_stripes)
bbio->stripes[i].length +=
map->stripe_len;
+
+ /*
+ * Special for the first stripe and
+ * the last stripe:
+ *
+ * |-------|...|-------|
+ * |----------|
+ * off end_off
+ */
if (i < sub_stripes)
bbio->stripes[i].length -=
stripe_offset;
- if ((i / sub_stripes + 1) %
- sub_stripes == remaining_stripes)
+
+ if (stripe_index >= last_stripe &&
+ stripe_index <= (last_stripe +
+ sub_stripes - 1))
bbio->stripes[i].length -=
stripe_end_offset;
+
if (i == sub_stripes - 1)
stripe_offset = 0;
} else
diff --git a/include/linux/amba/pl022.h b/include/linux/amba/pl022.h
index b8c5112..76dd1b1 100644
--- a/include/linux/amba/pl022.h
+++ b/include/linux/amba/pl022.h
@@ -25,6 +25,8 @@
#ifndef _SSP_PL022_H
#define _SSP_PL022_H
+#include <linux/types.h>
+
/**
* whether SSP is in loopback mode or not
*/
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 606cf33..2aa2466 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -426,14 +426,10 @@
(1 << QUEUE_FLAG_SAME_COMP) | \
(1 << QUEUE_FLAG_ADD_RANDOM))
-static inline int queue_is_locked(struct request_queue *q)
+static inline void queue_lockdep_assert_held(struct request_queue *q)
{
-#ifdef CONFIG_SMP
- spinlock_t *lock = q->queue_lock;
- return lock && spin_is_locked(lock);
-#else
- return 1;
-#endif
+ if (q->queue_lock)
+ lockdep_assert_held(q->queue_lock);
}
static inline void queue_flag_set_unlocked(unsigned int flag,
@@ -445,7 +441,7 @@
static inline int queue_flag_test_and_clear(unsigned int flag,
struct request_queue *q)
{
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
if (test_bit(flag, &q->queue_flags)) {
__clear_bit(flag, &q->queue_flags);
@@ -458,7 +454,7 @@
static inline int queue_flag_test_and_set(unsigned int flag,
struct request_queue *q)
{
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
if (!test_bit(flag, &q->queue_flags)) {
__set_bit(flag, &q->queue_flags);
@@ -470,7 +466,7 @@
static inline void queue_flag_set(unsigned int flag, struct request_queue *q)
{
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
__set_bit(flag, &q->queue_flags);
}
@@ -487,7 +483,7 @@
static inline void queue_flag_clear(unsigned int flag, struct request_queue *q)
{
- WARN_ON_ONCE(!queue_is_locked(q));
+ queue_lockdep_assert_held(q);
__clear_bit(flag, &q->queue_flags);
}
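
queue_lockdep_assert_held() replaces a spin_is_locked() test, which only shows that somebody holds the lock (and was compiled to always-true on !SMP), with lockdep_assert_held(), which checks that the current task is the holder. A userspace analogue that tracks the owner by hand (in the kernel this bookkeeping comes from lockdep, not from extra fields); build with cc -pthread:

    #include <assert.h>
    #include <pthread.h>
    #include <stdio.h>

    struct queue {
            pthread_mutex_t lock;
            pthread_t owner;        /* stand-in for lockdep's per-lock tracking */
            int has_owner;
    };

    static void queue_lock(struct queue *q)
    {
            pthread_mutex_lock(&q->lock);
            q->owner = pthread_self();
            q->has_owner = 1;
    }

    static void queue_unlock(struct queue *q)
    {
            q->has_owner = 0;
            pthread_mutex_unlock(&q->lock);
    }

    /* "Is it locked?" can be true because of another thread; "do I hold it?" cannot. */
    static void queue_assert_held(struct queue *q)
    {
            assert(q->has_owner && pthread_equal(q->owner, pthread_self()));
    }

    int main(void)
    {
            struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER };

            queue_lock(&q);
            queue_assert_held(&q);  /* fine: this thread is the holder */
            queue_unlock(&q);
            return 0;
    }
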
diff --git a/include/linux/stddef.h b/include/linux/stddef.h
index 6a40c76..1747b67 100644
--- a/include/linux/stddef.h
+++ b/include/linux/stddef.h
@@ -3,15 +3,11 @@
#include <linux/compiler.h>
-#undef NULL
-#if defined(__cplusplus)
-#define NULL 0
-#else
-#define NULL ((void *)0)
-#endif
-
#ifdef __KERNEL__
+#undef NULL
+#define NULL ((void *)0)
+
enum {
false = 0,
true = 1
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index 0c56d44..1588e3b 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -11,6 +11,7 @@
#include <linux/irq_work.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
+#include <linux/irqflags.h>
#include <asm/processor.h>
/*