Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Pull networking fixes from David Miller:
1) Fix netpoll OOPS in r8169, from Ville Syrjälä.
2) Fix bpf instruction alignment on powerpc et al., from Eric Dumazet.
3) Don't ignore IFLA_MTU attribute when creating new ipvlan links. From
Xin Long.
4) Fix use after free in AF_PACKET, from Eric Dumazet.
5) Mis-matched RTNL unlock in xen-netfront, from Ross Lagerwall.
6) Fix VSOCK loopback on big-endian, from Claudio Imbrenda.
7) Missing RX buffer offset correction when computing DMA addresses in
mvneta driver, from Antoine Tenart.
8) Fix crashes in DCCP's ccid3_hc_rx_send_feedback, from Eric Dumazet.
* git://git.kernel.org/pub/scm/linux/kernel/git/davem/net: (34 commits)
sfc: make function efx_rps_hash_bucket static
strparser: Corrected typo in documentation.
qmi_wwan: add support for the Dell Wireless 5821e module
cxgb4: when disabling dcb set txq dcb priority to 0
net_sched: remove a bogus warning in hfsc
net: dccp: switch rx_tstamp_last_feedback to monotonic clock
net: dccp: avoid crash in ccid3_hc_rx_send_feedback()
net: Remove depends on HAS_DMA in case of platform dependency
MAINTAINERS: Add file patterns for dsa device tree bindings
net: mscc: make sparse happy
net: mvneta: fix the Rx desc DMA address in the Rx path
Documentation: e1000: Fix docs build error
Documentation: e100: Fix docs build error
Documentation: e1000: Use correct heading adornment
Documentation: e100: Use correct heading adornment
ipv6: mcast: fix unsolicited report interval after receiving querys
vhost_net: validate sock before trying to put its fd
VSOCK: fix loopback on big-endian systems
net: ethernet: ti: davinci_cpdma: make function cpdma_desc_pool_create static
xen-netfront: Update features after registering netdev
...
diff --git a/Documentation/admin-guide/pm/intel_pstate.rst b/Documentation/admin-guide/pm/intel_pstate.rst
index ab2fe0e..8b91649 100644
--- a/Documentation/admin-guide/pm/intel_pstate.rst
+++ b/Documentation/admin-guide/pm/intel_pstate.rst
@@ -410,7 +410,7 @@
That only is supported in some configurations, though (for example, if
the `HWP feature is enabled in the processor <Active Mode With HWP_>`_,
the operation mode of the driver cannot be changed), and if it is not
- supported in the current configuration, writes to this attribute with
+ supported in the current configuration, writes to this attribute will
fail with an appropriate error.
Interpretation of Policy Attributes
diff --git a/Documentation/trace/histogram.txt b/Documentation/trace/histogram.txt
index e73bcf9..7ffea6a 100644
--- a/Documentation/trace/histogram.txt
+++ b/Documentation/trace/histogram.txt
@@ -1729,35 +1729,35 @@
associated event field will be saved in a variable but won't be summed
as a value:
- # echo 'hist:keys=next_pid:ts1=common_timestamp ... >> event/trigger
+ # echo 'hist:keys=next_pid:ts1=common_timestamp ...' >> event/trigger
Multiple variables can be assigned at the same time. The below would
result in both ts0 and b being created as variables, with both
common_timestamp and field1 additionally being summed as values:
- # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ... >> \
+ # echo 'hist:keys=pid:vals=$ts0,$b:ts0=common_timestamp,b=field1 ...' >> \
event/trigger
Note that variable assignments can appear either preceding or
following their use. The command below behaves identically to the
command above:
- # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ... >> \
+ # echo 'hist:keys=pid:ts0=common_timestamp,b=field1:vals=$ts0,$b ...' >> \
event/trigger
Any number of variables not bound to a 'vals=' prefix can also be
assigned by simply separating them with colons. Below is the same
thing but without the values being summed in the histogram:
- # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ... >> event/trigger
+ # echo 'hist:keys=pid:ts0=common_timestamp:b=field1 ...' >> event/trigger
Variables set as above can be referenced and used in expressions on
another event.
For example, here's how a latency can be calculated:
- # echo 'hist:keys=pid,prio:ts0=common_timestamp ... >> event1/trigger
- # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ... >> event2/trigger
+ # echo 'hist:keys=pid,prio:ts0=common_timestamp ...' >> event1/trigger
+ # echo 'hist:keys=next_pid:wakeup_lat=common_timestamp-$ts0 ...' >> event2/trigger
In the first line above, the event's timetamp is saved into the
variable ts0. In the next line, ts0 is subtracted from the second
@@ -1766,7 +1766,7 @@
makes use of the wakeup_lat variable to compute a combined latency
using the same key and variable from yet another event:
- # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ... >> event3/trigger
+ # echo 'hist:key=pid:wakeupswitch_lat=$wakeup_lat+$switchtime_lat ...' >> event3/trigger
2.2.2 Synthetic Events
----------------------
@@ -1807,10 +1807,11 @@
At this point, there isn't yet an actual 'wakeup_latency' event
instantiated in the event subsytem - for this to happen, a 'hist
trigger action' needs to be instantiated and bound to actual fields
-and variables defined on other events (see Section 6.3.3 below).
+and variables defined on other events (see Section 2.2.3 below on
+how that is done using hist trigger 'onmatch' action). Once that is
+done, the 'wakeup_latency' synthetic event instance is created.
-Once that is done, an event instance is created, and a histogram can
-be defined using it:
+A histogram can now be defined for the new synthetic event:
# echo 'hist:keys=pid,prio,lat.log2:sort=pid,lat' >> \
/sys/kernel/debug/tracing/events/synthetic/wakeup_latency/trigger
@@ -1960,7 +1961,7 @@
back to that pid, the timestamp difference is calculated. If the
resulting latency, stored in wakeup_lat, exceeds the current
maximum latency, the values specified in the save() fields are
- recoreded:
+ recorded:
# echo 'hist:keys=pid:ts0=common_timestamp.usecs \
if comm=="cyclictest"' >> \
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 495b774..d10944e 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -4610,7 +4610,7 @@
reset, migration and nested KVM for branch prediction blocking. The stfle
facility 82 should not be provided to the guest without this capability.
-8.14 KVM_CAP_HYPERV_TLBFLUSH
+8.18 KVM_CAP_HYPERV_TLBFLUSH
Architectures: x86
diff --git a/MAINTAINERS b/MAINTAINERS
index 977f9a4..e19ec6d 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -15573,6 +15573,7 @@
L: linux-kernel@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
S: Maintained
+F: Documentation/devicetree/bindings/x86/
F: Documentation/x86/
F: arch/x86/
diff --git a/Makefile b/Makefile
index ca2af1a..c913259 100644
--- a/Makefile
+++ b/Makefile
@@ -2,7 +2,7 @@
VERSION = 4
PATCHLEVEL = 18
SUBLEVEL = 0
-EXTRAVERSION = -rc1
+EXTRAVERSION = -rc2
NAME = Merciless Moray
# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 0c4805a..04a4a13 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -555,11 +555,6 @@
If you don't know what to do here, say N.
-config HAVE_DEC_LOCK
- bool
- depends on SMP
- default y
-
config NR_CPUS
int "Maximum number of CPUs (2-32)"
range 2 32
diff --git a/arch/alpha/lib/Makefile b/arch/alpha/lib/Makefile
index 04f9729..854d5e7 100644
--- a/arch/alpha/lib/Makefile
+++ b/arch/alpha/lib/Makefile
@@ -35,8 +35,6 @@
callback_srm.o srm_puts.o srm_printk.o \
fls.o
-lib-$(CONFIG_SMP) += dec_and_lock.o
-
# The division routines are built from single source, with different defines.
AFLAGS___divqu.o = -DDIV
AFLAGS___remqu.o = -DREM
diff --git a/arch/alpha/lib/dec_and_lock.c b/arch/alpha/lib/dec_and_lock.c
deleted file mode 100644
index a117707..0000000
--- a/arch/alpha/lib/dec_and_lock.c
+++ /dev/null
@@ -1,44 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * arch/alpha/lib/dec_and_lock.c
- *
- * ll/sc version of atomic_dec_and_lock()
- *
- */
-
-#include <linux/spinlock.h>
-#include <linux/atomic.h>
-#include <linux/export.h>
-
- asm (".text \n\
- .global _atomic_dec_and_lock \n\
- .ent _atomic_dec_and_lock \n\
- .align 4 \n\
-_atomic_dec_and_lock: \n\
- .prologue 0 \n\
-1: ldl_l $1, 0($16) \n\
- subl $1, 1, $1 \n\
- beq $1, 2f \n\
- stl_c $1, 0($16) \n\
- beq $1, 4f \n\
- mb \n\
- clr $0 \n\
- ret \n\
-2: br $29, 3f \n\
-3: ldgp $29, 0($29) \n\
- br $atomic_dec_and_lock_1..ng \n\
- .subsection 2 \n\
-4: br 1b \n\
- .previous \n\
- .end _atomic_dec_and_lock");
-
-static int __used atomic_dec_and_lock_1(atomic_t *atomic, spinlock_t *lock)
-{
- /* Slow path */
- spin_lock(lock);
- if (atomic_dec_and_test(atomic))
- return 1;
- spin_unlock(lock);
- return 0;
-}
-EXPORT_SYMBOL(_atomic_dec_and_lock);
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index f09e9d66..dec130e 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -544,7 +544,7 @@
* Increment event counter and perform fixup for the pre-signal
* frame.
*/
- rseq_signal_deliver(regs);
+ rseq_signal_deliver(ksig, regs);
/*
* Set up the stack frame
@@ -666,7 +666,7 @@
} else {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
- rseq_handle_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
}
local_irq_disable();
diff --git a/arch/arm/xen/enlighten.c b/arch/arm/xen/enlighten.c
index 8073625..07060e5 100644
--- a/arch/arm/xen/enlighten.c
+++ b/arch/arm/xen/enlighten.c
@@ -59,6 +59,9 @@
static __read_mostly unsigned int xen_events_irq;
+uint32_t xen_start_flags;
+EXPORT_SYMBOL(xen_start_flags);
+
int xen_remap_domain_gfn_array(struct vm_area_struct *vma,
unsigned long addr,
xen_pfn_t *gfn, int nr,
@@ -293,9 +296,7 @@
xen_setup_features();
if (xen_feature(XENFEAT_dom0))
- xen_start_info->flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
- else
- xen_start_info->flags &= ~(SIF_INITDOMAIN|SIF_PRIVILEGED);
+ xen_start_flags |= SIF_INITDOMAIN|SIF_PRIVILEGED;
if (!console_set_on_cmdline && !xen_initial_domain())
add_preferred_console("hvc", 0, NULL);
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 253188f..e3e5095 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -223,8 +223,8 @@
kernel_neon_begin();
aes_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
(u8 *)ctx->key_enc, rounds, blocks, walk.iv);
- err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
kernel_neon_end();
+ err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
}
if (walk.nbytes) {
u8 __aligned(8) tail[AES_BLOCK_SIZE];
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index fda9a8c..fe8777b 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -306,6 +306,7 @@
#define KVM_ARM64_FP_ENABLED (1 << 1) /* guest FP regs loaded */
#define KVM_ARM64_FP_HOST (1 << 2) /* host FP regs loaded */
#define KVM_ARM64_HOST_SVE_IN_USE (1 << 3) /* backup for host TIF_SVE */
+#define KVM_ARM64_HOST_SVE_ENABLED (1 << 4) /* SVE enabled for EL0 */
#define vcpu_gp_regs(v) (&(v)->arch.ctxt.gp_regs)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index 6171178..a8f8481 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -728,6 +728,17 @@
asm volatile("msr_s " __stringify(r) ", %x0" : : "rZ" (__val)); \
} while (0)
+/*
+ * Modify bits in a sysreg. Bits in the clear mask are zeroed, then bits in the
+ * set mask are set. Other bits are left as-is.
+ */
+#define sysreg_clear_set(sysreg, clear, set) do { \
+ u64 __scs_val = read_sysreg(sysreg); \
+ u64 __scs_new = (__scs_val & ~(u64)(clear)) | (set); \
+ if (__scs_new != __scs_val) \
+ write_sysreg(__scs_new, sysreg); \
+} while (0)
+
static inline void config_sctlr_el1(u32 clear, u32 set)
{
u32 val;
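
For reference, the read-modify-write semantics of the new sysreg_clear_set() helper can be sketched as a plain-C userspace analog (illustrative only; a real system register needs the read_sysreg()/write_sysreg() accessors, which the macro wraps):

    #include <stdint.h>
    #include <stdio.h>

    /* Analog of sysreg_clear_set(): zero the bits in 'clear', then set
     * the bits in 'set', and skip the write when the value is unchanged. */
    static void reg_clear_set(uint64_t *reg, uint64_t clear, uint64_t set)
    {
            uint64_t old = *reg;
            uint64_t new = (old & ~clear) | set;

            if (new != old)
                    *reg = new;
    }

    int main(void)
    {
            uint64_t cpacr = 0xf0;

            reg_clear_set(&cpacr, 0x30, 0x01);  /* clear bits 4-5, set bit 0 */
            printf("0x%llx\n", (unsigned long long)cpacr);  /* prints 0xc1 */
            return 0;
    }
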
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index d2856b1..f24892a 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -937,7 +937,7 @@
__kpti_forced = enabled ? 1 : -1;
return 0;
}
-__setup("kpti=", parse_kpti);
+early_param("kpti", parse_kpti);
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
#ifdef CONFIG_ARM64_HW_AFDBM
diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c
index f3e2e3ae..2faa986 100644
--- a/arch/arm64/kernel/smp.c
+++ b/arch/arm64/kernel/smp.c
@@ -179,7 +179,7 @@
* This is the secondary CPU boot entry. We're using this CPUs
* idle thread stack, but a set of temporary page tables.
*/
-asmlinkage void secondary_start_kernel(void)
+asmlinkage notrace void secondary_start_kernel(void)
{
u64 mpidr = read_cpuid_mpidr() & MPIDR_HWID_BITMASK;
struct mm_struct *mm = &init_mm;
diff --git a/arch/arm64/kvm/fpsimd.c b/arch/arm64/kvm/fpsimd.c
index dc6ecfa..aac7808 100644
--- a/arch/arm64/kvm/fpsimd.c
+++ b/arch/arm64/kvm/fpsimd.c
@@ -5,13 +5,14 @@
* Copyright 2018 Arm Limited
* Author: Dave Martin <Dave.Martin@arm.com>
*/
-#include <linux/bottom_half.h>
+#include <linux/irqflags.h>
#include <linux/sched.h>
#include <linux/thread_info.h>
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
+#include <asm/sysreg.h>
/*
* Called on entry to KVM_RUN unless this vcpu previously ran at least
@@ -61,10 +62,16 @@
{
BUG_ON(!current->mm);
- vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED | KVM_ARM64_HOST_SVE_IN_USE);
+ vcpu->arch.flags &= ~(KVM_ARM64_FP_ENABLED |
+ KVM_ARM64_HOST_SVE_IN_USE |
+ KVM_ARM64_HOST_SVE_ENABLED);
vcpu->arch.flags |= KVM_ARM64_FP_HOST;
+
if (test_thread_flag(TIF_SVE))
vcpu->arch.flags |= KVM_ARM64_HOST_SVE_IN_USE;
+
+ if (read_sysreg(cpacr_el1) & CPACR_EL1_ZEN_EL0EN)
+ vcpu->arch.flags |= KVM_ARM64_HOST_SVE_ENABLED;
}
/*
@@ -92,19 +99,30 @@
*/
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
- local_bh_disable();
+ unsigned long flags;
- update_thread_flag(TIF_SVE,
- vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+ local_irq_save(flags);
if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
/* Clean guest FP state to memory and invalidate cpu view */
fpsimd_save();
fpsimd_flush_cpu_state();
- } else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
- /* Ensure user trap controls are correctly restored */
- fpsimd_bind_task_to_cpu();
+ } else if (system_supports_sve()) {
+ /*
+ * The FPSIMD/SVE state in the CPU has not been touched, and we
+ * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
+ * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
+ * for EL0. To avoid spurious traps, restore the trap state
+ * seen by kvm_arch_vcpu_load_fp():
+ */
+ if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
+ sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
+ else
+ sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
}
- local_bh_enable();
+ update_thread_flag(TIF_SVE,
+ vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);
+
+ local_irq_restore(flags);
}
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 49e217a..61e93f0 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -583,13 +583,14 @@
size >> PAGE_SHIFT);
return NULL;
}
- if (!coherent)
- __dma_flush_area(page_to_virt(page), iosize);
-
addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
prot,
__builtin_return_address(0));
- if (!addr) {
+ if (addr) {
+ memset(addr, 0, size);
+ if (!coherent)
+ __dma_flush_area(page_to_virt(page), iosize);
+ } else {
iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
dma_release_from_contiguous(dev, page,
size >> PAGE_SHIFT);
diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S
index 5f9a73a..03646e6 100644
--- a/arch/arm64/mm/proc.S
+++ b/arch/arm64/mm/proc.S
@@ -217,8 +217,9 @@
.macro __idmap_kpti_put_pgtable_ent_ng, type
orr \type, \type, #PTE_NG // Same bit for blocks and pages
- str \type, [cur_\()\type\()p] // Update the entry and ensure it
- dc civac, cur_\()\type\()p // is visible to all CPUs.
+ str \type, [cur_\()\type\()p] // Update the entry and ensure
+ dmb sy // that it is visible to all
+ dc civac, cur_\()\type\()p // CPUs.
.endm
/*
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 3f9deec..08c10c5 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -65,6 +65,7 @@
select HAVE_OPROFILE
select HAVE_PERF_EVENTS
select HAVE_REGS_AND_STACK_ACCESS_API
+ select HAVE_RSEQ
select HAVE_STACKPROTECTOR
select HAVE_SYSCALL_TRACEPOINTS
select HAVE_VIRT_CPU_ACCOUNTING_GEN if 64BIT || !SMP
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
index 6b2c6f3..75fb96c 100644
--- a/arch/mips/ath79/mach-pb44.c
+++ b/arch/mips/ath79/mach-pb44.c
@@ -34,7 +34,7 @@
#define PB44_KEYS_DEBOUNCE_INTERVAL (3 * PB44_KEYS_POLL_INTERVAL)
static struct gpiod_lookup_table pb44_i2c_gpiod_table = {
- .dev_id = "i2c-gpio",
+ .dev_id = "i2c-gpio.0",
.table = {
GPIO_LOOKUP_IDX("ath79-gpio", PB44_GPIO_I2C_SDA,
NULL, 0, GPIO_ACTIVE_HIGH | GPIO_OPEN_DRAIN),
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index 6054d49..8c9cbf1 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -212,6 +212,12 @@
*/
if (bcm47xx_bus.bcma.bus.chipinfo.id == BCMA_CHIP_ID_BCM4706)
cpu_wait = NULL;
+
+ /*
+ * BCM47XX Erratum "R10: PCIe Transactions Periodically Fail"
+ * Enable ExternalSync for sync instruction to take effect
+ */
+ set_c0_config7(MIPS_CONF7_ES);
break;
#endif
}
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index a7d0b83..cea8ad8 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -414,6 +414,8 @@
__val = *__addr; \
slow; \
\
+ /* prevent prefetching of coherent DMA data prematurely */ \
+ rmb(); \
return pfx##ioswab##bwlq(__addr, __val); \
}
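
The ordering the added rmb() provides can be modeled in userspace with a C11 acquire fence (a loose analogy under stated assumptions: 'status' stands in for the MMIO register read by the accessor, 'buf' for the coherent DMA buffer):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    static volatile uint32_t status;  /* models the MMIO completion flag */
    static uint8_t buf[64];           /* models the coherent DMA buffer  */

    static int consume(void)
    {
            if (status != 1)
                    return -1;
            /* Keep the buffer reads from being satisfied by values loaded
             * before the status read -- the role of the rmb() above. */
            atomic_thread_fence(memory_order_acquire);
            return buf[0];
    }

    int main(void)
    {
            buf[0] = 42;
            status = 1;
            printf("%d\n", consume());
            return 0;
    }
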
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index ae461d9..0bc2708 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -681,6 +681,8 @@
#define MIPS_CONF7_WII (_ULCAST_(1) << 31)
#define MIPS_CONF7_RPS (_ULCAST_(1) << 2)
+/* ExternalSync */
+#define MIPS_CONF7_ES (_ULCAST_(1) << 8)
#define MIPS_CONF7_IAR (_ULCAST_(1) << 10)
#define MIPS_CONF7_AR (_ULCAST_(1) << 16)
@@ -2765,6 +2767,7 @@
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
__BUILD_SET_C0(config5)
+__BUILD_SET_C0(config7)
__BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index bb05e99..f25dd1d 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -388,17 +388,19 @@
#define __NR_pkey_alloc (__NR_Linux + 364)
#define __NR_pkey_free (__NR_Linux + 365)
#define __NR_statx (__NR_Linux + 366)
+#define __NR_rseq (__NR_Linux + 367)
+#define __NR_io_pgetevents (__NR_Linux + 368)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 366
+#define __NR_Linux_syscalls 368
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 366
+#define __NR_O32_Linux_syscalls 368
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -733,16 +735,18 @@
#define __NR_pkey_alloc (__NR_Linux + 324)
#define __NR_pkey_free (__NR_Linux + 325)
#define __NR_statx (__NR_Linux + 326)
+#define __NR_rseq (__NR_Linux + 327)
+#define __NR_io_pgetevents (__NR_Linux + 328)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 326
+#define __NR_Linux_syscalls 328
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 326
+#define __NR_64_Linux_syscalls 328
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1081,15 +1085,17 @@
#define __NR_pkey_alloc (__NR_Linux + 328)
#define __NR_pkey_free (__NR_Linux + 329)
#define __NR_statx (__NR_Linux + 330)
+#define __NR_rseq (__NR_Linux + 331)
+#define __NR_io_pgetevents (__NR_Linux + 332)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 330
+#define __NR_Linux_syscalls 332
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 330
+#define __NR_N32_Linux_syscalls 332
#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
index 38a3029..d7de8ad 100644
--- a/arch/mips/kernel/entry.S
+++ b/arch/mips/kernel/entry.S
@@ -79,6 +79,10 @@
jal schedule_tail # a0 = struct task_struct *prev
FEXPORT(syscall_exit)
+#ifdef CONFIG_DEBUG_RSEQ
+ move a0, sp
+ jal rseq_syscall
+#endif
local_irq_disable # make sure need_resched and
# signals dont change between
# sampling and return
@@ -141,6 +145,10 @@
j resume_userspace_check
FEXPORT(syscall_exit_partial)
+#ifdef CONFIG_DEBUG_RSEQ
+ move a0, sp
+ jal rseq_syscall
+#endif
local_irq_disable # make sure need_resched doesn't
# change between and return
LONG_L a2, TI_FLAGS($28) # current->work
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
index f2ee7e1..cff52b2 100644
--- a/arch/mips/kernel/mcount.S
+++ b/arch/mips/kernel/mcount.S
@@ -119,10 +119,20 @@
EXPORT_SYMBOL(_mcount)
PTR_LA t1, ftrace_stub
PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
- bne t1, t2, static_trace
+ beq t1, t2, fgraph_trace
nop
+ MCOUNT_SAVE_REGS
+
+ move a0, ra /* arg1: self return address */
+ jalr t2 /* (1) call *ftrace_trace_function */
+ move a1, AT /* arg2: parent's return address */
+
+ MCOUNT_RESTORE_REGS
+
+fgraph_trace:
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ PTR_LA t1, ftrace_stub
PTR_L t3, ftrace_graph_return
bne t1, t3, ftrace_graph_caller
nop
@@ -131,24 +141,11 @@
bne t1, t3, ftrace_graph_caller
nop
#endif
- b ftrace_stub
-#ifdef CONFIG_32BIT
- addiu sp, sp, 8
-#else
- nop
-#endif
-static_trace:
- MCOUNT_SAVE_REGS
-
- move a0, ra /* arg1: self return address */
- jalr t2 /* (1) call *ftrace_trace_function */
- move a1, AT /* arg2: parent's return address */
-
- MCOUNT_RESTORE_REGS
#ifdef CONFIG_32BIT
addiu sp, sp, 8
#endif
+
.globl ftrace_stub
ftrace_stub:
RETURN_BACK
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a9a7d78..91d3c8c 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -590,3 +590,5 @@
PTR sys_pkey_alloc
PTR sys_pkey_free /* 4365 */
PTR sys_statx
+ PTR sys_rseq
+ PTR sys_io_pgetevents
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 65d5aee..358d959 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -439,4 +439,6 @@
PTR sys_pkey_alloc
PTR sys_pkey_free /* 5325 */
PTR sys_statx
+ PTR sys_rseq
+ PTR sys_io_pgetevents
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index cbf190e..c65eaac 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -434,4 +434,6 @@
PTR sys_pkey_alloc
PTR sys_pkey_free
PTR sys_statx /* 6330 */
+ PTR sys_rseq
+ PTR compat_sys_io_pgetevents
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index 9ebe3e2..73913f0 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -583,4 +583,6 @@
PTR sys_pkey_alloc
PTR sys_pkey_free /* 4365 */
PTR sys_statx
+ PTR sys_rseq
+ PTR compat_sys_io_pgetevents
.size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index 9e22446..00f2535 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -801,6 +801,8 @@
regs->regs[0] = 0; /* Don't deal with this again. */
}
+ rseq_signal_deliver(regs);
+
if (sig_uses_siginfo(&ksig->ka, abi))
ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
ksig, regs, oldset);
@@ -868,6 +870,7 @@
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
+ rseq_handle_notify_resume(regs);
}
user_enter();
diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile
index bd06a3c..2ea575c 100644
--- a/arch/powerpc/Makefile
+++ b/arch/powerpc/Makefile
@@ -244,6 +244,7 @@
cpu-as-$(CONFIG_ALTIVEC) += $(call as-option,-Wa$(comma)-maltivec)
cpu-as-$(CONFIG_E200) += -Wa,-me200
cpu-as-$(CONFIG_PPC_BOOK3S_64) += -Wa,-mpower4
+cpu-as-$(CONFIG_PPC_E500MC) += $(call as-option,-Wa$(comma)-me500mc)
KBUILD_AFLAGS += $(cpu-as-y)
KBUILD_CFLAGS += $(cpu-as-y)
diff --git a/arch/powerpc/include/asm/book3s/32/pgalloc.h b/arch/powerpc/include/asm/book3s/32/pgalloc.h
index 6a66739..e463380 100644
--- a/arch/powerpc/include/asm/book3s/32/pgalloc.h
+++ b/arch/powerpc/include/asm/book3s/32/pgalloc.h
@@ -108,6 +108,7 @@
}
#define check_pgt_cache() do { } while (0)
+#define get_hugepd_cache_index(x) (x)
#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
index af5f2ba..a069dfc 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-4k.h
@@ -49,6 +49,27 @@
}
#define is_hugepd(hpd) (hugepd_ok(hpd))
+/*
+ * 16M and 16G huge page directory tables are allocated from slab cache
+ *
+ */
+#define H_16M_CACHE_INDEX (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE - 24)
+#define H_16G_CACHE_INDEX \
+ (PAGE_SHIFT + H_PTE_INDEX_SIZE + H_PMD_INDEX_SIZE + H_PUD_INDEX_SIZE - 34)
+
+static inline int get_hugepd_cache_index(int index)
+{
+ switch (index) {
+ case H_16M_CACHE_INDEX:
+ return HTLB_16M_INDEX;
+ case H_16G_CACHE_INDEX:
+ return HTLB_16G_INDEX;
+ default:
+ BUG();
+ }
+ /* should not reach */
+}
+
#else /* !CONFIG_HUGETLB_PAGE */
static inline int pmd_huge(pmd_t pmd) { return 0; }
static inline int pud_huge(pud_t pud) { return 0; }
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
index fb4b3ba..d7ee249 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable-64k.h
@@ -45,8 +45,17 @@
{
return 0;
}
+
#define is_hugepd(pdep) 0
+/*
+ * This should never get called
+ */
+static inline int get_hugepd_cache_index(int index)
+{
+ BUG();
+}
+
#else /* !CONFIG_HUGETLB_PAGE */
static inline int pmd_huge(pmd_t pmd) { return 0; }
static inline int pud_huge(pud_t pud) { return 0; }
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 63cee15..42aafba 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -287,6 +287,11 @@
PMD_INDEX,
PUD_INDEX,
PGD_INDEX,
+ /*
+ * Below are used with 4k page size and hugetlb
+ */
+ HTLB_16M_INDEX,
+ HTLB_16G_INDEX,
};
extern unsigned long __vmalloc_start;
diff --git a/arch/powerpc/include/asm/nmi.h b/arch/powerpc/include/asm/nmi.h
index 0f571e0..bd9ba8d 100644
--- a/arch/powerpc/include/asm/nmi.h
+++ b/arch/powerpc/include/asm/nmi.h
@@ -8,7 +8,7 @@
static inline void arch_touch_nmi_watchdog(void) {}
#endif
-#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_STACKTRACE)
+#if defined(CONFIG_NMI_IPI) && defined(CONFIG_STACKTRACE)
extern void arch_trigger_cpumask_backtrace(const cpumask_t *mask,
bool exclude_self);
#define arch_trigger_cpumask_backtrace arch_trigger_cpumask_backtrace
diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h
index 1707781..9de40eb 100644
--- a/arch/powerpc/include/asm/nohash/32/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h
@@ -109,6 +109,7 @@
}
#define check_pgt_cache() do { } while (0)
+#define get_hugepd_cache_index(x) (x)
#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h
index 0e693f3..e2d62d0 100644
--- a/arch/powerpc/include/asm/nohash/64/pgalloc.h
+++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h
@@ -141,6 +141,7 @@
}
}
+#define get_hugepd_cache_index(x) (x)
#ifdef CONFIG_SMP
static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
{
diff --git a/arch/powerpc/kernel/dt_cpu_ftrs.c b/arch/powerpc/kernel/dt_cpu_ftrs.c
index 4be1c0d..96dd3d8 100644
--- a/arch/powerpc/kernel/dt_cpu_ftrs.c
+++ b/arch/powerpc/kernel/dt_cpu_ftrs.c
@@ -711,7 +711,8 @@
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_HV_ASSIST;
cur_cpu_spec->cpu_features |= CPU_FTR_P9_TM_XER_SO_BUG;
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
- } else /* DD2.1 and up have DD2_1 */
+ } else if ((version & 0xffff0000) == 0x004e0000)
+ /* DD2.1 and up have DD2_1 */
cur_cpu_spec->cpu_features |= CPU_FTR_POWER9_DD2_1;
if ((version & 0xffff0000) == 0x004e0000) {
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 62b1a40..40b44bb 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -701,11 +701,18 @@
unsigned long event, void *ptr)
{
/*
+ * panic does a local_irq_disable, but we really
+ * want interrupts to be hard disabled.
+ */
+ hard_irq_disable();
+
+ /*
* If firmware-assisted dump has been registered then trigger
* firmware-assisted dump and let firmware handle everything else.
*/
crash_fadump(NULL, ptr);
- ppc_md.panic(ptr); /* May not return */
+ if (ppc_md.panic)
+ ppc_md.panic(ptr); /* May not return */
return NOTIFY_DONE;
}
@@ -716,7 +723,8 @@
void __init setup_panic(void)
{
- if (!ppc_md.panic)
+ /* PPC64 always does a hard irq disable in its panic handler */
+ if (!IS_ENABLED(CONFIG_PPC64) && !ppc_md.panic)
return;
atomic_notifier_chain_register(&panic_notifier_list, &ppc_panic_block);
}
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 7a7ce8a..225bc5f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -387,6 +387,14 @@
#endif /* CONFIG_SMP */
+void panic_smp_self_stop(void)
+{
+ hard_irq_disable();
+ spin_begin();
+ while (1)
+ spin_cpu_relax();
+}
+
#if defined(CONFIG_SMP) || defined(CONFIG_KEXEC_CORE)
static bool use_spinloop(void)
{
diff --git a/arch/powerpc/kernel/signal.c b/arch/powerpc/kernel/signal.c
index 17fe433..b3e8db3 100644
--- a/arch/powerpc/kernel/signal.c
+++ b/arch/powerpc/kernel/signal.c
@@ -134,7 +134,7 @@
/* Re-enable the breakpoints for the signal stack */
thread_change_pc(tsk, tsk->thread.regs);
- rseq_signal_deliver(tsk->thread.regs);
+ rseq_signal_deliver(&ksig, tsk->thread.regs);
if (is32) {
if (ksig.ka.sa.sa_flags & SA_SIGINFO)
@@ -170,7 +170,7 @@
if (thread_info_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
- rseq_handle_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
user_enter();
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index 5eadfff..4794d6b 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -600,9 +600,6 @@
nmi_ipi_busy_count--;
nmi_ipi_unlock();
- /* Remove this CPU */
- set_cpu_online(smp_processor_id(), false);
-
spin_begin();
while (1)
spin_cpu_relax();
@@ -617,9 +614,6 @@
static void stop_this_cpu(void *dummy)
{
- /* Remove this CPU */
- set_cpu_online(smp_processor_id(), false);
-
hard_irq_disable();
spin_begin();
while (1)
diff --git a/arch/powerpc/kernel/stacktrace.c b/arch/powerpc/kernel/stacktrace.c
index 07e97f2..e2c50b5 100644
--- a/arch/powerpc/kernel/stacktrace.c
+++ b/arch/powerpc/kernel/stacktrace.c
@@ -196,7 +196,7 @@
EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable);
#endif /* CONFIG_HAVE_RELIABLE_STACKTRACE */
-#ifdef CONFIG_PPC_BOOK3S_64
+#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
static void handle_backtrace_ipi(struct pt_regs *regs)
{
nmi_cpu_backtrace(regs);
@@ -242,4 +242,4 @@
{
nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
-#endif /* CONFIG_PPC64 */
+#endif /* defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI) */
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 7c5f479..8a9a49c 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -337,7 +337,8 @@
if (shift >= pdshift)
hugepd_free(tlb, hugepte);
else
- pgtable_free_tlb(tlb, hugepte, pdshift - shift);
+ pgtable_free_tlb(tlb, hugepte,
+ get_hugepd_cache_index(pdshift - shift));
}
static void hugetlb_free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
diff --git a/arch/powerpc/mm/pgtable-book3s64.c b/arch/powerpc/mm/pgtable-book3s64.c
index c1f4ca4..4afbfbb 100644
--- a/arch/powerpc/mm/pgtable-book3s64.c
+++ b/arch/powerpc/mm/pgtable-book3s64.c
@@ -409,6 +409,18 @@
case PUD_INDEX:
kmem_cache_free(PGT_CACHE(PUD_CACHE_INDEX), table);
break;
+#if defined(CONFIG_PPC_4K_PAGES) && defined(CONFIG_HUGETLB_PAGE)
+ /* 16M hugepd directory at pud level */
+ case HTLB_16M_INDEX:
+ BUILD_BUG_ON(H_16M_CACHE_INDEX <= 0);
+ kmem_cache_free(PGT_CACHE(H_16M_CACHE_INDEX), table);
+ break;
+ /* 16G hugepd directory at the pgd level */
+ case HTLB_16G_INDEX:
+ BUILD_BUG_ON(H_16G_CACHE_INDEX <= 0);
+ kmem_cache_free(PGT_CACHE(H_16G_CACHE_INDEX), table);
+ break;
+#endif
/* We don't free pgd table via RCU callback */
default:
BUG();
diff --git a/arch/powerpc/mm/tlb-radix.c b/arch/powerpc/mm/tlb-radix.c
index 67a6e86..1135b43 100644
--- a/arch/powerpc/mm/tlb-radix.c
+++ b/arch/powerpc/mm/tlb-radix.c
@@ -689,22 +689,17 @@
static unsigned long tlb_single_page_flush_ceiling __read_mostly = 33;
static unsigned long tlb_local_single_page_flush_ceiling __read_mostly = POWER9_TLB_SETS_RADIX * 2;
-void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
+static inline void __radix__flush_tlb_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ bool flush_all_sizes)
{
- struct mm_struct *mm = vma->vm_mm;
unsigned long pid;
unsigned int page_shift = mmu_psize_defs[mmu_virtual_psize].shift;
unsigned long page_size = 1UL << page_shift;
unsigned long nr_pages = (end - start) >> page_shift;
bool local, full;
-#ifdef CONFIG_HUGETLB_PAGE
- if (is_vm_hugetlb_page(vma))
- return radix__flush_hugetlb_tlb_range(vma, start, end);
-#endif
-
pid = mm->context.id;
if (unlikely(pid == MMU_NO_CONTEXT))
return;
@@ -738,37 +733,64 @@
_tlbie_pid(pid, RIC_FLUSH_TLB);
}
} else {
- bool hflush = false;
+ bool hflush = flush_all_sizes;
+ bool gflush = flush_all_sizes;
unsigned long hstart, hend;
+ unsigned long gstart, gend;
-#ifdef CONFIG_TRANSPARENT_HUGEPAGE
- hstart = (start + HPAGE_PMD_SIZE - 1) >> HPAGE_PMD_SHIFT;
- hend = end >> HPAGE_PMD_SHIFT;
- if (hstart < hend) {
- hstart <<= HPAGE_PMD_SHIFT;
- hend <<= HPAGE_PMD_SHIFT;
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE))
hflush = true;
+
+ if (hflush) {
+ hstart = (start + PMD_SIZE - 1) & PMD_MASK;
+ hend = end & PMD_MASK;
+ if (hstart == hend)
+ hflush = false;
}
-#endif
+
+ if (gflush) {
+ gstart = (start + PUD_SIZE - 1) & PUD_MASK;
+ gend = end & PUD_MASK;
+ if (gstart == gend)
+ gflush = false;
+ }
asm volatile("ptesync": : :"memory");
if (local) {
__tlbiel_va_range(start, end, pid, page_size, mmu_virtual_psize);
if (hflush)
__tlbiel_va_range(hstart, hend, pid,
- HPAGE_PMD_SIZE, MMU_PAGE_2M);
+ PMD_SIZE, MMU_PAGE_2M);
+ if (gflush)
+ __tlbiel_va_range(gstart, gend, pid,
+ PUD_SIZE, MMU_PAGE_1G);
asm volatile("ptesync": : :"memory");
} else {
__tlbie_va_range(start, end, pid, page_size, mmu_virtual_psize);
if (hflush)
__tlbie_va_range(hstart, hend, pid,
- HPAGE_PMD_SIZE, MMU_PAGE_2M);
+ PMD_SIZE, MMU_PAGE_2M);
+ if (gflush)
+ __tlbie_va_range(gstart, gend, pid,
+ PUD_SIZE, MMU_PAGE_1G);
fixup_tlbie();
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
}
preempt_enable();
}
+
+void radix__flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+
+{
+#ifdef CONFIG_HUGETLB_PAGE
+ if (is_vm_hugetlb_page(vma))
+ return radix__flush_hugetlb_tlb_range(vma, start, end);
+#endif
+
+ __radix__flush_tlb_range(vma->vm_mm, start, end, false);
+}
EXPORT_SYMBOL(radix__flush_tlb_range);
static int radix_get_mmu_psize(int page_size)
@@ -837,6 +859,8 @@
int psize = 0;
struct mm_struct *mm = tlb->mm;
int page_size = tlb->page_size;
+ unsigned long start = tlb->start;
+ unsigned long end = tlb->end;
/*
* if page size is not something we understand, do a full mm flush
@@ -847,15 +871,45 @@
*/
if (tlb->fullmm) {
__flush_all_mm(mm, true);
+#if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLB_PAGE)
+ } else if (mm_tlb_flush_nested(mm)) {
+ /*
+ * If there is a concurrent invalidation that is clearing ptes,
+ * then it's possible this invalidation will miss one of those
+ * cleared ptes and miss flushing the TLB. If this invalidate
+ * returns before the other one flushes TLBs, that can result
+ * in it returning while there are still valid TLBs inside the
+ * range to be invalidated.
+ *
+ * See mm/memory.c:tlb_finish_mmu() for more details.
+ *
+ * The solution to this is ensure the entire range is always
+ * flushed here. The problem for powerpc is that the flushes
+ * are page size specific, so this "forced flush" would not
+ * do the right thing if there are a mix of page sizes in
+ * the range to be invalidated. So use __flush_tlb_range
+ * which invalidates all possible page sizes in the range.
+ *
+ * PWC flush probably is not be required because the core code
+ * shouldn't free page tables in this path, but accounting
+ * for the possibility makes us a bit more robust.
+ *
+ * need_flush_all is an uncommon case because page table
+ * teardown should be done with exclusive locks held (but
+ * after locks are dropped another invalidate could come
+ * in), it could be optimized further if necessary.
+ */
+ if (!tlb->need_flush_all)
+ __radix__flush_tlb_range(mm, start, end, true);
+ else
+ radix__flush_all_mm(mm);
+#endif
} else if ( (psize = radix_get_mmu_psize(page_size)) == -1) {
if (!tlb->need_flush_all)
radix__flush_tlb_mm(mm);
else
radix__flush_all_mm(mm);
} else {
- unsigned long start = tlb->start;
- unsigned long end = tlb->end;
-
if (!tlb->need_flush_all)
radix__flush_tlb_range_psize(mm, start, end, psize);
else
@@ -1043,6 +1097,8 @@
for (; sib <= cpu_last_thread_sibling(cpu) && !flush; sib++) {
if (sib == cpu)
continue;
+ if (!cpu_possible(sib))
+ continue;
if (paca_ptrs[sib]->kvm_hstate.kvm_vcpu)
flush = true;
}
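
The PMD/PUD rounding used by __radix__flush_tlb_range() above is the standard inward-rounding idiom; a minimal sketch, assuming an illustrative 2M PMD_SIZE:

    #include <stdbool.h>
    #include <stdio.h>

    #define PMD_SIZE (2UL << 20)            /* 2M, illustrative value */
    #define PMD_MASK (~(PMD_SIZE - 1))

    /* Round [start, end) inward to PMD boundaries; when no fully aligned
     * 2M block remains, there is nothing to flush at that page size
     * (the hflush/gflush decision above). */
    static bool pmd_subrange(unsigned long start, unsigned long end,
                             unsigned long *hstart, unsigned long *hend)
    {
            *hstart = (start + PMD_SIZE - 1) & PMD_MASK;    /* round up   */
            *hend = end & PMD_MASK;                         /* round down */
            return *hstart < *hend;
    }

    int main(void)
    {
            unsigned long hs, he;

            if (pmd_subrange(0x1ff000, 0x600000, &hs, &he))
                    printf("flush 2M entries in [0x%lx, 0x%lx)\n", hs, he);
            return 0;
    }
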
diff --git a/arch/x86/Makefile b/arch/x86/Makefile
index f0a6ea2..a08e828 100644
--- a/arch/x86/Makefile
+++ b/arch/x86/Makefile
@@ -258,11 +258,6 @@
archheaders:
$(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
-archprepare:
-ifeq ($(CONFIG_KEXEC_FILE),y)
- $(Q)$(MAKE) $(build)=arch/x86/purgatory arch/x86/purgatory/kexec-purgatory.c
-endif
-
###
# Kernel objects
@@ -327,7 +322,6 @@
$(Q)rm -rf $(objtree)/arch/x86_64
$(Q)$(MAKE) $(clean)=$(boot)
$(Q)$(MAKE) $(clean)=arch/x86/tools
- $(Q)$(MAKE) $(clean)=arch/x86/purgatory
define archhelp
echo '* bzImage - Compressed kernel image (arch/x86/boot/bzImage)'
diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
index a8a8642..e57665b 100644
--- a/arch/x86/boot/compressed/eboot.c
+++ b/arch/x86/boot/compressed/eboot.c
@@ -118,7 +118,7 @@
void *romimage;
status = efi_call_proto(efi_pci_io_protocol, attributes, pci,
- EfiPciIoAttributeOperationGet, 0, 0,
+ EfiPciIoAttributeOperationGet, 0ULL,
&attributes);
if (status != EFI_SUCCESS)
return status;
diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c
index 9219087..3b2490b 100644
--- a/arch/x86/entry/common.c
+++ b/arch/x86/entry/common.c
@@ -164,7 +164,7 @@
if (cached_flags & _TIF_NOTIFY_RESUME) {
clear_thread_flag(TIF_NOTIFY_RESUME);
tracehook_notify_resume(regs);
- rseq_handle_notify_resume(regs);
+ rseq_handle_notify_resume(NULL, regs);
}
if (cached_flags & _TIF_USER_RETURN_NOTIFY)
diff --git a/arch/x86/include/asm/barrier.h b/arch/x86/include/asm/barrier.h
index 042b5e8..14de043 100644
--- a/arch/x86/include/asm/barrier.h
+++ b/arch/x86/include/asm/barrier.h
@@ -38,7 +38,7 @@
{
unsigned long mask;
- asm ("cmp %1,%2; sbb %0,%0;"
+ asm volatile ("cmp %1,%2; sbb %0,%0;"
:"=r" (mask)
:"g"(size),"r" (index)
:"cc");
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 425e6b8..6aa8499 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -114,6 +114,7 @@
#define VMX_MISC_PREEMPTION_TIMER_RATE_MASK 0x0000001f
#define VMX_MISC_SAVE_EFER_LMA 0x00000020
#define VMX_MISC_ACTIVITY_HLT 0x00000040
+#define VMX_MISC_ZERO_LEN_INS 0x40000000
/* VMFUNC functions */
#define VMX_VMFUNC_EPTP_SWITCHING 0x00000001
@@ -351,11 +352,13 @@
#define VECTORING_INFO_VALID_MASK INTR_INFO_VALID_MASK
#define INTR_TYPE_EXT_INTR (0 << 8) /* external interrupt */
+#define INTR_TYPE_RESERVED (1 << 8) /* reserved */
#define INTR_TYPE_NMI_INTR (2 << 8) /* NMI */
#define INTR_TYPE_HARD_EXCEPTION (3 << 8) /* processor exception */
#define INTR_TYPE_SOFT_INTR (4 << 8) /* software interrupt */
#define INTR_TYPE_PRIV_SW_EXCEPTION (5 << 8) /* ICE breakpoint - undocumented */
#define INTR_TYPE_SOFT_EXCEPTION (6 << 8) /* software exception */
+#define INTR_TYPE_OTHER_EVENT (7 << 8) /* other event */
/* GUEST_INTERRUPTIBILITY_INFO flags. */
#define GUEST_INTR_STATE_STI 0x00000001
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index efaf2d4..d492752 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -26,6 +26,7 @@
#include <linux/delay.h>
#include <linux/crash_dump.h>
#include <linux/reboot.h>
+#include <linux/memory.h>
#include <asm/uv/uv_mmrs.h>
#include <asm/uv/uv_hub.h>
@@ -392,6 +393,51 @@
}
EXPORT_SYMBOL(uv_hub_info_version);
+/* Default UV memory block size is 2GB */
+static unsigned long mem_block_size = (2UL << 30);
+
+/* Kernel parameter to specify UV mem block size */
+static int parse_mem_block_size(char *ptr)
+{
+ unsigned long size = memparse(ptr, NULL);
+
+ /* Size will be rounded down by set_block_size() below */
+ mem_block_size = size;
+ return 0;
+}
+early_param("uv_memblksize", parse_mem_block_size);
+
+static __init int adj_blksize(u32 lgre)
+{
+ unsigned long base = (unsigned long)lgre << UV_GAM_RANGE_SHFT;
+ unsigned long size;
+
+ for (size = mem_block_size; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
+ if (IS_ALIGNED(base, size))
+ break;
+
+ if (size >= mem_block_size)
+ return 0;
+
+ mem_block_size = size;
+ return 1;
+}
+
+static __init void set_block_size(void)
+{
+ unsigned int order = ffs(mem_block_size);
+
+ if (order) {
+ /* adjust for ffs return of 1..64 */
+ set_memory_block_size_order(order - 1);
+ pr_info("UV: mem_block_size set to 0x%lx\n", mem_block_size);
+ } else {
+ /* bad or zero value, default to 1UL << 31 (2GB) */
+ pr_err("UV: mem_block_size error with 0x%lx\n", mem_block_size);
+ set_memory_block_size_order(31);
+ }
+}
+
/* Build GAM range lookup table: */
static __init void build_uv_gr_table(void)
{
@@ -1180,23 +1226,30 @@
<< UV_GAM_RANGE_SHFT);
int order = 0;
char suffix[] = " KMGTPE";
+ int flag = ' ';
while (size > 9999 && order < sizeof(suffix)) {
size /= 1024;
order++;
}
+ /* adjust max block size to current range start */
+ if (gre->type == 1 || gre->type == 2)
+ if (adj_blksize(lgre))
+ flag = '*';
+
if (!index) {
pr_info("UV: GAM Range Table...\n");
- pr_info("UV: # %20s %14s %5s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
+ pr_info("UV: # %20s %14s %6s %4s %5s %3s %2s\n", "Range", "", "Size", "Type", "NASID", "SID", "PN");
}
- pr_info("UV: %2d: 0x%014lx-0x%014lx %5lu%c %3d %04x %02x %02x\n",
+ pr_info("UV: %2d: 0x%014lx-0x%014lx%c %5lu%c %3d %04x %02x %02x\n",
index++,
(unsigned long)lgre << UV_GAM_RANGE_SHFT,
(unsigned long)gre->limit << UV_GAM_RANGE_SHFT,
- size, suffix[order],
+ flag, size, suffix[order],
gre->type, gre->nasid, gre->sockid, gre->pnode);
+ /* update to next range start */
lgre = gre->limit;
if (sock_min > gre->sockid)
sock_min = gre->sockid;
@@ -1427,6 +1480,7 @@
build_socket_tables();
build_uv_gr_table();
+ set_block_size();
uv_init_hub_info(&hub_info);
uv_possible_blades = num_possible_nodes();
if (!_node_to_pnode)
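
The adj_blksize() walk above is a power-of-two fit: shrink the candidate block size until it divides the range base. A sketch with illustrative constants:

    #include <stdio.h>

    #define MIN_MEMORY_BLOCK_SIZE (1UL << 27)       /* 128M, illustrative */

    /* Halve 'size' until it evenly divides 'base' (or hits the floor),
     * mirroring the loop in adj_blksize(). */
    static unsigned long fit_block_size(unsigned long base, unsigned long size)
    {
            for (; size > MIN_MEMORY_BLOCK_SIZE; size >>= 1)
                    if ((base & (size - 1)) == 0)   /* IS_ALIGNED(base, size) */
                            break;
            return size;
    }

    int main(void)
    {
            /* 5G base: 1G-aligned but not 2G-aligned, so 2G shrinks to 1G */
            printf("0x%lx\n", fit_block_size(0x140000000UL, 2UL << 30));
            return 0;
    }
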
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index cd0fda1..404df26 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -27,6 +27,7 @@
#include <asm/pgtable.h>
#include <asm/set_memory.h>
#include <asm/intel-family.h>
+#include <asm/hypervisor.h>
static void __init spectre_v2_select_mitigation(void);
static void __init ssb_select_mitigation(void);
@@ -664,6 +665,9 @@
if (boot_cpu_has(X86_FEATURE_PTI))
return sprintf(buf, "Mitigation: PTI\n");
+ if (hypervisor_is_type(X86_HYPER_XEN_PV))
+ return sprintf(buf, "Unknown (XEN PV detected, hypervisor mitigation required)\n");
+
break;
case X86_BUG_SPECTRE_V1:
diff --git a/arch/x86/kernel/cpu/cacheinfo.c b/arch/x86/kernel/cpu/cacheinfo.c
index 38354c6..0c5fcbd 100644
--- a/arch/x86/kernel/cpu/cacheinfo.c
+++ b/arch/x86/kernel/cpu/cacheinfo.c
@@ -671,7 +671,7 @@
num_sharing_cache = ((eax >> 14) & 0xfff) + 1;
if (num_sharing_cache) {
- int bits = get_count_order(num_sharing_cache) - 1;
+ int bits = get_count_order(num_sharing_cache);
per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
}
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 0df7151..eb4cb3e 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -1,3 +1,6 @@
+/* cpu_feature_enabled() cannot be used this early */
+#define USE_EARLY_PGTABLE_L5
+
#include <linux/bootmem.h>
#include <linux/linkage.h>
#include <linux/bitops.h>
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 5bbd06f..f34d89c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -160,6 +160,11 @@
SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
USER
),
+ MCESEV(
+ PANIC, "Data load in unrecoverable area of kernel",
+ SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
+ KERNEL
+ ),
#endif
MCESEV(
PANIC, "Action required: unknown MCACOD",
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index e4cf6ff..c102ad5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -772,23 +772,25 @@
static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
struct pt_regs *regs)
{
- int i, ret = 0;
char *tmp;
+ int i;
for (i = 0; i < mca_cfg.banks; i++) {
m->status = mce_rdmsrl(msr_ops.status(i));
- if (m->status & MCI_STATUS_VAL) {
- __set_bit(i, validp);
- if (quirk_no_way_out)
- quirk_no_way_out(i, m, regs);
- }
+ if (!(m->status & MCI_STATUS_VAL))
+ continue;
+
+ __set_bit(i, validp);
+ if (quirk_no_way_out)
+ quirk_no_way_out(i, m, regs);
if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+ mce_read_aux(m, i);
*msg = tmp;
- ret = 1;
+ return 1;
}
}
- return ret;
+ return 0;
}
/*
@@ -1205,13 +1207,18 @@
lmce = m.mcgstatus & MCG_STATUS_LMCES;
/*
+ * Local machine check may already know that we have to panic.
+ * Broadcast machine check begins rendezvous in mce_start()
* Go through all banks in exclusion of the other CPUs. This way we
* don't report duplicated events on shared banks because the first one
- * to see it will clear it. If this is a Local MCE, then no need to
- * perform rendezvous.
+ * to see it will clear it.
*/
- if (!lmce)
+ if (lmce) {
+ if (no_way_out)
+ mce_panic("Fatal local machine check", &m, msg);
+ } else {
order = mce_start(&no_way_out);
+ }
for (i = 0; i < cfg->banks; i++) {
__clear_bit(i, toclear);
@@ -1287,12 +1294,17 @@
no_way_out = worst >= MCE_PANIC_SEVERITY;
} else {
/*
- * Local MCE skipped calling mce_reign()
- * If we found a fatal error, we need to panic here.
+ * If there was a fatal machine check we should have
+ * already called mce_panic earlier in this function.
+ * Since we re-read the banks, we might have found
+ * something new. Check again to see if we found a
+ * fatal error. We call "mce_severity()" again to
+ * make sure we have the right "msg".
*/
- if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
- mce_panic("Machine check from unknown source",
- NULL, NULL);
+ if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3) {
+ mce_severity(&m, cfg->tolerant, &msg, true);
+ mce_panic("Local fatal machine check!", &m, msg);
+ }
}
/*
diff --git a/arch/x86/kernel/cpu/microcode/intel.c b/arch/x86/kernel/cpu/microcode/intel.c
index 1c2cfa0..97ccf4c 100644
--- a/arch/x86/kernel/cpu/microcode/intel.c
+++ b/arch/x86/kernel/cpu/microcode/intel.c
@@ -190,8 +190,11 @@
p = memdup_patch(data, size);
if (!p)
pr_err("Error allocating buffer %p\n", data);
- else
+ else {
list_replace(&iter->plist, &p->plist);
+ kfree(iter->data);
+ kfree(iter);
+ }
}
}
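
The two added kfree() calls fix a leak in the replace path: list_replace() unlinks the old node but does not free it. The same pattern on an illustrative singly linked list (not the kernel's list_head API):

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    struct patch {
            struct patch *next;
            void *data;
    };

    /* Swap 'new' into the position held by 'old' (whose predecessor's
     * next pointer is *link), then release the old node and its payload. */
    static void replace_patch(struct patch **link, struct patch *old,
                              struct patch *new)
    {
            new->next = old->next;
            *link = new;
            free(old->data);        /* what the added kfree(iter->data) does */
            free(old);              /* ...and the added kfree(iter)          */
    }

    int main(void)
    {
            struct patch *old = calloc(1, sizeof(*old));
            struct patch *new = calloc(1, sizeof(*new));
            struct patch *head = old;

            old->data = strdup("v1");
            new->data = strdup("v2");
            replace_patch(&head, old, new);
            printf("%s\n", (char *)head->data);     /* prints v2 */
            free(new->data);
            free(new);
            return 0;
    }
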
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index a21d6ac..8047379 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -44,7 +44,7 @@
pmdval_t early_pmd_flags = __PAGE_KERNEL_LARGE & ~(_PAGE_GLOBAL | _PAGE_NX);
#ifdef CONFIG_X86_5LEVEL
-unsigned int __pgtable_l5_enabled __initdata;
+unsigned int __pgtable_l5_enabled __ro_after_init;
unsigned int pgdir_shift __ro_after_init = 39;
EXPORT_SYMBOL(pgdir_shift);
unsigned int ptrs_per_p4d __ro_after_init = 1;
diff --git a/arch/x86/kernel/quirks.c b/arch/x86/kernel/quirks.c
index 697a4ce..736348e 100644
--- a/arch/x86/kernel/quirks.c
+++ b/arch/x86/kernel/quirks.c
@@ -645,12 +645,19 @@
/* Skylake */
static void quirk_intel_purley_xeon_ras_cap(struct pci_dev *pdev)
{
- u32 capid0;
+ u32 capid0, capid5;
pci_read_config_dword(pdev, 0x84, &capid0);
+ pci_read_config_dword(pdev, 0x98, &capid5);
- if ((capid0 & 0xc0) == 0xc0)
+ /*
+ * CAPID0{7:6} indicate whether this is an advanced RAS SKU
+ * CAPID5{8:5} indicate that various NVDIMM usage modes are
+ * enabled, so memory machine check recovery is also enabled.
+ */
+ if ((capid0 & 0xc0) == 0xc0 || (capid5 & 0x1e0))
static_branch_inc(&mcsafe_key);
+
}
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x0ec3, quirk_intel_brickland_xeon_ras_cap);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x2fc0, quirk_intel_brickland_xeon_ras_cap);
diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
index 445ca11..92a3b31 100644
--- a/arch/x86/kernel/signal.c
+++ b/arch/x86/kernel/signal.c
@@ -692,7 +692,7 @@
* Increment event counter and perform fixup for the pre-signal
* frame.
*/
- rseq_signal_deliver(regs);
+ rseq_signal_deliver(ksig, regs);
/* Set up the stack frame */
if (is_ia32_frame(ksig)) {
diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
index a535dd6..e6db475 100644
--- a/arch/x86/kernel/traps.c
+++ b/arch/x86/kernel/traps.c
@@ -835,16 +835,18 @@
char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
"simd exception";
- if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, SIGFPE) == NOTIFY_STOP)
- return;
cond_local_irq_enable(regs);
if (!user_mode(regs)) {
- if (!fixup_exception(regs, trapnr)) {
- task->thread.error_code = error_code;
- task->thread.trap_nr = trapnr;
+ if (fixup_exception(regs, trapnr))
+ return;
+
+ task->thread.error_code = error_code;
+ task->thread.trap_nr = trapnr;
+
+ if (notify_die(DIE_TRAP, str, regs, error_code,
+ trapnr, SIGFPE) != NOTIFY_STOP)
die(str, regs, error_code);
- }
return;
}
diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
index 58d8d80..deb576b 100644
--- a/arch/x86/kernel/uprobes.c
+++ b/arch/x86/kernel/uprobes.c
@@ -293,7 +293,7 @@
insn_init(insn, auprobe->insn, sizeof(auprobe->insn), x86_64);
/* has the side-effect of processing the entire instruction */
insn_get_length(insn);
- if (WARN_ON_ONCE(!insn_complete(insn)))
+ if (!insn_complete(insn))
return -ENOEXEC;
if (is_prefix_bad(insn))
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 559a12b..1689f43 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1705,6 +1705,17 @@
MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS;
}
+static inline bool nested_cpu_has_zero_length_injection(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.msrs.misc_low & VMX_MISC_ZERO_LEN_INS;
+}
+
+static inline bool nested_cpu_supports_monitor_trap_flag(struct kvm_vcpu *vcpu)
+{
+ return to_vmx(vcpu)->nested.msrs.procbased_ctls_high &
+ CPU_BASED_MONITOR_TRAP_FLAG;
+}
+
static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
{
return vmcs12->cpu_based_vm_exec_control & bit;
@@ -11620,6 +11631,62 @@
!nested_cr3_valid(vcpu, vmcs12->host_cr3))
return VMXERR_ENTRY_INVALID_HOST_STATE_FIELD;
+ /*
+ * From the Intel SDM, volume 3:
+ * Fields relevant to VM-entry event injection must be set properly.
+ * These fields are the VM-entry interruption-information field, the
+ * VM-entry exception error code, and the VM-entry instruction length.
+ */
+ if (vmcs12->vm_entry_intr_info_field & INTR_INFO_VALID_MASK) {
+ u32 intr_info = vmcs12->vm_entry_intr_info_field;
+ u8 vector = intr_info & INTR_INFO_VECTOR_MASK;
+ u32 intr_type = intr_info & INTR_INFO_INTR_TYPE_MASK;
+ bool has_error_code = intr_info & INTR_INFO_DELIVER_CODE_MASK;
+ bool should_have_error_code;
+ bool urg = nested_cpu_has2(vmcs12,
+ SECONDARY_EXEC_UNRESTRICTED_GUEST);
+ bool prot_mode = !urg || vmcs12->guest_cr0 & X86_CR0_PE;
+
+ /* VM-entry interruption-info field: interruption type */
+ if (intr_type == INTR_TYPE_RESERVED ||
+ (intr_type == INTR_TYPE_OTHER_EVENT &&
+ !nested_cpu_supports_monitor_trap_flag(vcpu)))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ /* VM-entry interruption-info field: vector */
+ if ((intr_type == INTR_TYPE_NMI_INTR && vector != NMI_VECTOR) ||
+ (intr_type == INTR_TYPE_HARD_EXCEPTION && vector > 31) ||
+ (intr_type == INTR_TYPE_OTHER_EVENT && vector != 0))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ /* VM-entry interruption-info field: deliver error code */
+ should_have_error_code =
+ intr_type == INTR_TYPE_HARD_EXCEPTION && prot_mode &&
+ x86_exception_has_error_code(vector);
+ if (has_error_code != should_have_error_code)
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ /* VM-entry exception error code */
+ if (has_error_code &&
+ vmcs12->vm_entry_exception_error_code & GENMASK(31, 15))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ /* VM-entry interruption-info field: reserved bits */
+ if (intr_info & INTR_INFO_RESVD_BITS_MASK)
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+
+ /* VM-entry instruction length */
+ switch (intr_type) {
+ case INTR_TYPE_SOFT_EXCEPTION:
+ case INTR_TYPE_SOFT_INTR:
+ case INTR_TYPE_PRIV_SW_EXCEPTION:
+ if ((vmcs12->vm_entry_instruction_len > 15) ||
+ (vmcs12->vm_entry_instruction_len == 0 &&
+ !nested_cpu_has_zero_length_injection(vcpu)))
+ return VMXERR_ENTRY_INVALID_CONTROL_FIELD;
+ }
+ }
+
return 0;
}
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 331993c..257f276 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -110,6 +110,15 @@
#endif
}
+static inline bool x86_exception_has_error_code(unsigned int vector)
+{
+ static u32 exception_has_error_code = BIT(DF_VECTOR) | BIT(TS_VECTOR) |
+ BIT(NP_VECTOR) | BIT(SS_VECTOR) | BIT(GP_VECTOR) |
+ BIT(PF_VECTOR) | BIT(AC_VECTOR);
+
+ return (1U << vector) & exception_has_error_code;
+}
+
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
index 045f492..a688617 100644
--- a/arch/x86/mm/init_64.c
+++ b/arch/x86/mm/init_64.c
@@ -1350,16 +1350,28 @@
/* Amount of ram needed to start using large blocks */
#define MEM_SIZE_FOR_LARGE_BLOCK (64UL << 30)
+/* Adjustable memory block size */
+static unsigned long set_memory_block_size;
+int __init set_memory_block_size_order(unsigned int order)
+{
+ unsigned long size = 1UL << order;
+
+ if (size > MEM_SIZE_FOR_LARGE_BLOCK || size < MIN_MEMORY_BLOCK_SIZE)
+ return -EINVAL;
+
+ set_memory_block_size = size;
+ return 0;
+}
+
static unsigned long probe_memory_block_size(void)
{
unsigned long boot_mem_end = max_pfn << PAGE_SHIFT;
unsigned long bz;
- /* If this is UV system, always set 2G block size */
- if (is_uv_system()) {
- bz = MAX_BLOCK_SIZE;
+ /* If memory block size has been set, then use it */
+ bz = set_memory_block_size;
+ if (bz)
goto done;
- }
/* Use regular block if RAM is smaller than MEM_SIZE_FOR_LARGE_BLOCK */
if (boot_mem_end < MEM_SIZE_FOR_LARGE_BLOCK) {
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c9081c6..3b53185 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -65,6 +65,13 @@
EXPORT_SYMBOL_GPL(xen_have_vector_callback);
/*
+ * NB: needs to live in .data because it's used by xen_prepare_pvh which runs
+ * before clearing the bss.
+ */
+uint32_t xen_start_flags __attribute__((section(".data"))) = 0;
+EXPORT_SYMBOL(xen_start_flags);
+
+/*
* Point at some empty memory to start with. We map the real shared_info
* page as soon as fixmap is up and running.
*/
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 357969a..8d4e2e1 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1203,6 +1203,7 @@
return;
xen_domain_type = XEN_PV_DOMAIN;
+ xen_start_flags = xen_start_info->flags;
xen_setup_features();
diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c
index aa1c6a68..c85d1a8 100644
--- a/arch/x86/xen/enlighten_pvh.c
+++ b/arch/x86/xen/enlighten_pvh.c
@@ -97,6 +97,7 @@
}
xen_pvh = 1;
+ xen_start_flags = pvh_start_info.flags;
msr = cpuid_ebx(xen_cpuid_base() + 2);
pfn = __pa(hypercall_page);
diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c
index 2e20ae2f..e3b18ad 100644
--- a/arch/x86/xen/smp_pv.c
+++ b/arch/x86/xen/smp_pv.c
@@ -32,6 +32,7 @@
#include <xen/interface/vcpu.h>
#include <xen/interface/xenpmu.h>
+#include <asm/spec-ctrl.h>
#include <asm/xen/interface.h>
#include <asm/xen/hypercall.h>
@@ -70,6 +71,8 @@
cpu_data(cpu).x86_max_cores = 1;
set_cpu_sibling_map(cpu);
+ speculative_store_bypass_ht_init();
+
xen_setup_cpu_clockevents();
notify_cpu_starting(cpu);
@@ -250,6 +253,8 @@
}
set_cpu_sibling_map(0);
+ speculative_store_bypass_ht_init();
+
xen_pmu_init(0);
if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
diff --git a/block/bio.c b/block/bio.c
index 9710e27..67eff5e 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -1807,9 +1807,6 @@
if (!bio_integrity_endio(bio))
return;
- if (WARN_ONCE(bio->bi_next, "driver left bi_next not NULL"))
- bio->bi_next = NULL;
-
/*
* Need to have a real endio function for chained bios, otherwise
* various corner cases will break (like stacking block devices that
diff --git a/block/blk-core.c b/block/blk-core.c
index cf0ee76..afd2596 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -273,10 +273,6 @@
bio_advance(bio, nbytes);
/* don't actually finish bio if it's part of flush sequence */
- /*
- * XXX this code looks suspicious - it's not consistent with advancing
- * req->bio in caller
- */
if (bio->bi_iter.bi_size == 0 && !(rq->rq_flags & RQF_FLUSH_SEQ))
bio_endio(bio);
}
@@ -3081,10 +3077,8 @@
struct bio *bio = req->bio;
unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
- if (bio_bytes == bio->bi_iter.bi_size) {
+ if (bio_bytes == bio->bi_iter.bi_size)
req->bio = bio->bi_next;
- bio->bi_next = NULL;
- }
/* Completion has already been traced */
bio_clear_flag(bio, BIO_TRACE_COMPLETION);
diff --git a/block/blk-mq-debugfs.c b/block/blk-mq-debugfs.c
index ffa6223..1c4532e 100644
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -356,7 +356,7 @@
static const char *blk_mq_rq_state_name(enum mq_rq_state rq_state)
{
- if (WARN_ON_ONCE((unsigned int)rq_state >
+ if (WARN_ON_ONCE((unsigned int)rq_state >=
ARRAY_SIZE(blk_mq_rq_state_name_array)))
return "(?)";
return blk_mq_rq_state_name_array[rq_state];
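
Note: this is the classic off-by-one in a table lookup: valid indices
into an N-element array are 0..N-1, so the guard has to reject an index
equal to ARRAY_SIZE() as well. A tiny standalone illustration (names
made up, not the block-layer ones):

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *const state_name[] = { "idle", "in_flight", "complete" };

    /* '>=' is the correct bound: index ARRAY_SIZE itself is out of range. */
    static const char *state_to_name(unsigned int state)
    {
            if (state >= ARRAY_SIZE(state_name))
                    return "(?)";
            return state_name[state];
    }

    int main(void)
    {
            printf("%s\n", state_to_name(2)); /* "complete" */
            printf("%s\n", state_to_name(3)); /* "(?)", rejected by >= */
            return 0;
    }
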
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 70c65bb..b429d51 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -781,7 +781,6 @@
WARN_ON_ONCE(ret != BLK_EH_RESET_TIMER);
}
- req->rq_flags &= ~RQF_TIMED_OUT;
blk_add_timer(req);
}
diff --git a/block/blk-softirq.c b/block/blk-softirq.c
index 01e2b35..15c1f5e 100644
--- a/block/blk-softirq.c
+++ b/block/blk-softirq.c
@@ -144,6 +144,7 @@
local_irq_restore(flags);
}
+EXPORT_SYMBOL(__blk_complete_request);
/**
* blk_complete_request - end I/O on a request
diff --git a/block/blk-timeout.c b/block/blk-timeout.c
index 4b8a48d..f2cfd56 100644
--- a/block/blk-timeout.c
+++ b/block/blk-timeout.c
@@ -210,6 +210,7 @@
if (!req->timeout)
req->timeout = q->rq_timeout;
+ req->rq_flags &= ~RQF_TIMED_OUT;
blk_rq_set_deadline(req, jiffies + req->timeout);
/*
diff --git a/block/sed-opal.c b/block/sed-opal.c
index 945f4b8..e0de4dd 100644
--- a/block/sed-opal.c
+++ b/block/sed-opal.c
@@ -877,7 +877,7 @@
return 0;
}
- if (n > resp->num) {
+ if (n >= resp->num) {
pr_debug("Response has %d tokens. Can't access %d\n",
resp->num, n);
return 0;
@@ -916,7 +916,7 @@
return 0;
}
- if (n > resp->num) {
+ if (n >= resp->num) {
pr_debug("Response has %d tokens. Can't access %d\n",
resp->num, n);
return 0;
diff --git a/crypto/morus640.c b/crypto/morus640.c
index 9fbcde3..5eede37 100644
--- a/crypto/morus640.c
+++ b/crypto/morus640.c
@@ -274,8 +274,9 @@
union morus640_block_in tail;
memcpy(tail.bytes, src, size);
+ memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
- crypto_morus640_load_a(&m, src);
+ crypto_morus640_load_a(&m, tail.bytes);
crypto_morus640_core(state, &m);
crypto_morus640_store_a(tail.bytes, &m);
memset(tail.bytes + size, 0, MORUS640_BLOCK_SIZE - size);
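
Note: the fix zero-pads the tail block before loading it; previously the
load read a full MORUS640_BLOCK_SIZE bytes straight from src and ran
past the end of the input. A sketch of the pad-then-process pattern with
generic names (not the crypto API):

    #include <stdio.h>
    #include <string.h>

    #define BLOCK_SIZE 16

    /* Copy the remaining bytes, zero the rest, and only then hand the
     * full block to the processing routine; reading BLOCK_SIZE bytes
     * directly from 'src' would overrun the input buffer. */
    static void process_tail(const unsigned char *src, size_t size,
                             void (*process_block)(const unsigned char *))
    {
            unsigned char tail[BLOCK_SIZE];

            memcpy(tail, src, size);
            memset(tail + size, 0, BLOCK_SIZE - size);
            process_block(tail);
    }

    static void dump_block(const unsigned char *b)
    {
            for (int i = 0; i < BLOCK_SIZE; i++)
                    printf("%02x", b[i]);
            printf("\n");
    }

    int main(void)
    {
            const unsigned char msg[] = "hello";

            process_tail(msg, 5, dump_block);
            return 0;
    }
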
diff --git a/crypto/sha3_generic.c b/crypto/sha3_generic.c
index 264ec12..7f6735d 100644
--- a/crypto/sha3_generic.c
+++ b/crypto/sha3_generic.c
@@ -152,7 +152,7 @@
st[24] ^= bc[ 4];
}
-static void __optimize("O3") keccakf(u64 st[25])
+static void keccakf(u64 st[25])
{
int round;
diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c
index 38a2869..f8fecfe 100644
--- a/drivers/acpi/acpi_lpss.c
+++ b/drivers/acpi/acpi_lpss.c
@@ -22,6 +22,7 @@
#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/pwm.h>
+#include <linux/suspend.h>
#include <linux/delay.h>
#include "internal.h"
@@ -946,9 +947,10 @@
mutex_unlock(&lpss_iosf_mutex);
}
-static int acpi_lpss_suspend(struct device *dev, bool wakeup)
+static int acpi_lpss_suspend(struct device *dev, bool runtime)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
+ bool wakeup = runtime || device_may_wakeup(dev);
int ret;
if (pdata->dev_desc->flags & LPSS_SAVE_CTX)
@@ -961,13 +963,14 @@
* wrong status for devices being about to be powered off. See
* lpss_iosf_enter_d3_state() for further information.
*/
- if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+ if ((runtime || !pm_suspend_via_firmware()) &&
+ lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_enter_d3_state();
return ret;
}
-static int acpi_lpss_resume(struct device *dev)
+static int acpi_lpss_resume(struct device *dev, bool runtime)
{
struct lpss_private_data *pdata = acpi_driver_data(ACPI_COMPANION(dev));
int ret;
@@ -976,7 +979,8 @@
* This call is kept first to be in symmetry with
* acpi_lpss_runtime_suspend() one.
*/
- if (lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
+ if ((runtime || !pm_resume_via_firmware()) &&
+ lpss_quirks & LPSS_QUIRK_ALWAYS_POWER_ON && iosf_mbi_available())
lpss_iosf_exit_d3_state();
ret = acpi_dev_resume(dev);
@@ -1000,12 +1004,12 @@
return 0;
ret = pm_generic_suspend_late(dev);
- return ret ? ret : acpi_lpss_suspend(dev, device_may_wakeup(dev));
+ return ret ? ret : acpi_lpss_suspend(dev, false);
}
static int acpi_lpss_resume_early(struct device *dev)
{
- int ret = acpi_lpss_resume(dev);
+ int ret = acpi_lpss_resume(dev, false);
return ret ? ret : pm_generic_resume_early(dev);
}
@@ -1020,7 +1024,7 @@
static int acpi_lpss_runtime_resume(struct device *dev)
{
- int ret = acpi_lpss_resume(dev);
+ int ret = acpi_lpss_resume(dev, true);
return ret ? ret : pm_generic_runtime_resume(dev);
}
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index bb94cf0..442a9e2 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -2037,6 +2037,17 @@
}
}
+static const struct dmi_system_id acpi_ec_no_wakeup[] = {
+ {
+ .ident = "Thinkpad X1 Carbon 6th",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "LENOVO"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "20KGS3JF01"),
+ },
+ },
+ { },
+};
+
int __init acpi_ec_init(void)
{
int result;
@@ -2047,6 +2058,15 @@
if (result)
return result;
+ /*
+ * Disable EC wakeup on following systems to prevent periodic
+ * wakeup from EC GPE.
+ */
+ if (dmi_check_system(acpi_ec_no_wakeup)) {
+ ec_no_wakeup = true;
+ pr_debug("Disabling EC wakeup on suspend-to-idle\n");
+ }
+
/* Drivers must be started after acpi_ec_query_init() */
dsdt_fail = acpi_bus_register_driver(&acpi_ec_driver);
/*
diff --git a/drivers/base/core.c b/drivers/base/core.c
index 36622b5..df3e1a4 100644
--- a/drivers/base/core.c
+++ b/drivers/base/core.c
@@ -236,6 +236,13 @@
link->rpm_active = true;
}
pm_runtime_new_link(consumer);
+ /*
+ * If the link is being added by the consumer driver at probe
+ * time, balance the decrementation of the supplier's runtime PM
+ * usage counter after consumer probe in driver_probe_device().
+ */
+ if (consumer->links.status == DL_DEV_PROBING)
+ pm_runtime_get_noresume(supplier);
}
get_device(supplier);
link->supplier = supplier;
@@ -255,12 +262,12 @@
switch (consumer->links.status) {
case DL_DEV_PROBING:
/*
- * Balance the decrementation of the supplier's
- * runtime PM usage counter after consumer probe
- * in driver_probe_device().
+ * Some callers expect the link creation during
+ * consumer driver probe to resume the supplier
+ * even without DL_FLAG_RPM_ACTIVE.
*/
if (flags & DL_FLAG_PM_RUNTIME)
- pm_runtime_get_sync(supplier);
+ pm_runtime_resume(supplier);
link->status = DL_STATE_CONSUMER_PROBE;
break;
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 3b7083b..74a0556 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -76,6 +76,7 @@
#define NBD_HAS_CONFIG_REF 4
#define NBD_BOUND 5
#define NBD_DESTROY_ON_DISCONNECT 6
+#define NBD_DISCONNECT_ON_CLOSE 7
struct nbd_config {
u32 flags;
@@ -138,6 +139,7 @@
static void nbd_connect_reply(struct genl_info *info, int index);
static int nbd_genl_status(struct sk_buff *skb, struct genl_info *info);
static void nbd_dead_link_work(struct work_struct *work);
+static void nbd_disconnect_and_put(struct nbd_device *nbd);
static inline struct device *nbd_to_dev(struct nbd_device *nbd)
{
@@ -1305,6 +1307,12 @@
static void nbd_release(struct gendisk *disk, fmode_t mode)
{
struct nbd_device *nbd = disk->private_data;
+ struct block_device *bdev = bdget_disk(disk, 0);
+
+ if (test_bit(NBD_DISCONNECT_ON_CLOSE, &nbd->config->runtime_flags) &&
+ bdev->bd_openers == 0)
+ nbd_disconnect_and_put(nbd);
+
nbd_config_put(nbd);
nbd_put(nbd);
}
@@ -1705,6 +1713,10 @@
&config->runtime_flags);
put_dev = true;
}
+ if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+ set_bit(NBD_DISCONNECT_ON_CLOSE,
+ &config->runtime_flags);
+ }
}
if (info->attrs[NBD_ATTR_SOCKETS]) {
@@ -1749,6 +1761,17 @@
return ret;
}
+static void nbd_disconnect_and_put(struct nbd_device *nbd)
+{
+ mutex_lock(&nbd->config_lock);
+ nbd_disconnect(nbd);
+ nbd_clear_sock(nbd);
+ mutex_unlock(&nbd->config_lock);
+ if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
+ &nbd->config->runtime_flags))
+ nbd_config_put(nbd);
+}
+
static int nbd_genl_disconnect(struct sk_buff *skb, struct genl_info *info)
{
struct nbd_device *nbd;
@@ -1781,13 +1804,7 @@
nbd_put(nbd);
return 0;
}
- mutex_lock(&nbd->config_lock);
- nbd_disconnect(nbd);
- nbd_clear_sock(nbd);
- mutex_unlock(&nbd->config_lock);
- if (test_and_clear_bit(NBD_HAS_CONFIG_REF,
- &nbd->config->runtime_flags))
- nbd_config_put(nbd);
+ nbd_disconnect_and_put(nbd);
nbd_config_put(nbd);
nbd_put(nbd);
return 0;
@@ -1798,7 +1815,7 @@
struct nbd_device *nbd = NULL;
struct nbd_config *config;
int index;
- int ret = -EINVAL;
+ int ret = 0;
bool put_dev = false;
if (!netlink_capable(skb, CAP_SYS_ADMIN))
@@ -1838,6 +1855,7 @@
!nbd->task_recv) {
dev_err(nbd_to_dev(nbd),
"not configured, cannot reconfigure\n");
+ ret = -EINVAL;
goto out;
}
@@ -1862,6 +1880,14 @@
&config->runtime_flags))
refcount_inc(&nbd->refs);
}
+
+ if (flags & NBD_CFLAG_DISCONNECT_ON_CLOSE) {
+ set_bit(NBD_DISCONNECT_ON_CLOSE,
+ &config->runtime_flags);
+ } else {
+ clear_bit(NBD_DISCONNECT_ON_CLOSE,
+ &config->runtime_flags);
+ }
}
if (info->attrs[NBD_ATTR_SOCKETS]) {
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 7948049..042c778 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -1365,7 +1365,7 @@
static enum blk_eh_timer_return null_rq_timed_out_fn(struct request *rq)
{
pr_info("null: rq %p timed out\n", rq);
- blk_mq_complete_request(rq);
+ __blk_complete_request(rq);
return BLK_EH_DONE;
}
diff --git a/drivers/char/hw_random/core.c b/drivers/char/hw_random/core.c
index 91bb98c..aaf9e5a 100644
--- a/drivers/char/hw_random/core.c
+++ b/drivers/char/hw_random/core.c
@@ -516,11 +516,18 @@
void hwrng_unregister(struct hwrng *rng)
{
+ int err;
+
mutex_lock(&rng_mutex);
list_del(&rng->list);
- if (current_rng == rng)
- enable_best_rng();
+ if (current_rng == rng) {
+ err = enable_best_rng();
+ if (err) {
+ drop_current_rng();
+ cur_rng_set_by_user = 0;
+ }
+ }
if (list_empty(&rng_list)) {
mutex_unlock(&rng_mutex);
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
index e5cdc3a..2717f88 100644
--- a/drivers/clocksource/timer-stm32.c
+++ b/drivers/clocksource/timer-stm32.c
@@ -304,8 +304,10 @@
to->private_data = kzalloc(sizeof(struct stm32_timer_private),
GFP_KERNEL);
- if (!to->private_data)
+ if (!to->private_data) {
+ ret = -ENOMEM;
goto deinit;
+ }
rstc = of_reset_control_get(node, NULL);
if (!IS_ERR(rstc)) {
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index 1de5ec8..ece120d 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -294,6 +294,7 @@
static struct pstate_funcs pstate_funcs __read_mostly;
static int hwp_active __read_mostly;
+static int hwp_mode_bdw __read_mostly;
static bool per_cpu_limits __read_mostly;
static bool hwp_boost __read_mostly;
@@ -1413,7 +1414,15 @@
cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
cpu->pstate.scaling = pstate_funcs.get_scaling();
cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
- cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+
+ if (hwp_active && !hwp_mode_bdw) {
+ unsigned int phy_max, current_max;
+
+ intel_pstate_get_hwp_max(cpu->cpu, &phy_max, &current_max);
+ cpu->pstate.turbo_freq = phy_max * cpu->pstate.scaling;
+ } else {
+ cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
+ }
if (pstate_funcs.get_aperf_mperf_shift)
cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
@@ -2467,28 +2476,36 @@
static inline void intel_pstate_request_control_from_smm(void) {}
#endif /* CONFIG_ACPI */
+#define INTEL_PSTATE_HWP_BROADWELL 0x01
+
+#define ICPU_HWP(model, hwp_mode) \
+ { X86_VENDOR_INTEL, 6, model, X86_FEATURE_HWP, hwp_mode }
+
static const struct x86_cpu_id hwp_support_ids[] __initconst = {
- { X86_VENDOR_INTEL, 6, X86_MODEL_ANY, X86_FEATURE_HWP },
+ ICPU_HWP(INTEL_FAM6_BROADWELL_X, INTEL_PSTATE_HWP_BROADWELL),
+ ICPU_HWP(INTEL_FAM6_BROADWELL_XEON_D, INTEL_PSTATE_HWP_BROADWELL),
+ ICPU_HWP(X86_MODEL_ANY, 0),
{}
};
static int __init intel_pstate_init(void)
{
+ const struct x86_cpu_id *id;
int rc;
if (no_load)
return -ENODEV;
- if (x86_match_cpu(hwp_support_ids)) {
+ id = x86_match_cpu(hwp_support_ids);
+ if (id) {
copy_cpu_funcs(&core_funcs);
if (!no_hwp) {
hwp_active++;
+ hwp_mode_bdw = id->driver_data;
intel_pstate.attr = hwp_cpufreq_attrs;
goto hwp_cpu_matched;
}
} else {
- const struct x86_cpu_id *id;
-
id = x86_match_cpu(intel_pstate_cpu_ids);
if (!id)
return -ENODEV;
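
Note: the driver now keeps the x86_cpu_id entry returned by
x86_match_cpu() and uses its driver_data to flag Broadwell parts,
instead of testing only whether a match exists. A simplified userspace
analogue of a match table with per-entry data (model numbers follow
intel-family.h but are illustrative here):

    #include <stdio.h>

    #define HWP_BROADWELL 0x01

    struct cpu_id {
            int model;                /* -1 matches anything; keep last */
            unsigned long driver_data;
    };

    static const struct cpu_id hwp_ids[] = {
            { 0x4f, HWP_BROADWELL },  /* Broadwell-X */
            { 0x56, HWP_BROADWELL },  /* Broadwell Xeon-D */
            { -1,   0 },              /* any other HWP-capable CPU */
    };

    /* Return the first matching entry; the wildcard terminator
     * guarantees the walk ends. */
    static const struct cpu_id *match_cpu(int model)
    {
            const struct cpu_id *id;

            for (id = hwp_ids; ; id++)
                    if (id->model == model || id->model == -1)
                            return id;
    }

    int main(void)
    {
            printf("0x4f -> bdw=%lu\n", match_cpu(0x4f)->driver_data); /* 1 */
            printf("0x9e -> bdw=%lu\n", match_cpu(0x9e)->driver_data); /* 0 */
            return 0;
    }
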
diff --git a/drivers/cpufreq/qcom-cpufreq-kryo.c b/drivers/cpufreq/qcom-cpufreq-kryo.c
index d049fe4..01bddac 100644
--- a/drivers/cpufreq/qcom-cpufreq-kryo.c
+++ b/drivers/cpufreq/qcom-cpufreq-kryo.c
@@ -42,6 +42,8 @@
NUM_OF_MSM8996_VERSIONS,
};
+struct platform_device *cpufreq_dt_pdev, *kryo_cpufreq_pdev;
+
static enum _msm8996_version __init qcom_cpufreq_kryo_get_msm_id(void)
{
size_t len;
@@ -74,7 +76,6 @@
static int qcom_cpufreq_kryo_probe(struct platform_device *pdev)
{
struct opp_table *opp_tables[NR_CPUS] = {0};
- struct platform_device *cpufreq_dt_pdev;
enum _msm8996_version msm8996_version;
struct nvmem_cell *speedbin_nvmem;
struct device_node *np;
@@ -115,6 +116,8 @@
speedbin = nvmem_cell_read(speedbin_nvmem, &len);
nvmem_cell_put(speedbin_nvmem);
+ if (IS_ERR(speedbin))
+ return PTR_ERR(speedbin);
switch (msm8996_version) {
case MSM8996_V3:
@@ -127,6 +130,7 @@
BUG();
break;
}
+ kfree(speedbin);
for_each_possible_cpu(cpu) {
cpu_dev = get_cpu_device(cpu);
@@ -162,8 +166,15 @@
return ret;
}
+static int qcom_cpufreq_kryo_remove(struct platform_device *pdev)
+{
+ platform_device_unregister(cpufreq_dt_pdev);
+ return 0;
+}
+
static struct platform_driver qcom_cpufreq_kryo_driver = {
.probe = qcom_cpufreq_kryo_probe,
+ .remove = qcom_cpufreq_kryo_remove,
.driver = {
.name = "qcom-cpufreq-kryo",
},
@@ -198,8 +209,9 @@
if (unlikely(ret < 0))
return ret;
- ret = PTR_ERR_OR_ZERO(platform_device_register_simple(
- "qcom-cpufreq-kryo", -1, NULL, 0));
+ kryo_cpufreq_pdev = platform_device_register_simple(
+ "qcom-cpufreq-kryo", -1, NULL, 0);
+ ret = PTR_ERR_OR_ZERO(kryo_cpufreq_pdev);
if (0 == ret)
return 0;
@@ -208,5 +220,12 @@
}
module_init(qcom_cpufreq_kryo_init);
+static void __exit qcom_cpufreq_kryo_exit(void)
+{
+ platform_device_unregister(kryo_cpufreq_pdev);
+ platform_driver_unregister(&qcom_cpufreq_kryo_driver);
+}
+module_exit(qcom_cpufreq_kryo_exit);
+
MODULE_DESCRIPTION("Qualcomm Technologies, Inc. Kryo CPUfreq driver");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/crypto/chelsio/chtls/chtls_io.c b/drivers/crypto/chelsio/chtls/chtls_io.c
index 00c7aab..afebbd8 100644
--- a/drivers/crypto/chelsio/chtls/chtls_io.c
+++ b/drivers/crypto/chelsio/chtls/chtls_io.c
@@ -1548,15 +1548,14 @@
tp->urg_data = 0;
if ((avail + offset) >= skb->len) {
- if (likely(skb))
- chtls_free_skb(sk, skb);
- buffers_freed++;
if (ULP_SKB_CB(skb)->flags & ULPCB_FLAG_TLS_HDR) {
tp->copied_seq += skb->len;
hws->rcvpld = skb->hdr_len;
} else {
tp->copied_seq += hws->rcvpld;
}
+ chtls_free_skb(sk, skb);
+ buffers_freed++;
hws->copied_seq = 0;
if (copied >= target &&
!skb_peek(&sk->sk_receive_queue))
diff --git a/drivers/firmware/efi/libstub/tpm.c b/drivers/firmware/efi/libstub/tpm.c
index caa37a6..a90b0b8 100644
--- a/drivers/firmware/efi/libstub/tpm.c
+++ b/drivers/firmware/efi/libstub/tpm.c
@@ -64,7 +64,7 @@
efi_guid_t tcg2_guid = EFI_TCG2_PROTOCOL_GUID;
efi_guid_t linux_eventlog_guid = LINUX_EFI_TPM_EVENT_LOG_GUID;
efi_status_t status;
- efi_physical_addr_t log_location, log_last_entry;
+ efi_physical_addr_t log_location = 0, log_last_entry = 0;
struct linux_efi_tpm_eventlog *log_tbl = NULL;
unsigned long first_entry_addr, last_entry_addr;
size_t log_size, last_entry_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 3317d15..6e5284e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -2158,10 +2158,18 @@
switch (asic_type) {
#if defined(CONFIG_DRM_AMD_DC)
case CHIP_BONAIRE:
- case CHIP_HAWAII:
case CHIP_KAVERI:
case CHIP_KABINI:
case CHIP_MULLINS:
+ /*
+ * We have systems in the wild with these ASICs that require
+ * LVDS and VGA support which is not supported with DC.
+ *
+ * Fallback to the non-DC driver here by default so as not to
+ * cause regressions.
+ */
+ return amdgpu_dc > 0;
+ case CHIP_HAWAII:
case CHIP_CARRIZO:
case CHIP_STONEY:
case CHIP_POLARIS10:
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index 5e4e1bd..3526efa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -762,8 +762,7 @@
domain = amdgpu_mem_type_to_domain(bo->tbo.mem.mem_type);
if (domain == AMDGPU_GEM_DOMAIN_VRAM) {
adev->vram_pin_size += amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- adev->invisible_pin_size += amdgpu_bo_size(bo);
+ adev->invisible_pin_size += amdgpu_vram_mgr_bo_invisible_size(bo);
} else if (domain == AMDGPU_GEM_DOMAIN_GTT) {
adev->gart_pin_size += amdgpu_bo_size(bo);
}
@@ -790,25 +789,22 @@
bo->pin_count--;
if (bo->pin_count)
return 0;
+
+ if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
+ adev->vram_pin_size -= amdgpu_bo_size(bo);
+ adev->invisible_pin_size -= amdgpu_vram_mgr_bo_invisible_size(bo);
+ } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
+ adev->gart_pin_size -= amdgpu_bo_size(bo);
+ }
+
for (i = 0; i < bo->placement.num_placement; i++) {
bo->placements[i].lpfn = 0;
bo->placements[i].flags &= ~TTM_PL_FLAG_NO_EVICT;
}
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
- if (unlikely(r)) {
+ if (unlikely(r))
dev_err(adev->dev, "%p validate failed for unpin\n", bo);
- goto error;
- }
- if (bo->tbo.mem.mem_type == TTM_PL_VRAM) {
- adev->vram_pin_size -= amdgpu_bo_size(bo);
- if (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)
- adev->invisible_pin_size -= amdgpu_bo_size(bo);
- } else if (bo->tbo.mem.mem_type == TTM_PL_TT) {
- adev->gart_pin_size -= amdgpu_bo_size(bo);
- }
-
-error:
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
index e969c87..e5da465 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h
@@ -73,6 +73,7 @@
uint64_t amdgpu_gtt_mgr_usage(struct ttm_mem_type_manager *man);
int amdgpu_gtt_mgr_recover(struct ttm_mem_type_manager *man);
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo);
uint64_t amdgpu_vram_mgr_usage(struct ttm_mem_type_manager *man);
uint64_t amdgpu_vram_mgr_vis_usage(struct ttm_mem_type_manager *man);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index bcf68f8..3ff08e3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -130,7 +130,7 @@
unsigned version_major, version_minor, family_id;
int i, j, r;
- INIT_DELAYED_WORK(&adev->uvd.inst->idle_work, amdgpu_uvd_idle_work_handler);
+ INIT_DELAYED_WORK(&adev->uvd.idle_work, amdgpu_uvd_idle_work_handler);
switch (adev->asic_type) {
#ifdef CONFIG_DRM_AMDGPU_CIK
@@ -314,12 +314,12 @@
void *ptr;
int i, j;
+ cancel_delayed_work_sync(&adev->uvd.idle_work);
+
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
if (adev->uvd.inst[j].vcpu_bo == NULL)
continue;
- cancel_delayed_work_sync(&adev->uvd.inst[j].idle_work);
-
/* only valid for physical mode */
if (adev->asic_type < CHIP_POLARIS10) {
for (i = 0; i < adev->uvd.max_handles; ++i)
@@ -1145,7 +1145,7 @@
static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
{
struct amdgpu_device *adev =
- container_of(work, struct amdgpu_device, uvd.inst->idle_work.work);
+ container_of(work, struct amdgpu_device, uvd.idle_work.work);
unsigned fences = 0, i, j;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
@@ -1167,7 +1167,7 @@
AMD_CG_STATE_GATE);
}
} else {
- schedule_delayed_work(&adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+ schedule_delayed_work(&adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
}
@@ -1179,7 +1179,7 @@
if (amdgpu_sriov_vf(adev))
return;
- set_clocks = !cancel_delayed_work_sync(&adev->uvd.inst->idle_work);
+ set_clocks = !cancel_delayed_work_sync(&adev->uvd.idle_work);
if (set_clocks) {
if (adev->pm.dpm_enabled) {
amdgpu_dpm_enable_uvd(adev, true);
@@ -1196,7 +1196,7 @@
void amdgpu_uvd_ring_end_use(struct amdgpu_ring *ring)
{
if (!amdgpu_sriov_vf(ring->adev))
- schedule_delayed_work(&ring->adev->uvd.inst->idle_work, UVD_IDLE_TIMEOUT);
+ schedule_delayed_work(&ring->adev->uvd.idle_work, UVD_IDLE_TIMEOUT);
}
/**
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index b1579fb..8b23a1b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -44,7 +44,6 @@
void *saved_bo;
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_file *filp[AMDGPU_MAX_UVD_HANDLES];
- struct delayed_work idle_work;
struct amdgpu_ring ring;
struct amdgpu_ring ring_enc[AMDGPU_MAX_UVD_ENC_RINGS];
struct amdgpu_irq_src irq;
@@ -62,6 +61,7 @@
bool address_64_bit;
bool use_ctx_buf;
struct amdgpu_uvd_inst inst[AMDGPU_MAX_UVD_INSTANCES];
+ struct delayed_work idle_work;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
index 9aca653..b6333f9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vram_mgr.c
@@ -97,6 +97,38 @@
}
/**
+ * amdgpu_vram_mgr_bo_invisible_size - CPU invisible BO size
+ *
+ * @bo: &amdgpu_bo buffer object (must be in VRAM)
+ *
+ * Returns:
+ * How much of the given &amdgpu_bo buffer object lies in CPU invisible VRAM.
+ */
+u64 amdgpu_vram_mgr_bo_invisible_size(struct amdgpu_bo *bo)
+{
+ struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
+ struct ttm_mem_reg *mem = &bo->tbo.mem;
+ struct drm_mm_node *nodes = mem->mm_node;
+ unsigned pages = mem->num_pages;
+ u64 usage = 0;
+
+ if (adev->gmc.visible_vram_size == adev->gmc.real_vram_size)
+ return 0;
+
+ if (mem->start >= adev->gmc.visible_vram_size >> PAGE_SHIFT)
+ return amdgpu_bo_size(bo);
+
+ while (nodes && pages) {
+ usage += nodes->size << PAGE_SHIFT;
+ usage -= amdgpu_vram_mgr_vis_size(adev, nodes);
+ pages -= nodes->size;
+ ++nodes;
+ }
+
+ return usage;
+}
+
+/**
* amdgpu_vram_mgr_new - allocate new ranges
*
* @man: TTM memory type manager
@@ -135,7 +167,8 @@
num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
}
- nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
+ nodes = kvmalloc_array(num_nodes, sizeof(*nodes),
+ GFP_KERNEL | __GFP_ZERO);
if (!nodes)
return -ENOMEM;
@@ -190,7 +223,7 @@
drm_mm_remove_node(&nodes[i]);
spin_unlock(&mgr->lock);
- kfree(nodes);
+ kvfree(nodes);
return r == -ENOSPC ? 0 : r;
}
@@ -229,7 +262,7 @@
atomic64_sub(usage, &mgr->usage);
atomic64_sub(vis_usage, &mgr->vis_usage);
- kfree(mem->mm_node);
+ kvfree(mem->mm_node);
mem->mm_node = NULL;
}
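
Note: amdgpu_vram_mgr_bo_invisible_size() walks the buffer's drm_mm
nodes and, per node, counts the whole node minus whatever part falls
inside the CPU-visible window. A standalone sketch of that accounting
with simplified types (not the amdgpu structures):

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT 12

    struct node { uint64_t start, size; };     /* both in pages */

    /* Sum the bytes of each node that lie above 'visible' pages. */
    static uint64_t invisible_size(const struct node *nodes, int n,
                                   uint64_t visible)
    {
            uint64_t usage = 0;

            for (int i = 0; i < n; i++) {
                    uint64_t end = nodes[i].start + nodes[i].size;
                    uint64_t vis = 0;

                    if (nodes[i].start < visible)
                            vis = (end < visible ? end : visible)
                                    - nodes[i].start;
                    usage += (nodes[i].size - vis) << PAGE_SHIFT;
            }
            return usage;
    }

    int main(void)
    {
            /* 256M visible window = 65536 pages; one node straddles it. */
            struct node nodes[] = { { 65000, 1000 } };

            printf("%llu bytes invisible\n", (unsigned long long)
                   invisible_size(nodes, 1, 65536));     /* 464 pages */
            return 0;
    }
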
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
index dbe4b1f..2236487 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_powertune.c
@@ -1090,7 +1090,7 @@
static int vega10_enable_psm_gc_edc_config(struct pp_hwmgr *hwmgr)
{
struct amdgpu_device *adev = hwmgr->adev;
- int result;
+ int result = 0;
uint32_t num_se = 0;
uint32_t count, data;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 73c875d..47e0992 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -839,7 +839,7 @@
return ret;
}
- if (desc->layout.xstride && desc->layout.pstride) {
+ if (desc->layout.xstride[0] && desc->layout.pstride[0]) {
int ret;
ret = drm_plane_create_rotation_property(&plane->base,
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 7ab3604..250effa 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -36,8 +36,11 @@
#define SII8620_BURST_BUF_LEN 288
#define VAL_RX_HDMI_CTRL2_DEFVAL VAL_RX_HDMI_CTRL2_IDLE_CNT(3)
-#define MHL1_MAX_LCLK 225000
-#define MHL3_MAX_LCLK 600000
+
+#define MHL1_MAX_PCLK 75000
+#define MHL1_MAX_PCLK_PP_MODE 150000
+#define MHL3_MAX_PCLK 200000
+#define MHL3_MAX_PCLK_PP_MODE 300000
enum sii8620_mode {
CM_DISCONNECTED,
@@ -80,6 +83,9 @@
u8 devcap[MHL_DCAP_SIZE];
u8 xdevcap[MHL_XDC_SIZE];
u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
+ bool feature_complete;
+ bool devcap_read;
+ bool sink_detected;
struct edid *edid;
unsigned int gen2_write_burst:1;
enum sii8620_mt_state mt_state;
@@ -476,7 +482,7 @@
}
}
-static void sii8620_sink_detected(struct sii8620 *ctx, int ret)
+static void sii8620_identify_sink(struct sii8620 *ctx)
{
static const char * const sink_str[] = {
[SINK_NONE] = "NONE",
@@ -487,7 +493,7 @@
char sink_name[20];
struct device *dev = ctx->dev;
- if (ret < 0)
+ if (!ctx->sink_detected || !ctx->devcap_read)
return;
sii8620_fetch_edid(ctx);
@@ -496,6 +502,7 @@
sii8620_mhl_disconnected(ctx);
return;
}
+ sii8620_set_upstream_edid(ctx);
if (drm_detect_hdmi_monitor(ctx->edid))
ctx->sink_type = SINK_HDMI;
@@ -508,53 +515,6 @@
sink_str[ctx->sink_type], sink_name);
}
-static void sii8620_hsic_init(struct sii8620 *ctx)
-{
- if (!sii8620_is_mhl3(ctx))
- return;
-
- sii8620_write(ctx, REG_FCGC,
- BIT_FCGC_HSIC_HOSTMODE | BIT_FCGC_HSIC_ENABLE);
- sii8620_setbits(ctx, REG_HRXCTRL3,
- BIT_HRXCTRL3_HRX_STAY_RESET | BIT_HRXCTRL3_STATUS_EN, ~0);
- sii8620_setbits(ctx, REG_TTXNUMB, MSK_TTXNUMB_TTX_NUMBPS, 4);
- sii8620_setbits(ctx, REG_TRXCTRL, BIT_TRXCTRL_TRX_FROM_SE_COC, ~0);
- sii8620_setbits(ctx, REG_HTXCTRL, BIT_HTXCTRL_HTX_DRVCONN1, 0);
- sii8620_setbits(ctx, REG_KEEPER, MSK_KEEPER_MODE, VAL_KEEPER_MODE_HOST);
- sii8620_write_seq_static(ctx,
- REG_TDMLLCTL, 0,
- REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST |
- BIT_UTSRST_KEEPER_SRST | BIT_UTSRST_FC_SRST,
- REG_UTSRST, BIT_UTSRST_HRX_SRST | BIT_UTSRST_HTX_SRST,
- REG_HRXINTL, 0xff,
- REG_HRXINTH, 0xff,
- REG_TTXINTL, 0xff,
- REG_TTXINTH, 0xff,
- REG_TRXINTL, 0xff,
- REG_TRXINTH, 0xff,
- REG_HTXINTL, 0xff,
- REG_HTXINTH, 0xff,
- REG_FCINTR0, 0xff,
- REG_FCINTR1, 0xff,
- REG_FCINTR2, 0xff,
- REG_FCINTR3, 0xff,
- REG_FCINTR4, 0xff,
- REG_FCINTR5, 0xff,
- REG_FCINTR6, 0xff,
- REG_FCINTR7, 0xff
- );
-}
-
-static void sii8620_edid_read(struct sii8620 *ctx, int ret)
-{
- if (ret < 0)
- return;
-
- sii8620_set_upstream_edid(ctx);
- sii8620_hsic_init(ctx);
- sii8620_enable_hpd(ctx);
-}
-
static void sii8620_mr_devcap(struct sii8620 *ctx)
{
u8 dcap[MHL_DCAP_SIZE];
@@ -570,6 +530,8 @@
dcap[MHL_DCAP_ADOPTER_ID_H], dcap[MHL_DCAP_ADOPTER_ID_L],
dcap[MHL_DCAP_DEVICE_ID_H], dcap[MHL_DCAP_DEVICE_ID_L]);
sii8620_update_array(ctx->devcap, dcap, MHL_DCAP_SIZE);
+ ctx->devcap_read = true;
+ sii8620_identify_sink(ctx);
}
static void sii8620_mr_xdevcap(struct sii8620 *ctx)
@@ -807,6 +769,7 @@
static void sii8620_fetch_edid(struct sii8620 *ctx)
{
u8 lm_ddc, ddc_cmd, int3, cbus;
+ unsigned long timeout;
int fetched, i;
int edid_len = EDID_LENGTH;
u8 *edid;
@@ -856,23 +819,31 @@
REG_DDC_CMD, ddc_cmd | VAL_DDC_CMD_ENH_DDC_READ_NO_ACK
);
- do {
- int3 = sii8620_readb(ctx, REG_INTR3);
+ int3 = 0;
+ timeout = jiffies + msecs_to_jiffies(200);
+ for (;;) {
cbus = sii8620_readb(ctx, REG_CBUS_STATUS);
-
- if (int3 & BIT_DDC_CMD_DONE)
- break;
-
- if (!(cbus & BIT_CBUS_STATUS_CBUS_CONNECTED)) {
+ if (~cbus & BIT_CBUS_STATUS_CBUS_CONNECTED) {
kfree(edid);
edid = NULL;
goto end;
}
- } while (1);
-
- sii8620_readb(ctx, REG_DDC_STATUS);
- while (sii8620_readb(ctx, REG_DDC_DOUT_CNT) < FETCH_SIZE)
+ if (int3 & BIT_DDC_CMD_DONE) {
+ if (sii8620_readb(ctx, REG_DDC_DOUT_CNT)
+ >= FETCH_SIZE)
+ break;
+ } else {
+ int3 = sii8620_readb(ctx, REG_INTR3);
+ }
+ if (time_is_before_jiffies(timeout)) {
+ ctx->error = -ETIMEDOUT;
+ dev_err(ctx->dev, "timeout during EDID read\n");
+ kfree(edid);
+ edid = NULL;
+ goto end;
+ }
usleep_range(10, 20);
+ }
sii8620_read_buf(ctx, REG_DDC_DATA, edid + fetched, FETCH_SIZE);
if (fetched + FETCH_SIZE == EDID_LENGTH) {
@@ -971,8 +942,17 @@
ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
if (ret)
return ret;
+
usleep_range(10000, 20000);
- return clk_prepare_enable(ctx->clk_xtal);
+ ret = clk_prepare_enable(ctx->clk_xtal);
+ if (ret)
+ return ret;
+
+ msleep(100);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ msleep(100);
+
+ return 0;
}
static int sii8620_hw_off(struct sii8620 *ctx)
@@ -982,17 +962,6 @@
return regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
}
-static void sii8620_hw_reset(struct sii8620 *ctx)
-{
- usleep_range(10000, 20000);
- gpiod_set_value(ctx->gpio_reset, 0);
- usleep_range(5000, 20000);
- gpiod_set_value(ctx->gpio_reset, 1);
- usleep_range(10000, 20000);
- gpiod_set_value(ctx->gpio_reset, 0);
- msleep(300);
-}
-
static void sii8620_cbus_reset(struct sii8620 *ctx)
{
sii8620_write(ctx, REG_PWD_SRST, BIT_PWD_SRST_CBUS_RST
@@ -1048,20 +1017,11 @@
static void sii8620_set_format(struct sii8620 *ctx)
{
- u8 out_fmt;
-
if (sii8620_is_mhl3(ctx)) {
sii8620_setbits(ctx, REG_M3_P0CTRL,
BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
ctx->use_packed_pixel ? ~0 : 0);
} else {
- if (ctx->use_packed_pixel)
- sii8620_write_seq_static(ctx,
- REG_VID_MODE, BIT_VID_MODE_M1080P,
- REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
- REG_MHLTX_CTL6, 0x60
- );
- else
sii8620_write_seq_static(ctx,
REG_VID_MODE, 0,
REG_MHL_TOP_CTL, 1,
@@ -1069,15 +1029,9 @@
);
}
- if (ctx->use_packed_pixel)
- out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL) |
- BIT_TPI_OUTPUT_CSCMODE709;
- else
- out_fmt = VAL_TPI_FORMAT(RGB, FULL);
-
sii8620_write_seq(ctx,
REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
- REG_TPI_OUTPUT, out_fmt,
+ REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
);
}
@@ -1216,7 +1170,7 @@
int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
int i;
- for (i = 0; i < ARRAY_SIZE(clk_spec); ++i)
+ for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
if (clk < clk_spec[i].max_clk)
break;
@@ -1534,6 +1488,16 @@
);
}
+static void sii8620_hpd_unplugged(struct sii8620 *ctx)
+{
+ sii8620_disable_hpd(ctx);
+ ctx->sink_type = SINK_NONE;
+ ctx->sink_detected = false;
+ ctx->feature_complete = false;
+ kfree(ctx->edid);
+ ctx->edid = NULL;
+}
+
static void sii8620_disconnect(struct sii8620 *ctx)
{
sii8620_disable_gen2_write_burst(ctx);
@@ -1561,7 +1525,7 @@
REG_MHL_DP_CTL6, 0x2A,
REG_MHL_DP_CTL7, 0x03
);
- sii8620_disable_hpd(ctx);
+ sii8620_hpd_unplugged(ctx);
sii8620_write_seq_static(ctx,
REG_M3_CTRL, VAL_M3_CTRL_MHL3_VALUE,
REG_MHL_COC_CTL1, 0x07,
@@ -1609,10 +1573,8 @@
memset(ctx->xstat, 0, sizeof(ctx->xstat));
memset(ctx->devcap, 0, sizeof(ctx->devcap));
memset(ctx->xdevcap, 0, sizeof(ctx->xdevcap));
+ ctx->devcap_read = false;
ctx->cbus_status = 0;
- ctx->sink_type = SINK_NONE;
- kfree(ctx->edid);
- ctx->edid = NULL;
sii8620_mt_cleanup(ctx);
}
@@ -1703,9 +1665,6 @@
sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
MHL_DST_LM_CLK_MODE_NORMAL
| MHL_DST_LM_PATH_ENABLED);
- if (!sii8620_is_mhl3(ctx))
- sii8620_mt_read_devcap(ctx, false);
- sii8620_mt_set_cont(ctx, sii8620_sink_detected);
} else {
sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
MHL_DST_LM_CLK_MODE_NORMAL);
@@ -1722,9 +1681,14 @@
sii8620_update_array(ctx->stat, st, MHL_DST_SIZE);
sii8620_update_array(ctx->xstat, xst, MHL_XDS_SIZE);
- if (ctx->stat[MHL_DST_CONNECTED_RDY] & MHL_DST_CONN_DCAP_RDY)
+ if (ctx->stat[MHL_DST_CONNECTED_RDY] & st[MHL_DST_CONNECTED_RDY] &
+ MHL_DST_CONN_DCAP_RDY) {
sii8620_status_dcap_ready(ctx);
+ if (!sii8620_is_mhl3(ctx))
+ sii8620_mt_read_devcap(ctx, false);
+ }
+
if (st[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
sii8620_status_changed_path(ctx);
}
@@ -1808,8 +1772,11 @@
}
if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_REQ)
sii8620_send_features(ctx);
- if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE)
- sii8620_edid_read(ctx, 0);
+ if (ints[MHL_INT_RCHANGE] & MHL_INT_RC_FEAT_COMPLETE) {
+ ctx->feature_complete = true;
+ if (ctx->edid)
+ sii8620_enable_hpd(ctx);
+ }
}
static struct sii8620_mt_msg *sii8620_msc_msg_first(struct sii8620 *ctx)
@@ -1884,6 +1851,15 @@
if (stat & BIT_CBUS_MSC_MR_WRITE_STAT)
sii8620_msc_mr_write_stat(ctx);
+ if (stat & BIT_CBUS_HPD_CHG) {
+ if (ctx->cbus_status & BIT_CBUS_STATUS_CBUS_HPD) {
+ ctx->sink_detected = true;
+ sii8620_identify_sink(ctx);
+ } else {
+ sii8620_hpd_unplugged(ctx);
+ }
+ }
+
if (stat & BIT_CBUS_MSC_MR_SET_INT)
sii8620_msc_mr_set_int(ctx);
@@ -1931,14 +1907,6 @@
ctx->mt_state = MT_STATE_DONE;
}
-static void sii8620_scdt_high(struct sii8620 *ctx)
-{
- sii8620_write_seq_static(ctx,
- REG_INTR8_MASK, BIT_CEA_NEW_AVI | BIT_CEA_NEW_VSI,
- REG_TPI_SC, BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI,
- );
-}
-
static void sii8620_irq_scdt(struct sii8620 *ctx)
{
u8 stat = sii8620_readb(ctx, REG_INTR5);
@@ -1946,53 +1914,13 @@
if (stat & BIT_INTR_SCDT_CHANGE) {
u8 cstat = sii8620_readb(ctx, REG_TMDS_CSTAT_P3);
- if (cstat & BIT_TMDS_CSTAT_P3_SCDT) {
- if (ctx->sink_type == SINK_HDMI)
- /* enable infoframe interrupt */
- sii8620_scdt_high(ctx);
- else
- sii8620_start_video(ctx);
- }
+ if (cstat & BIT_TMDS_CSTAT_P3_SCDT)
+ sii8620_start_video(ctx);
}
sii8620_write(ctx, REG_INTR5, stat);
}
-static void sii8620_new_vsi(struct sii8620 *ctx)
-{
- u8 vsif[11];
-
- sii8620_write(ctx, REG_RX_HDMI_CTRL2,
- VAL_RX_HDMI_CTRL2_DEFVAL |
- BIT_RX_HDMI_CTRL2_VSI_MON_SEL_VSI);
- sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, vsif,
- ARRAY_SIZE(vsif));
-}
-
-static void sii8620_new_avi(struct sii8620 *ctx)
-{
- sii8620_write(ctx, REG_RX_HDMI_CTRL2, VAL_RX_HDMI_CTRL2_DEFVAL);
- sii8620_read_buf(ctx, REG_RX_HDMI_MON_PKT_HEADER1, ctx->avif,
- ARRAY_SIZE(ctx->avif));
-}
-
-static void sii8620_irq_infr(struct sii8620 *ctx)
-{
- u8 stat = sii8620_readb(ctx, REG_INTR8)
- & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI);
-
- sii8620_write(ctx, REG_INTR8, stat);
-
- if (stat & BIT_CEA_NEW_VSI)
- sii8620_new_vsi(ctx);
-
- if (stat & BIT_CEA_NEW_AVI)
- sii8620_new_avi(ctx);
-
- if (stat & (BIT_CEA_NEW_VSI | BIT_CEA_NEW_AVI))
- sii8620_start_video(ctx);
-}
-
static void sii8620_got_xdevcap(struct sii8620 *ctx, int ret)
{
if (ret < 0)
@@ -2043,11 +1971,11 @@
if (stat & BIT_DDC_CMD_DONE) {
sii8620_write(ctx, REG_INTR3_MASK, 0);
- if (sii8620_is_mhl3(ctx))
+ if (sii8620_is_mhl3(ctx) && !ctx->feature_complete)
sii8620_mt_set_int(ctx, MHL_INT_REG(RCHANGE),
MHL_INT_RC_FEAT_REQ);
else
- sii8620_edid_read(ctx, 0);
+ sii8620_enable_hpd(ctx);
}
sii8620_write(ctx, REG_INTR3, stat);
}
@@ -2074,7 +2002,6 @@
{ BIT_FAST_INTR_STAT_EDID, sii8620_irq_edid },
{ BIT_FAST_INTR_STAT_DDC, sii8620_irq_ddc },
{ BIT_FAST_INTR_STAT_SCDT, sii8620_irq_scdt },
- { BIT_FAST_INTR_STAT_INFR, sii8620_irq_infr },
};
struct sii8620 *ctx = data;
u8 stats[LEN_FAST_INTR_STAT];
@@ -2112,7 +2039,6 @@
dev_err(dev, "Error powering on, %d.\n", ret);
return;
}
- sii8620_hw_reset(ctx);
sii8620_read_buf(ctx, REG_VND_IDL, ver, ARRAY_SIZE(ver));
ret = sii8620_clear_error(ctx);
@@ -2268,17 +2194,43 @@
rc_unregister_device(ctx->rc_dev);
}
+static int sii8620_is_packing_required(struct sii8620 *ctx,
+ const struct drm_display_mode *mode)
+{
+ int max_pclk, max_pclk_pp_mode;
+
+ if (sii8620_is_mhl3(ctx)) {
+ max_pclk = MHL3_MAX_PCLK;
+ max_pclk_pp_mode = MHL3_MAX_PCLK_PP_MODE;
+ } else {
+ max_pclk = MHL1_MAX_PCLK;
+ max_pclk_pp_mode = MHL1_MAX_PCLK_PP_MODE;
+ }
+
+ if (mode->clock < max_pclk)
+ return 0;
+ else if (mode->clock < max_pclk_pp_mode)
+ return 1;
+ else
+ return -1;
+}
+
static enum drm_mode_status sii8620_mode_valid(struct drm_bridge *bridge,
const struct drm_display_mode *mode)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
+ int pack_required = sii8620_is_packing_required(ctx, mode);
bool can_pack = ctx->devcap[MHL_DCAP_VID_LINK_MODE] &
MHL_DCAP_VID_LINK_PPIXEL;
- unsigned int max_pclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK :
- MHL1_MAX_LCLK;
- max_pclk /= can_pack ? 2 : 3;
- return (mode->clock > max_pclk) ? MODE_CLOCK_HIGH : MODE_OK;
+ switch (pack_required) {
+ case 0:
+ return MODE_OK;
+ case 1:
+ return (can_pack) ? MODE_OK : MODE_CLOCK_HIGH;
+ default:
+ return MODE_CLOCK_HIGH;
+ }
}
static bool sii8620_mode_fixup(struct drm_bridge *bridge,
@@ -2286,43 +2238,16 @@
struct drm_display_mode *adjusted_mode)
{
struct sii8620 *ctx = bridge_to_sii8620(bridge);
- int max_lclk;
- bool ret = true;
mutex_lock(&ctx->lock);
- max_lclk = sii8620_is_mhl3(ctx) ? MHL3_MAX_LCLK : MHL1_MAX_LCLK;
- if (max_lclk > 3 * adjusted_mode->clock) {
- ctx->use_packed_pixel = 0;
- goto end;
- }
- if ((ctx->devcap[MHL_DCAP_VID_LINK_MODE] & MHL_DCAP_VID_LINK_PPIXEL) &&
- max_lclk > 2 * adjusted_mode->clock) {
- ctx->use_packed_pixel = 1;
- goto end;
- }
- ret = false;
-end:
- if (ret) {
- u8 vic = drm_match_cea_mode(adjusted_mode);
+ ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
+ ctx->video_code = drm_match_cea_mode(adjusted_mode);
+ ctx->pixel_clock = adjusted_mode->clock;
- if (!vic) {
- union hdmi_infoframe frm;
- u8 mhl_vic[] = { 0, 95, 94, 93, 98 };
-
- /* FIXME: We need the connector here */
- drm_hdmi_vendor_infoframe_from_display_mode(
- &frm.vendor.hdmi, NULL, adjusted_mode);
- vic = frm.vendor.hdmi.vic;
- if (vic >= ARRAY_SIZE(mhl_vic))
- vic = 0;
- vic = mhl_vic[vic];
- }
- ctx->video_code = vic;
- ctx->pixel_clock = adjusted_mode->clock;
- }
mutex_unlock(&ctx->lock);
- return ret;
+
+ return true;
}
static const struct drm_bridge_funcs sii8620_bridge_funcs = {
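
Note: the rework replaces the old link-clock arithmetic with explicit
pixel-clock ceilings and a three-way verdict: the mode fits unpacked,
fits only in packed-pixel mode, or does not fit at all, which both
mode_valid and mode_fixup now share. A userspace sketch of the
classification using the limits from the patch:

    #include <stdio.h>

    /* Pixel-clock limits in kHz, per MHL generation (from the patch). */
    #define MHL1_MAX_PCLK          75000
    #define MHL1_MAX_PCLK_PP_MODE 150000
    #define MHL3_MAX_PCLK         200000
    #define MHL3_MAX_PCLK_PP_MODE 300000

    /* 0: fits unpacked, 1: needs packed pixels, -1: too fast. */
    static int is_packing_required(int is_mhl3, int clock_khz)
    {
            int max_pclk = is_mhl3 ? MHL3_MAX_PCLK : MHL1_MAX_PCLK;
            int max_pp = is_mhl3 ? MHL3_MAX_PCLK_PP_MODE
                                 : MHL1_MAX_PCLK_PP_MODE;

            if (clock_khz < max_pclk)
                    return 0;
            if (clock_khz < max_pp)
                    return 1;
            return -1;
    }

    int main(void)
    {
            printf("MHL1 @ 148500: %d\n", is_packing_required(0, 148500)); /* 1 */
            printf("MHL3 @ 148500: %d\n", is_packing_required(1, 148500)); /* 0 */
            printf("MHL1 @ 297000: %d\n", is_packing_required(0, 297000)); /* -1 */
            return 0;
    }
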
diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
index b553a6f..7af748e 100644
--- a/drivers/gpu/drm/drm_drv.c
+++ b/drivers/gpu/drm/drm_drv.c
@@ -369,13 +369,6 @@
*/
void drm_dev_unplug(struct drm_device *dev)
{
- drm_dev_unregister(dev);
-
- mutex_lock(&drm_global_mutex);
- if (dev->open_count == 0)
- drm_dev_put(dev);
- mutex_unlock(&drm_global_mutex);
-
/*
* After synchronizing any critical read section is guaranteed to see
* the new value of ->unplugged, and any critical section which might
@@ -384,6 +377,13 @@
*/
dev->unplugged = true;
synchronize_srcu(&drm_unplug_srcu);
+
+ drm_dev_unregister(dev);
+
+ mutex_lock(&drm_global_mutex);
+ if (dev->open_count == 0)
+ drm_dev_put(dev);
+ mutex_unlock(&drm_global_mutex);
}
EXPORT_SYMBOL(drm_dev_unplug);
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index 34c125e..7014a96 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -340,14 +340,21 @@
unsigned int bsd_engine;
-/* Client can have a maximum of 3 contexts banned before
- * it is denied of creating new contexts. As one context
- * ban needs 4 consecutive hangs, and more if there is
- * progress in between, this is a last resort stop gap measure
- * to limit the badly behaving clients access to gpu.
+/*
+ * Every context ban increments per client ban score. Also
+ * hangs in short succession increments ban score. If ban threshold
+ * is reached, client is considered banned and submitting more work
+ * will fail. This is a stop gap measure to limit the badly behaving
+ * clients access to gpu. Note that unbannable contexts never increment
+ * the client ban score.
*/
-#define I915_MAX_CLIENT_CONTEXT_BANS 3
- atomic_t context_bans;
+#define I915_CLIENT_SCORE_HANG_FAST 1
+#define I915_CLIENT_FAST_HANG_JIFFIES (60 * HZ)
+#define I915_CLIENT_SCORE_CONTEXT_BAN 3
+#define I915_CLIENT_SCORE_BANNED 9
+ /** ban_score: Accumulated score of all ctx bans and fast hangs. */
+ atomic_t ban_score;
+ unsigned long hang_timestamp;
};
/* Interface history:
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index 3704f4c..d44ad7b 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -2933,32 +2933,54 @@
return 0;
}
+static void i915_gem_client_mark_guilty(struct drm_i915_file_private *file_priv,
+ const struct i915_gem_context *ctx)
+{
+ unsigned int score;
+ unsigned long prev_hang;
+
+ if (i915_gem_context_is_banned(ctx))
+ score = I915_CLIENT_SCORE_CONTEXT_BAN;
+ else
+ score = 0;
+
+ prev_hang = xchg(&file_priv->hang_timestamp, jiffies);
+ if (time_before(jiffies, prev_hang + I915_CLIENT_FAST_HANG_JIFFIES))
+ score += I915_CLIENT_SCORE_HANG_FAST;
+
+ if (score) {
+ atomic_add(score, &file_priv->ban_score);
+
+ DRM_DEBUG_DRIVER("client %s: gained %u ban score, now %u\n",
+ ctx->name, score,
+ atomic_read(&file_priv->ban_score));
+ }
+}
+
static void i915_gem_context_mark_guilty(struct i915_gem_context *ctx)
{
- bool banned;
+ unsigned int score;
+ bool banned, bannable;
atomic_inc(&ctx->guilty_count);
- banned = false;
- if (i915_gem_context_is_bannable(ctx)) {
- unsigned int score;
+ bannable = i915_gem_context_is_bannable(ctx);
+ score = atomic_add_return(CONTEXT_SCORE_GUILTY, &ctx->ban_score);
+ banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
- score = atomic_add_return(CONTEXT_SCORE_GUILTY,
- &ctx->ban_score);
- banned = score >= CONTEXT_SCORE_BAN_THRESHOLD;
+ DRM_DEBUG_DRIVER("context %s: guilty %d, score %u, ban %s\n",
+ ctx->name, atomic_read(&ctx->guilty_count),
+ score, yesno(banned && bannable));
- DRM_DEBUG_DRIVER("context %s marked guilty (score %d) banned? %s\n",
- ctx->name, score, yesno(banned));
- }
- if (!banned)
+ /* Cool contexts don't accumulate client ban score */
+ if (!bannable)
return;
- i915_gem_context_set_banned(ctx);
- if (!IS_ERR_OR_NULL(ctx->file_priv)) {
- atomic_inc(&ctx->file_priv->context_bans);
- DRM_DEBUG_DRIVER("client %s has had %d context banned\n",
- ctx->name, atomic_read(&ctx->file_priv->context_bans));
- }
+ if (banned)
+ i915_gem_context_set_banned(ctx);
+
+ if (!IS_ERR_OR_NULL(ctx->file_priv))
+ i915_gem_client_mark_guilty(ctx->file_priv, ctx);
}
static void i915_gem_context_mark_innocent(struct i915_gem_context *ctx)
@@ -5736,6 +5758,7 @@
INIT_LIST_HEAD(&file_priv->mm.request_list);
file_priv->bsd_engine = -1;
+ file_priv->hang_timestamp = jiffies;
ret = i915_gem_context_open(i915, file);
if (ret)
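
Note: the flat "3 context bans" limit becomes an accumulating per-client
score: a banned context adds 3 points, a second hang within 60 seconds
adds 1 more, and at 9 points client_is_banned() refuses new contexts. A
minimal sketch of the scoring with plain time() in place of jiffies and
no atomics:

    #include <stdio.h>
    #include <time.h>

    #define SCORE_HANG_FAST    1
    #define FAST_HANG_SECONDS 60
    #define SCORE_CONTEXT_BAN  3
    #define SCORE_BANNED       9

    struct client {
            unsigned int ban_score;
            time_t hang_timestamp;
    };

    /* Charge the client for a hang in one of its contexts. */
    static void mark_guilty(struct client *c, int context_banned)
    {
            time_t now = time(NULL);
            unsigned int score = context_banned ? SCORE_CONTEXT_BAN : 0;

            if (now - c->hang_timestamp < FAST_HANG_SECONDS)
                    score += SCORE_HANG_FAST;
            c->hang_timestamp = now;
            c->ban_score += score;
    }

    int main(void)
    {
            struct client c = { 0, 0 };

            for (int i = 0; i < 3; i++)
                    mark_guilty(&c, 1);   /* three quick context bans */
            printf("score %u, banned: %s\n", c.ban_score,
                   c.ban_score >= SCORE_BANNED ? "yes" : "no"); /* 11, yes */
            return 0;
    }
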
diff --git a/drivers/gpu/drm/i915/i915_gem_context.c b/drivers/gpu/drm/i915/i915_gem_context.c
index 33f8a4b..060335d 100644
--- a/drivers/gpu/drm/i915/i915_gem_context.c
+++ b/drivers/gpu/drm/i915/i915_gem_context.c
@@ -652,7 +652,7 @@
static bool client_is_banned(struct drm_i915_file_private *file_priv)
{
- return atomic_read(&file_priv->context_bans) > I915_MAX_CLIENT_CONTEXT_BANS;
+ return atomic_read(&file_priv->ban_score) >= I915_CLIENT_SCORE_BANNED;
}
int i915_gem_context_create_ioctl(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
index f627a8c..22df17c 100644
--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
@@ -489,7 +489,9 @@
}
static int
-eb_add_vma(struct i915_execbuffer *eb, unsigned int i, struct i915_vma *vma)
+eb_add_vma(struct i915_execbuffer *eb,
+ unsigned int i, unsigned batch_idx,
+ struct i915_vma *vma)
{
struct drm_i915_gem_exec_object2 *entry = &eb->exec[i];
int err;
@@ -522,6 +524,24 @@
eb->flags[i] = entry->flags;
vma->exec_flags = &eb->flags[i];
+ /*
+ * SNA is doing fancy tricks with compressing batch buffers, which leads
+ * to negative relocation deltas. Usually that works out ok since the
+ * relocate address is still positive, except when the batch is placed
+ * very low in the GTT. Ensure this doesn't happen.
+ *
+ * Note that actual hangs have only been observed on gen7, but for
+ * paranoia do it everywhere.
+ */
+ if (i == batch_idx) {
+ if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
+ eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
+ if (eb->reloc_cache.has_fence)
+ eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
+
+ eb->batch = vma;
+ }
+
err = 0;
if (eb_pin_vma(eb, entry, vma)) {
if (entry->offset != vma->node.start) {
@@ -716,7 +736,7 @@
{
struct radix_tree_root *handles_vma = &eb->ctx->handles_vma;
struct drm_i915_gem_object *obj;
- unsigned int i;
+ unsigned int i, batch;
int err;
if (unlikely(i915_gem_context_is_closed(eb->ctx)))
@@ -728,6 +748,8 @@
INIT_LIST_HEAD(&eb->relocs);
INIT_LIST_HEAD(&eb->unbound);
+ batch = eb_batch_index(eb);
+
for (i = 0; i < eb->buffer_count; i++) {
u32 handle = eb->exec[i].handle;
struct i915_lut_handle *lut;
@@ -770,33 +792,16 @@
lut->handle = handle;
add_vma:
- err = eb_add_vma(eb, i, vma);
+ err = eb_add_vma(eb, i, batch, vma);
if (unlikely(err))
goto err_vma;
GEM_BUG_ON(vma != eb->vma[i]);
GEM_BUG_ON(vma->exec_flags != &eb->flags[i]);
+ GEM_BUG_ON(drm_mm_node_allocated(&vma->node) &&
+ eb_vma_misplaced(&eb->exec[i], vma, eb->flags[i]));
}
- /* take note of the batch buffer before we might reorder the lists */
- i = eb_batch_index(eb);
- eb->batch = eb->vma[i];
- GEM_BUG_ON(eb->batch->exec_flags != &eb->flags[i]);
-
- /*
- * SNA is doing fancy tricks with compressing batch buffers, which leads
- * to negative relocation deltas. Usually that works out ok since the
- * relocate address is still positive, except when the batch is placed
- * very low in the GTT. Ensure this doesn't happen.
- *
- * Note that actual hangs have only been observed on gen7, but for
- * paranoia do it everywhere.
- */
- if (!(eb->flags[i] & EXEC_OBJECT_PINNED))
- eb->flags[i] |= __EXEC_OBJECT_NEEDS_BIAS;
- if (eb->reloc_cache.has_fence)
- eb->flags[i] |= EXEC_OBJECT_NEEDS_FENCE;
-
eb->args->flags |= __EXEC_VALIDATED;
return eb_reserve(eb);
diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
index f9bc3aa..4a02747 100644
--- a/drivers/gpu/drm/i915/i915_irq.c
+++ b/drivers/gpu/drm/i915/i915_irq.c
@@ -1893,9 +1893,17 @@
/*
* Clear the PIPE*STAT regs before the IIR
+ *
+ * Toggle the enable bits to make sure we get an
+ * edge in the ISR pipe event bit if we don't clear
+ * all the enabled status bits. Otherwise the edge
+ * triggered IIR on i965/g4x wouldn't notice that
+ * an interrupt is still pending.
*/
- if (pipe_stats[pipe])
- I915_WRITE(reg, enable_mask | pipe_stats[pipe]);
+ if (pipe_stats[pipe]) {
+ I915_WRITE(reg, pipe_stats[pipe]);
+ I915_WRITE(reg, enable_mask);
+ }
}
spin_unlock(&dev_priv->irq_lock);
}
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index f11bb21..7720569 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -2425,12 +2425,17 @@
#define _3D_CHICKEN _MMIO(0x2084)
#define _3D_CHICKEN_HIZ_PLANE_DISABLE_MSAA_4X_SNB (1 << 10)
#define _3D_CHICKEN2 _MMIO(0x208c)
+
+#define FF_SLICE_CHICKEN _MMIO(0x2088)
+#define FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX (1 << 1)
+
/* Disables pipelining of read flushes past the SF-WIZ interface.
* Required on all Ironlake steppings according to the B-Spec, but the
* particular danger of not doing so is not specified.
*/
# define _3D_CHICKEN2_WM_READ_PIPELINED (1 << 14)
#define _3D_CHICKEN3 _MMIO(0x2090)
+#define _3D_CHICKEN_SF_PROVOKING_VERTEX_FIX (1 << 12)
#define _3D_CHICKEN_SF_DISABLE_OBJEND_CULL (1 << 10)
#define _3D_CHICKEN3_AA_LINE_QUALITY_FIX_ENABLE (1 << 5)
#define _3D_CHICKEN3_SF_DISABLE_FASTCLIP_CULL (1 << 5)
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index de0e223..072b326 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -304,6 +304,9 @@
int max_dotclk = dev_priv->max_dotclk_freq;
int max_clock;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
if (mode->clock < 25000)
return MODE_CLOCK_LOW;
@@ -337,6 +340,12 @@
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
return true;
}
@@ -344,6 +353,12 @@
struct intel_crtc_state *pipe_config,
struct drm_connector_state *conn_state)
{
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
pipe_config->has_pch_encoder = true;
return true;
@@ -354,6 +369,11 @@
struct drm_connector_state *conn_state)
{
struct drm_i915_private *dev_priv = to_i915(encoder->base.dev);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
+
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
pipe_config->has_pch_encoder = true;
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index dee3a8e..2cc6faa 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -14469,12 +14469,22 @@
intel_mode_valid(struct drm_device *dev,
const struct drm_display_mode *mode)
{
+ /*
+ * Can't reject DBLSCAN here because Xorg ddxen can add piles
+ * of DBLSCAN modes to the output's mode list when they detect
+ * the scaling mode property on the connector. And they don't
+ * ask the kernel to validate those modes in any way until
+ * modeset time at which point the client gets a protocol error.
+ * So in order to not upset those clients we silently ignore the
+ * DBLSCAN flag on such connectors. For other connectors we will
+ * reject modes with the DBLSCAN flag in encoder->compute_config().
+ * And we always reject DBLSCAN modes in connector->mode_valid()
+ * as we never want such modes on the connector's mode list.
+ */
+
if (mode->vscan > 1)
return MODE_NO_VSCAN;
- if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
- return MODE_NO_DBLESCAN;
-
if (mode->flags & DRM_MODE_FLAG_HSKEW)
return MODE_H_ILLEGAL;
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 8320f0e..16faea3 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -420,6 +420,9 @@
int max_rate, mode_rate, max_lanes, max_link_clock;
int max_dotclk;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
max_dotclk = intel_dp_downstream_max_dotclock(intel_dp);
if (intel_dp_is_edp(intel_dp) && fixed_mode) {
@@ -1862,7 +1865,10 @@
conn_state->scaling_mode);
}
- if ((IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv)) &&
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
+ if (HAS_GMCH_DISPLAY(dev_priv) &&
adjusted_mode->flags & DRM_MODE_FLAG_INTERLACE)
return false;
@@ -2784,16 +2790,6 @@
const struct drm_connector_state *old_conn_state)
{
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
-
- /* disable the port before the pipe on g4x */
- intel_dp_link_down(encoder, old_crtc_state);
-}
-
-static void ilk_disable_dp(struct intel_encoder *encoder,
- const struct intel_crtc_state *old_crtc_state,
- const struct drm_connector_state *old_conn_state)
-{
- intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
static void vlv_disable_dp(struct intel_encoder *encoder,
@@ -2807,13 +2803,19 @@
intel_disable_dp(encoder, old_crtc_state, old_conn_state);
}
-static void ilk_post_disable_dp(struct intel_encoder *encoder,
+static void g4x_post_disable_dp(struct intel_encoder *encoder,
const struct intel_crtc_state *old_crtc_state,
const struct drm_connector_state *old_conn_state)
{
struct intel_dp *intel_dp = enc_to_intel_dp(&encoder->base);
enum port port = encoder->port;
+ /*
+ * Bspec does not list a specific disable sequence for g4x DP.
+ * Follow the ilk+ sequence (disable pipe before the port) for
+ * g4x DP as it does not suffer from underruns like the normal
+ * g4x modeset sequence (disable pipe after the port).
+ */
intel_dp_link_down(encoder, old_crtc_state);
/* Only ilk+ has port A */
@@ -6337,7 +6339,7 @@
drm_connector_init(dev, connector, &intel_dp_connector_funcs, type);
drm_connector_helper_add(connector, &intel_dp_connector_helper_funcs);
- if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
+ if (!HAS_GMCH_DISPLAY(dev_priv))
connector->interlace_allowed = true;
connector->doublescan_allowed = 0;
@@ -6436,15 +6438,11 @@
intel_encoder->enable = vlv_enable_dp;
intel_encoder->disable = vlv_disable_dp;
intel_encoder->post_disable = vlv_post_disable_dp;
- } else if (INTEL_GEN(dev_priv) >= 5) {
- intel_encoder->pre_enable = g4x_pre_enable_dp;
- intel_encoder->enable = g4x_enable_dp;
- intel_encoder->disable = ilk_disable_dp;
- intel_encoder->post_disable = ilk_post_disable_dp;
} else {
intel_encoder->pre_enable = g4x_pre_enable_dp;
intel_encoder->enable = g4x_enable_dp;
intel_encoder->disable = g4x_disable_dp;
+ intel_encoder->post_disable = g4x_post_disable_dp;
}
intel_dig_port->dp.output_reg = output_reg;
diff --git a/drivers/gpu/drm/i915/intel_dp_mst.c b/drivers/gpu/drm/i915/intel_dp_mst.c
index 9e6956c..5890500 100644
--- a/drivers/gpu/drm/i915/intel_dp_mst.c
+++ b/drivers/gpu/drm/i915/intel_dp_mst.c
@@ -48,6 +48,9 @@
bool reduce_m_n = drm_dp_has_quirk(&intel_dp->desc,
DP_DPCD_QUIRK_LIMITED_M_N);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
pipe_config->has_pch_encoder = false;
bpp = 24;
if (intel_dp->compliance.test_data.bpc) {
@@ -366,6 +369,9 @@
if (!intel_dp)
return MODE_ERROR;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
max_link_clock = intel_dp_max_link_rate(intel_dp);
max_lanes = intel_dp_max_lane_count(intel_dp);
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index cf39ca9..f349b39 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -326,6 +326,9 @@
conn_state->scaling_mode);
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
/* DSI uses short packets for sync events, so clear mode flags for DSI */
adjusted_mode->flags = 0;
@@ -1266,6 +1269,9 @@
DRM_DEBUG_KMS("\n");
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
if (fixed_mode) {
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c
index a70d767..61d908e 100644
--- a/drivers/gpu/drm/i915/intel_dvo.c
+++ b/drivers/gpu/drm/i915/intel_dvo.c
@@ -219,6 +219,9 @@
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
int target_clock = mode->clock;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
/* XXX: Validate clock range */
if (fixed_mode) {
@@ -254,6 +257,9 @@
if (fixed_mode)
intel_fixed_panel_mode(fixed_mode, adjusted_mode);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
return true;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index ee929f3..d8cb53e 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1557,6 +1557,9 @@
bool force_dvi =
READ_ONCE(to_intel_digital_connector_state(connector->state)->force_audio) == HDMI_AUDIO_OFF_DVI;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
clock = mode->clock;
if ((mode->flags & DRM_MODE_FLAG_3D_MASK) == DRM_MODE_FLAG_3D_FRAME_PACKING)
@@ -1677,6 +1680,9 @@
int desired_bpp;
bool force_dvi = intel_conn_state->force_audio == HDMI_AUDIO_OFF_DVI;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
pipe_config->has_hdmi_sink = !force_dvi && intel_hdmi->has_hdmi_sink;
if (pipe_config->has_hdmi_sink)
diff --git a/drivers/gpu/drm/i915/intel_lrc.c b/drivers/gpu/drm/i915/intel_lrc.c
index 15434ca..7c4c8fb 100644
--- a/drivers/gpu/drm/i915/intel_lrc.c
+++ b/drivers/gpu/drm/i915/intel_lrc.c
@@ -1545,11 +1545,21 @@
/* WaFlushCoherentL3CacheLinesAtContextSwitch:skl,bxt,glk */
batch = gen8_emit_flush_coherentl3_wa(engine, batch);
+ *batch++ = MI_LOAD_REGISTER_IMM(3);
+
/* WaDisableGatherAtSetShaderCommonSlice:skl,bxt,kbl,glk */
- *batch++ = MI_LOAD_REGISTER_IMM(1);
*batch++ = i915_mmio_reg_offset(COMMON_SLICE_CHICKEN2);
*batch++ = _MASKED_BIT_DISABLE(
GEN9_DISABLE_GATHER_AT_SET_SHADER_COMMON_SLICE);
+
+ /* BSpec: 11391 */
+ *batch++ = i915_mmio_reg_offset(FF_SLICE_CHICKEN);
+ *batch++ = _MASKED_BIT_ENABLE(FF_SLICE_CHICKEN_CL_PROVOKING_VERTEX_FIX);
+
+ /* BSpec: 11299 */
+ *batch++ = i915_mmio_reg_offset(_3D_CHICKEN3);
+ *batch++ = _MASKED_BIT_ENABLE(_3D_CHICKEN_SF_PROVOKING_VERTEX_FIX);
+
*batch++ = MI_NOOP;
/* WaClearSlmSpaceAtContextSwitch:kbl */
@@ -2641,10 +2651,8 @@
context_size += LRC_HEADER_PAGES * PAGE_SIZE;
ctx_obj = i915_gem_object_create(ctx->i915, context_size);
- if (IS_ERR(ctx_obj)) {
- ret = PTR_ERR(ctx_obj);
- goto error_deref_obj;
- }
+ if (IS_ERR(ctx_obj))
+ return PTR_ERR(ctx_obj);
vma = i915_vma_instance(ctx_obj, &ctx->i915->ggtt.base, NULL);
if (IS_ERR(vma)) {
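The first intel_lrc.c hunk above folds three workaround writes into a single MI_LOAD_REGISTER_IMM packet: one header carrying the register count, then one offset/value pair per register. A sketch of the resulting batch layout, assuming the usual i915 encodings (MI opcode 0x22 at bits 28:23, masked registers carrying the write-enable bits in the upper half); the register offsets here are placeholders, not the real chicken-register addresses:

    #include <stdint.h>
    #include <stdio.h>

    #define LRI_HEADER(n)          (0x11000000u | (2u * (n) - 1))
    #define MASKED_BIT_ENABLE(b)   (((b) << 16) | (b))

    int main(void)
    {
        /* placeholder offsets standing in for the three workaround registers */
        const uint32_t REG_A = 0x7014, REG_B = 0x2088, REG_C = 0x2090;
        uint32_t batch[8];
        unsigned int n = 0;

        batch[n++] = LRI_HEADER(3);            /* one header, three writes */
        batch[n++] = REG_A; batch[n++] = MASKED_BIT_ENABLE(1u << 0);
        batch[n++] = REG_B; batch[n++] = MASKED_BIT_ENABLE(1u << 1);
        batch[n++] = REG_C; batch[n++] = MASKED_BIT_ENABLE(1u << 2);
        batch[n++] = 0;                        /* MI_NOOP */

        printf("%u dwords, header %#x\n", n, (unsigned int)batch[0]);
        return 0;
    }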
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index d278f24..48f618d 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -380,6 +380,8 @@
struct drm_display_mode *fixed_mode = intel_connector->panel.fixed_mode;
int max_pixclk = to_i915(connector->dev)->max_dotclk_freq;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
if (mode->hdisplay > fixed_mode->hdisplay)
return MODE_PANEL;
if (mode->vdisplay > fixed_mode->vdisplay)
@@ -429,6 +431,9 @@
intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
adjusted_mode);
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
if (HAS_PCH_SPLIT(dev_priv)) {
pipe_config->has_pch_encoder = true;
diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c
index 2500502..26975df4 100644
--- a/drivers/gpu/drm/i915/intel_sdvo.c
+++ b/drivers/gpu/drm/i915/intel_sdvo.c
@@ -1160,6 +1160,9 @@
adjusted_mode);
}
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
/*
* Make the CRTC code factor in the SDVO pixel multiplier. The
* SDVO device will factor out the multiplier during mode_set.
@@ -1621,6 +1624,9 @@
struct intel_sdvo *intel_sdvo = intel_attached_sdvo(connector);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
if (intel_sdvo->pixel_clock_min > mode->clock)
return MODE_CLOCK_LOW;
diff --git a/drivers/gpu/drm/i915/intel_tv.c b/drivers/gpu/drm/i915/intel_tv.c
index 885fc38..b55b5c1 100644
--- a/drivers/gpu/drm/i915/intel_tv.c
+++ b/drivers/gpu/drm/i915/intel_tv.c
@@ -850,6 +850,9 @@
const struct tv_mode *tv_mode = intel_tv_mode_find(connector->state);
int max_dotclk = to_i915(connector->dev)->max_dotclk_freq;
+ if (mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return MODE_NO_DBLESCAN;
+
if (mode->clock > max_dotclk)
return MODE_CLOCK_HIGH;
@@ -877,16 +880,21 @@
struct drm_connector_state *conn_state)
{
const struct tv_mode *tv_mode = intel_tv_mode_find(conn_state);
+ struct drm_display_mode *adjusted_mode =
+ &pipe_config->base.adjusted_mode;
if (!tv_mode)
return false;
- pipe_config->base.adjusted_mode.crtc_clock = tv_mode->clock;
+ if (adjusted_mode->flags & DRM_MODE_FLAG_DBLSCAN)
+ return false;
+
+ adjusted_mode->crtc_clock = tv_mode->clock;
DRM_DEBUG_KMS("forcing bpc to 8 for TV\n");
pipe_config->pipe_bpp = 8*3;
/* TV has its own notion of sync and other mode flags, so clear them. */
- pipe_config->base.adjusted_mode.flags = 0;
+ adjusted_mode->flags = 0;
/*
* FIXME: We don't check whether the input mode is actually what we want
diff --git a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
index 291c081..397143b 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/curs507a.c
@@ -132,7 +132,7 @@
nvif_object_map(&wndw->wimm.base.user, NULL, 0);
wndw->immd = func;
- wndw->ctxdma.parent = &disp->core->chan.base.user;
+ wndw->ctxdma.parent = NULL;
return 0;
}
diff --git a/drivers/gpu/drm/nouveau/dispnv50/wndw.c b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
index 224963b5..c5a9bc1 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/wndw.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/wndw.c
@@ -444,14 +444,17 @@
if (ret)
return ret;
- ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
- if (IS_ERR(ctxdma)) {
- nouveau_bo_unpin(fb->nvbo);
- return PTR_ERR(ctxdma);
+ if (wndw->ctxdma.parent) {
+ ctxdma = nv50_wndw_ctxdma_new(wndw, fb);
+ if (IS_ERR(ctxdma)) {
+ nouveau_bo_unpin(fb->nvbo);
+ return PTR_ERR(ctxdma);
+ }
+
+ asyw->image.handle[0] = ctxdma->object.handle;
}
asyw->state.fence = reservation_object_get_excl_rcu(fb->nvbo->bo.resv);
- asyw->image.handle[0] = ctxdma->object.handle;
asyw->image.offset[0] = fb->nvbo->bo.offset;
if (wndw->func->prepare) {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index b8cda94..768207f 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -623,7 +623,7 @@
struct qxl_cursor_cmd *cmd;
struct qxl_cursor *cursor;
struct drm_gem_object *obj;
- struct qxl_bo *cursor_bo = NULL, *user_bo = NULL;
+ struct qxl_bo *cursor_bo = NULL, *user_bo = NULL, *old_cursor_bo = NULL;
int ret;
void *user_ptr;
int size = 64*64*4;
@@ -677,7 +677,7 @@
cursor_bo, 0);
cmd->type = QXL_CURSOR_SET;
- qxl_bo_unref(&qcrtc->cursor_bo);
+ old_cursor_bo = qcrtc->cursor_bo;
qcrtc->cursor_bo = cursor_bo;
cursor_bo = NULL;
} else {
@@ -697,6 +697,9 @@
qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
qxl_release_fence_buffer_objects(release);
+ if (old_cursor_bo)
+ qxl_bo_unref(&old_cursor_bo);
+
qxl_bo_unref(&cursor_bo);
return;
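The qxl change keeps the previous cursor BO alive in old_cursor_bo until the QXL_CURSOR_SET command has been pushed and its release fenced, since the device may still scan out the old cursor up to that point. A user-space model of the deferred-unref pattern (hypothetical bo type):

    #include <stdio.h>
    #include <stdlib.h>

    struct bo { int refs; };

    static void bo_unref(struct bo **bo)
    {
        if (*bo && --(*bo)->refs == 0)
            free(*bo);
        *bo = NULL;
    }

    static void push_cursor_cmd(struct bo *new_bo)
    {
        /* the device may still read the *old* cursor until this point */
        printf("cursor set to %p\n", (void *)new_bo);
    }

    int main(void)
    {
        struct bo *current_bo = calloc(1, sizeof(*current_bo));
        struct bo *new_bo = calloc(1, sizeof(*new_bo));
        struct bo *old_bo;

        current_bo->refs = new_bo->refs = 1;

        old_bo = current_bo;    /* was: bo_unref(&current_bo) -- too early */
        current_bo = new_bo;

        push_cursor_cmd(current_bo);
        bo_unref(&old_bo);      /* safe only after the command is queued */
        bo_unref(&current_bo);
        return 0;
    }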
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 08747fc..8232b39 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -17,7 +17,6 @@
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
-#include <drm/drm_panel.h>
#include <uapi/drm/drm_mode.h>
@@ -418,9 +417,6 @@
static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
const struct drm_display_mode *mode)
{
- struct drm_panel *panel = tcon->panel;
- struct drm_connector *connector = panel->connector;
- struct drm_display_info display_info = connector->display_info;
unsigned int bp, hsync, vsync;
u8 clk_delay;
u32 val = 0;
@@ -478,27 +474,6 @@
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
- /*
- * On A20 and similar SoCs, the only way to achieve Positive Edge
- * (Rising Edge), is setting dclk clock phase to 2/3(240°).
- * By default TCON works in Negative Edge(Falling Edge),
- * this is why phase is set to 0 in that case.
- * Unfortunately there's no way to logically invert dclk through
- * IO_POL register.
- * The only acceptable way to work, triple checked with scope,
- * is using clock phase set to 0° for Negative Edge and set to 240°
- * for Positive Edge.
- * On A33 and similar SoCs there would be a 90° phase option,
- * but it divides also dclk by 2.
- * Following code is a way to avoid quirks all around TCON
- * and DOTCLOCK drivers.
- */
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
- clk_set_phase(tcon->dclk, 240);
-
- if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
- clk_set_phase(tcon->dclk, 0);
-
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
val);
diff --git a/drivers/hwmon/dell-smm-hwmon.c b/drivers/hwmon/dell-smm-hwmon.c
index bf3bb7e..9d3ef87 100644
--- a/drivers/hwmon/dell-smm-hwmon.c
+++ b/drivers/hwmon/dell-smm-hwmon.c
@@ -1074,6 +1074,13 @@
DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "Vostro 3360"),
},
},
+ {
+ .ident = "Dell XPS13 9333",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_EXACT_MATCH(DMI_PRODUCT_NAME, "XPS13 9333"),
+ },
+ },
{ }
};
diff --git a/drivers/hwmon/nct6775.c b/drivers/hwmon/nct6775.c
index 155d4d1..f9d1349 100644
--- a/drivers/hwmon/nct6775.c
+++ b/drivers/hwmon/nct6775.c
@@ -4175,7 +4175,7 @@
* The temperature is already monitored if the respective bit in <mask>
* is set.
*/
- for (i = 0; i < 32; i++) {
+ for (i = 0; i < 31; i++) {
if (!(data->temp_mask & BIT(i + 1)))
continue;
if (!reg_temp_alternate[i])
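A worked example of the nct6775 loop-bound fix: temp_mask is 32 bits wide, so BIT(i + 1) must never be evaluated for i == 31; shifting a 32-bit value by the full width of its type is undefined behaviour in C. Self-contained model:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)  (1U << (n))

    int main(void)
    {
        uint32_t temp_mask = 0x80000000u;   /* only bit 31 set */

        for (int i = 0; i < 31; i++) {      /* was: i < 32 -> BIT(32) is UB */
            if (!(temp_mask & BIT(i + 1)))
                continue;
            printf("monitoring source %d\n", i + 1);
        }
        return 0;
    }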
diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c
index 0f52d44..f5fe010 100644
--- a/drivers/irqchip/irq-gic-v2m.c
+++ b/drivers/irqchip/irq-gic-v2m.c
@@ -199,7 +199,7 @@
fail:
irq_domain_free_irqs_parent(domain, virq, nr_irqs);
- gicv2m_unalloc_msi(v2m, hwirq, get_count_order(nr_irqs));
+ gicv2m_unalloc_msi(v2m, hwirq, nr_irqs);
return err;
}
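A worked example of the gicv2m argument fix: gicv2m_unalloc_msi() takes a count of MSIs and converts it to an allocation order internally, so the error path was applying get_count_order() twice; a failed 16-MSI allocation released a region of only 4 slots. get_count_order() is reimplemented locally for the demonstration:

    #include <stdio.h>

    static int get_count_order(unsigned int n)  /* local reimplementation */
    {
        int order = 0;

        while ((1U << order) < n)
            order++;
        return order;
    }

    static void unalloc_msi(unsigned int nr_irqs)
    {
        /* the callee already converts count -> order internally */
        printf("releasing %u slots\n", 1U << get_count_order(nr_irqs));
    }

    int main(void)
    {
        unsigned int nr_irqs = 16;

        unalloc_msi(get_count_order(nr_irqs)); /* bug: releases 4 slots  */
        unalloc_msi(nr_irqs);                  /* fix: releases 16 slots */
        return 0;
    }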
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 5377d7e..d7842d3 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -182,6 +182,22 @@
return its->collections + its_dev->event_map.col_map[event];
}
+static struct its_collection *valid_col(struct its_collection *col)
+{
+ if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
+ return NULL;
+
+ return col;
+}
+
+static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
+{
+ if (valid_col(its->collections + vpe->col_idx))
+ return vpe;
+
+ return NULL;
+}
+
/*
* ITS command descriptors - parameters to be encoded in a command
* block.
@@ -439,7 +455,7 @@
its_fixup_cmd(cmd);
- return col;
+ return valid_col(col);
}
static struct its_collection *its_build_movi_cmd(struct its_node *its,
@@ -458,7 +474,7 @@
its_fixup_cmd(cmd);
- return col;
+ return valid_col(col);
}
static struct its_collection *its_build_discard_cmd(struct its_node *its,
@@ -476,7 +492,7 @@
its_fixup_cmd(cmd);
- return col;
+ return valid_col(col);
}
static struct its_collection *its_build_inv_cmd(struct its_node *its,
@@ -494,7 +510,7 @@
its_fixup_cmd(cmd);
- return col;
+ return valid_col(col);
}
static struct its_collection *its_build_int_cmd(struct its_node *its,
@@ -512,7 +528,7 @@
its_fixup_cmd(cmd);
- return col;
+ return valid_col(col);
}
static struct its_collection *its_build_clear_cmd(struct its_node *its,
@@ -530,7 +546,7 @@
its_fixup_cmd(cmd);
- return col;
+ return valid_col(col);
}
static struct its_collection *its_build_invall_cmd(struct its_node *its,
@@ -554,7 +570,7 @@
its_fixup_cmd(cmd);
- return desc->its_vinvall_cmd.vpe;
+ return valid_vpe(its, desc->its_vinvall_cmd.vpe);
}
static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
@@ -576,7 +592,7 @@
its_fixup_cmd(cmd);
- return desc->its_vmapp_cmd.vpe;
+ return valid_vpe(its, desc->its_vmapp_cmd.vpe);
}
static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
@@ -599,7 +615,7 @@
its_fixup_cmd(cmd);
- return desc->its_vmapti_cmd.vpe;
+ return valid_vpe(its, desc->its_vmapti_cmd.vpe);
}
static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
@@ -622,7 +638,7 @@
its_fixup_cmd(cmd);
- return desc->its_vmovi_cmd.vpe;
+ return valid_vpe(its, desc->its_vmovi_cmd.vpe);
}
static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
@@ -640,7 +656,7 @@
its_fixup_cmd(cmd);
- return desc->its_vmovp_cmd.vpe;
+ return valid_vpe(its, desc->its_vmovp_cmd.vpe);
}
static u64 its_cmd_ptr_to_offset(struct its_node *its,
@@ -1824,11 +1840,16 @@
static int its_alloc_collections(struct its_node *its)
{
+ int i;
+
its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
GFP_KERNEL);
if (!its->collections)
return -ENOMEM;
+ for (i = 0; i < nr_cpu_ids; i++)
+ its->collections[i].target_address = ~0ULL;
+
return 0;
}
@@ -2310,7 +2331,14 @@
cpu_mask = cpumask_of_node(its_dev->its->numa_node);
/* Bind the LPI to the first possible CPU */
- cpu = cpumask_first(cpu_mask);
+ cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
+ if (cpu >= nr_cpu_ids) {
+ if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
+ return -EINVAL;
+
+ cpu = cpumask_first(cpu_online_mask);
+ }
+
its_dev->event_map.col_map[event] = cpu;
irq_data_update_effective_affinity(d, cpumask_of(cpu));
@@ -3399,6 +3427,16 @@
u64 timeout = USEC_PER_SEC;
u64 val;
+ /*
+ * If coming via a CPU hotplug event, we don't need to disable
+ * LPIs before trying to re-enable them. They are already
+ * configured and all is well in the world. Detect this case
+ * by checking the allocation of the pending table for the
+ * current CPU.
+ */
+ if (gic_data_rdist()->pend_page)
+ return 0;
+
if (!gic_rdists_supports_plpis()) {
pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
return -ENXIO;
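The new valid_col()/valid_vpe() helpers pair with the ~0ULL initialisation of the collection table: an unmapped collection keeps its low 16 bits set and is refused before a command referencing it can be built. A self-contained model of the check:

    #include <stdint.h>
    #include <stdio.h>

    #define GENMASK_ULL(h, l) \
        (((~0ULL) << (l)) & (~0ULL >> (63 - (h))))

    struct its_collection { uint64_t target_address; };

    static struct its_collection *valid_col(struct its_collection *col)
    {
        if (col->target_address & GENMASK_ULL(15, 0))
            return NULL;    /* unmapped or malformed target */
        return col;
    }

    int main(void)
    {
        struct its_collection unmapped = { .target_address = ~0ULL };
        struct its_collection mapped = { .target_address = 0x80000000ULL };

        printf("unmapped -> %p, mapped -> %p\n",
               (void *)valid_col(&unmapped), (void *)valid_col(&mapped));
        return 0;
    }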
diff --git a/drivers/irqchip/irq-ls-scfg-msi.c b/drivers/irqchip/irq-ls-scfg-msi.c
index 1ec3bfe..c671b32 100644
--- a/drivers/irqchip/irq-ls-scfg-msi.c
+++ b/drivers/irqchip/irq-ls-scfg-msi.c
@@ -93,8 +93,12 @@
msg->address_lo = lower_32_bits(msi_data->msiir_addr);
msg->data = data->hwirq;
- if (msi_affinity_flag)
- msg->data |= cpumask_first(data->common->affinity);
+ if (msi_affinity_flag) {
+ const struct cpumask *mask;
+
+ mask = irq_data_get_effective_affinity_mask(data);
+ msg->data |= cpumask_first(mask);
+ }
iommu_dma_map_msi_msg(data->irq, msg);
}
@@ -121,7 +125,7 @@
return -EINVAL;
}
- cpumask_copy(irq_data->common->affinity, mask);
+ irq_data_update_effective_affinity(irq_data, cpumask_of(cpu));
return IRQ_SET_MASK_OK;
}
diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig
index 10c0898..9c03f35 100644
--- a/drivers/lightnvm/Kconfig
+++ b/drivers/lightnvm/Kconfig
@@ -4,7 +4,7 @@
menuconfig NVM
bool "Open-Channel SSD target support"
- depends on BLOCK && HAS_DMA && PCI
+ depends on BLOCK && PCI
select BLK_DEV_NVME
help
Say Y here to get to enable Open-channel SSDs.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index 21710a7..46df030 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1808,6 +1808,7 @@
u32 max_segments =
(ctrl->max_hw_sectors / (ctrl->page_size >> 9)) + 1;
+ max_segments = min_not_zero(max_segments, ctrl->max_segments);
blk_queue_max_hw_sectors(q, ctrl->max_hw_sectors);
blk_queue_max_segments(q, min_t(u32, max_segments, USHRT_MAX));
}
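min_not_zero() matters here because ctrl->max_segments stays 0 for transports that set no limit; a plain min() would clamp everything to zero. A model of the semantics (the kernel version is a macro in kernel.h; this is a function sketch):

    #include <stdio.h>

    static unsigned int min_not_zero(unsigned int a, unsigned int b)
    {
        if (a == 0)
            return b;
        if (b == 0)
            return a;
        return a < b ? a : b;
    }

    int main(void)
    {
        printf("%u\n", min_not_zero(256, 0));   /* 256: no driver limit   */
        printf("%u\n", min_not_zero(256, 127)); /* 127: PCIe driver limit */
        return 0;
    }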
diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c
index b528a2f..41d45a1b 100644
--- a/drivers/nvme/host/fc.c
+++ b/drivers/nvme/host/fc.c
@@ -2790,6 +2790,9 @@
/* re-enable the admin_q so anything new can fast fail */
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
+ /* resume the io queues so that things will fast fail */
+ nvme_start_queues(&ctrl->ctrl);
+
nvme_fc_ctlr_inactive_on_rport(ctrl);
}
@@ -2804,9 +2807,6 @@
* waiting for io to terminate
*/
nvme_fc_delete_association(ctrl);
-
- /* resume the io queues so that things will fast fail */
- nvme_start_queues(nctrl);
}
static void
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 231807c..0c4a33d 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -170,6 +170,7 @@
u64 cap;
u32 page_size;
u32 max_hw_sectors;
+ u32 max_segments;
u16 oncs;
u16 oacs;
u16 nssa;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index fc33804..ba943f2 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -38,6 +38,13 @@
#define SGES_PER_PAGE (PAGE_SIZE / sizeof(struct nvme_sgl_desc))
+/*
+ * These can be higher, but we need to ensure that any command doesn't
+ * require an sg allocation that needs more than a page of data.
+ */
+#define NVME_MAX_KB_SZ 4096
+#define NVME_MAX_SEGS 127
+
static int use_threaded_interrupts;
module_param(use_threaded_interrupts, int, 0);
@@ -100,6 +107,8 @@
struct nvme_ctrl ctrl;
struct completion ioq_wait;
+ mempool_t *iod_mempool;
+
/* shadow doorbell buffer support: */
u32 *dbbuf_dbs;
dma_addr_t dbbuf_dbs_dma_addr;
@@ -477,10 +486,7 @@
iod->use_sgl = nvme_pci_use_sgls(dev, rq);
if (nseg > NVME_INT_PAGES || size > NVME_INT_BYTES(dev)) {
- size_t alloc_size = nvme_pci_iod_alloc_size(dev, size, nseg,
- iod->use_sgl);
-
- iod->sg = kmalloc(alloc_size, GFP_ATOMIC);
+ iod->sg = mempool_alloc(dev->iod_mempool, GFP_ATOMIC);
if (!iod->sg)
return BLK_STS_RESOURCE;
} else {
@@ -526,7 +532,7 @@
}
if (iod->sg != iod->inline_sg)
- kfree(iod->sg);
+ mempool_free(iod->sg, dev->iod_mempool);
}
#ifdef CONFIG_BLK_DEV_INTEGRITY
@@ -2280,6 +2286,7 @@
blk_put_queue(dev->ctrl.admin_q);
kfree(dev->queues);
free_opal_dev(dev->ctrl.opal_dev);
+ mempool_destroy(dev->iod_mempool);
kfree(dev);
}
@@ -2289,6 +2296,7 @@
nvme_get_ctrl(&dev->ctrl);
nvme_dev_disable(dev, false);
+ nvme_kill_queues(&dev->ctrl);
if (!queue_work(nvme_wq, &dev->remove_work))
nvme_put_ctrl(&dev->ctrl);
}
@@ -2333,6 +2341,13 @@
if (result)
goto out;
+ /*
+ * Limit the max command size to prevent iod->sg allocations going
+ * over a single page.
+ */
+ dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
+ dev->ctrl.max_segments = NVME_MAX_SEGS;
+
result = nvme_init_identify(&dev->ctrl);
if (result)
goto out;
@@ -2405,7 +2420,6 @@
struct nvme_dev *dev = container_of(work, struct nvme_dev, remove_work);
struct pci_dev *pdev = to_pci_dev(dev->dev);
- nvme_kill_queues(&dev->ctrl);
if (pci_get_drvdata(pdev))
device_release_driver(&pdev->dev);
nvme_put_ctrl(&dev->ctrl);
@@ -2509,6 +2523,7 @@
int node, result = -ENOMEM;
struct nvme_dev *dev;
unsigned long quirks = id->driver_data;
+ size_t alloc_size;
node = dev_to_node(&pdev->dev);
if (node == NUMA_NO_NODE)
@@ -2546,6 +2561,23 @@
if (result)
goto release_pools;
+ /*
+ * Double check that our mempool alloc size will cover the biggest
+ * command we support.
+ */
+ alloc_size = nvme_pci_iod_alloc_size(dev, NVME_MAX_KB_SZ,
+ NVME_MAX_SEGS, true);
+ WARN_ON_ONCE(alloc_size > PAGE_SIZE);
+
+ dev->iod_mempool = mempool_create_node(1, mempool_kmalloc,
+ mempool_kfree,
+ (void *) alloc_size,
+ GFP_KERNEL, node);
+ if (!dev->iod_mempool) {
+ result = -ENOMEM;
+ goto release_pools;
+ }
+
dev_info(dev->ctrl.device, "pci function %s\n", dev_name(&pdev->dev));
nvme_get_ctrl(&dev->ctrl);
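The mempool conversion is what makes the NVME_MAX_KB_SZ/NVME_MAX_SEGS cap safe: with the command size bounded so iod->sg fits in one page, a single preallocated element guarantees at least one in-flight command can always get its scatterlist even when kmalloc fails. A user-space model of that reserve-element behaviour (hypothetical tiny_pool; the kernel API is mempool_create_node()/mempool_alloc()/mempool_free()):

    #include <stdio.h>
    #include <stdlib.h>

    struct tiny_pool {
        void *reserve;      /* preallocated element */
        size_t elem_size;
    };

    static void *pool_alloc(struct tiny_pool *p)
    {
        void *mem = malloc(p->elem_size);

        if (!mem) {         /* allocator failed: hand out the reserve */
            mem = p->reserve;
            p->reserve = NULL;
        }
        return mem;
    }

    static void pool_free(struct tiny_pool *p, void *mem)
    {
        if (!p->reserve)    /* refill the reserve first */
            p->reserve = mem;
        else
            free(mem);
    }

    int main(void)
    {
        struct tiny_pool p = { .reserve = malloc(4096), .elem_size = 4096 };
        void *sg = pool_alloc(&p);

        pool_free(&p, sg);
        free(p.reserve);
        return 0;
    }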
diff --git a/drivers/nvme/host/rdma.c b/drivers/nvme/host/rdma.c
index c9424da..9544625 100644
--- a/drivers/nvme/host/rdma.c
+++ b/drivers/nvme/host/rdma.c
@@ -560,12 +560,6 @@
if (!test_and_clear_bit(NVME_RDMA_Q_ALLOCATED, &queue->flags))
return;
- if (nvme_rdma_queue_idx(queue) == 0) {
- nvme_rdma_free_qe(queue->device->dev,
- &queue->ctrl->async_event_sqe,
- sizeof(struct nvme_command), DMA_TO_DEVICE);
- }
-
nvme_rdma_destroy_queue_ib(queue);
rdma_destroy_id(queue->cm_id);
}
@@ -698,7 +692,7 @@
set = &ctrl->tag_set;
memset(set, 0, sizeof(*set));
set->ops = &nvme_rdma_mq_ops;
- set->queue_depth = nctrl->opts->queue_size;
+ set->queue_depth = nctrl->sqsize + 1;
set->reserved_tags = 1; /* fabric connect */
set->numa_node = NUMA_NO_NODE;
set->flags = BLK_MQ_F_SHOULD_MERGE;
@@ -734,11 +728,12 @@
static void nvme_rdma_destroy_admin_queue(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- nvme_rdma_stop_queue(&ctrl->queues[0]);
if (remove) {
blk_cleanup_queue(ctrl->ctrl.admin_q);
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
}
+ nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
nvme_rdma_free_queue(&ctrl->queues[0]);
}
@@ -755,11 +750,16 @@
ctrl->max_fr_pages = nvme_rdma_get_max_fr_pages(ctrl->device->dev);
+ error = nvme_rdma_alloc_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
+ if (error)
+ goto out_free_queue;
+
if (new) {
ctrl->ctrl.admin_tagset = nvme_rdma_alloc_tagset(&ctrl->ctrl, true);
if (IS_ERR(ctrl->ctrl.admin_tagset)) {
error = PTR_ERR(ctrl->ctrl.admin_tagset);
- goto out_free_queue;
+ goto out_free_async_qe;
}
ctrl->ctrl.admin_q = blk_mq_init_queue(&ctrl->admin_tag_set);
@@ -795,12 +795,6 @@
if (error)
goto out_stop_queue;
- error = nvme_rdma_alloc_qe(ctrl->queues[0].device->dev,
- &ctrl->async_event_sqe, sizeof(struct nvme_command),
- DMA_TO_DEVICE);
- if (error)
- goto out_stop_queue;
-
return 0;
out_stop_queue:
@@ -811,6 +805,9 @@
out_free_tagset:
if (new)
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.admin_tagset);
+out_free_async_qe:
+ nvme_rdma_free_qe(ctrl->device->dev, &ctrl->async_event_sqe,
+ sizeof(struct nvme_command), DMA_TO_DEVICE);
out_free_queue:
nvme_rdma_free_queue(&ctrl->queues[0]);
return error;
@@ -819,7 +816,6 @@
static void nvme_rdma_destroy_io_queues(struct nvme_rdma_ctrl *ctrl,
bool remove)
{
- nvme_rdma_stop_io_queues(ctrl);
if (remove) {
blk_cleanup_queue(ctrl->ctrl.connect_q);
nvme_rdma_free_tagset(&ctrl->ctrl, ctrl->ctrl.tagset);
@@ -888,9 +884,9 @@
list_del(&ctrl->list);
mutex_unlock(&nvme_rdma_ctrl_mutex);
- kfree(ctrl->queues);
nvmf_free_options(nctrl->opts);
free_ctrl:
+ kfree(ctrl->queues);
kfree(ctrl);
}
@@ -949,6 +945,7 @@
return;
destroy_admin:
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_rdma_destroy_admin_queue(ctrl, false);
requeue:
dev_info(ctrl->ctrl.device, "Failed reconnect attempt %d\n",
@@ -965,12 +962,14 @@
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_cancel_request, &ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, false);
}
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_cancel_request, &ctrl->ctrl);
nvme_rdma_destroy_admin_queue(ctrl, false);
@@ -1736,6 +1735,7 @@
{
if (ctrl->ctrl.queue_count > 1) {
nvme_stop_queues(&ctrl->ctrl);
+ nvme_rdma_stop_io_queues(ctrl);
blk_mq_tagset_busy_iter(&ctrl->tag_set,
nvme_cancel_request, &ctrl->ctrl);
nvme_rdma_destroy_io_queues(ctrl, shutdown);
@@ -1747,6 +1747,7 @@
nvme_disable_ctrl(&ctrl->ctrl, ctrl->ctrl.cap);
blk_mq_quiesce_queue(ctrl->ctrl.admin_q);
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
blk_mq_tagset_busy_iter(&ctrl->admin_tag_set,
nvme_cancel_request, &ctrl->ctrl);
blk_mq_unquiesce_queue(ctrl->ctrl.admin_q);
@@ -1932,11 +1933,6 @@
goto out_free_ctrl;
}
- ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
- 0 /* no quirks, we're perfect! */);
- if (ret)
- goto out_free_ctrl;
-
INIT_DELAYED_WORK(&ctrl->reconnect_work,
nvme_rdma_reconnect_ctrl_work);
INIT_WORK(&ctrl->err_work, nvme_rdma_error_recovery_work);
@@ -1950,14 +1946,19 @@
ctrl->queues = kcalloc(ctrl->ctrl.queue_count, sizeof(*ctrl->queues),
GFP_KERNEL);
if (!ctrl->queues)
- goto out_uninit_ctrl;
+ goto out_free_ctrl;
+
+ ret = nvme_init_ctrl(&ctrl->ctrl, dev, &nvme_rdma_ctrl_ops,
+ 0 /* no quirks, we're perfect! */);
+ if (ret)
+ goto out_kfree_queues;
changed = nvme_change_ctrl_state(&ctrl->ctrl, NVME_CTRL_CONNECTING);
WARN_ON_ONCE(!changed);
ret = nvme_rdma_configure_admin_queue(ctrl, true);
if (ret)
- goto out_kfree_queues;
+ goto out_uninit_ctrl;
/* sanity check icdoff */
if (ctrl->ctrl.icdoff) {
@@ -1974,20 +1975,19 @@
goto out_remove_admin_queue;
}
- if (opts->queue_size > ctrl->ctrl.maxcmd) {
- /* warn if maxcmd is lower than queue_size */
- dev_warn(ctrl->ctrl.device,
- "queue_size %zu > ctrl maxcmd %u, clamping down\n",
- opts->queue_size, ctrl->ctrl.maxcmd);
- opts->queue_size = ctrl->ctrl.maxcmd;
- }
-
+ /* only warn if argument is too large here, will clamp later */
if (opts->queue_size > ctrl->ctrl.sqsize + 1) {
- /* warn if sqsize is lower than queue_size */
dev_warn(ctrl->ctrl.device,
"queue_size %zu > ctrl sqsize %u, clamping down\n",
opts->queue_size, ctrl->ctrl.sqsize + 1);
- opts->queue_size = ctrl->ctrl.sqsize + 1;
+ }
+
+ /* warn if maxcmd is lower than sqsize+1 */
+ if (ctrl->ctrl.sqsize + 1 > ctrl->ctrl.maxcmd) {
+ dev_warn(ctrl->ctrl.device,
+ "sqsize %u > ctrl maxcmd %u, clamping down\n",
+ ctrl->ctrl.sqsize + 1, ctrl->ctrl.maxcmd);
+ ctrl->ctrl.sqsize = ctrl->ctrl.maxcmd - 1;
}
if (opts->nr_io_queues) {
@@ -2013,15 +2013,16 @@
return &ctrl->ctrl;
out_remove_admin_queue:
+ nvme_rdma_stop_queue(&ctrl->queues[0]);
nvme_rdma_destroy_admin_queue(ctrl, true);
-out_kfree_queues:
- kfree(ctrl->queues);
out_uninit_ctrl:
nvme_uninit_ctrl(&ctrl->ctrl);
nvme_put_ctrl(&ctrl->ctrl);
if (ret > 0)
ret = -EIO;
return ERR_PTR(ret);
+out_kfree_queues:
+ kfree(ctrl->queues);
out_free_ctrl:
kfree(ctrl);
return ERR_PTR(ret);
diff --git a/drivers/nvme/target/core.c b/drivers/nvme/target/core.c
index a03da76..74d4b78 100644
--- a/drivers/nvme/target/core.c
+++ b/drivers/nvme/target/core.c
@@ -686,6 +686,14 @@
}
ctrl->csts = NVME_CSTS_RDY;
+
+ /*
+ * Controllers that are not yet enabled should not really enforce the
+ * keep alive timeout, but we still want to track a timeout and cleanup
+ * in case a host died before it enabled the controller. Hence, simply
+ * reset the keep alive timer when the controller is enabled.
+ */
+ mod_delayed_work(system_wq, &ctrl->ka_work, ctrl->kato * HZ);
}
static void nvmet_clear_ctrl(struct nvmet_ctrl *ctrl)
diff --git a/drivers/opp/core.c b/drivers/opp/core.c
index ab2f3fe..31ff03d 100644
--- a/drivers/opp/core.c
+++ b/drivers/opp/core.c
@@ -598,7 +598,7 @@
}
/* Scaling up? Scale voltage before frequency */
- if (freq > old_freq) {
+ if (freq >= old_freq) {
ret = _set_opp_voltage(dev, reg, new_supply);
if (ret)
goto restore_voltage;
diff --git a/drivers/pinctrl/actions/pinctrl-owl.c b/drivers/pinctrl/actions/pinctrl-owl.c
index 76243ca..b5c880b 100644
--- a/drivers/pinctrl/actions/pinctrl-owl.c
+++ b/drivers/pinctrl/actions/pinctrl-owl.c
@@ -333,7 +333,7 @@
unsigned long flags;
unsigned int param;
u32 reg, bit, width, arg;
- int ret, i;
+ int ret = 0, i;
info = &pctrl->soc->padinfo[pin];
diff --git a/drivers/pinctrl/devicetree.c b/drivers/pinctrl/devicetree.c
index b601039..c4aa411 100644
--- a/drivers/pinctrl/devicetree.c
+++ b/drivers/pinctrl/devicetree.c
@@ -101,10 +101,11 @@
}
static int dt_to_map_one_config(struct pinctrl *p,
- struct pinctrl_dev *pctldev,
+ struct pinctrl_dev *hog_pctldev,
const char *statename,
struct device_node *np_config)
{
+ struct pinctrl_dev *pctldev = NULL;
struct device_node *np_pctldev;
const struct pinctrl_ops *ops;
int ret;
@@ -123,8 +124,10 @@
return -EPROBE_DEFER;
}
/* If we're creating a hog we can use the passed pctldev */
- if (pctldev && (np_pctldev == p->dev->of_node))
+ if (hog_pctldev && (np_pctldev == p->dev->of_node)) {
+ pctldev = hog_pctldev;
break;
+ }
pctldev = get_pinctrl_dev_from_of_node(np_pctldev);
if (pctldev)
break;
diff --git a/drivers/pinctrl/mediatek/pinctrl-mt7622.c b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
index ad6da11..e3f1ab2 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mt7622.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mt7622.c
@@ -1459,6 +1459,9 @@
struct mtk_pinctrl *hw = gpiochip_get_data(chip);
unsigned long eint_n;
+ if (!hw->eint)
+ return -ENOTSUPP;
+
eint_n = offset;
return mtk_eint_find_irq(hw->eint, eint_n);
@@ -1471,7 +1474,8 @@
unsigned long eint_n;
u32 debounce;
- if (pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
+ if (!hw->eint ||
+ pinconf_to_config_param(config) != PIN_CONFIG_INPUT_DEBOUNCE)
return -ENOTSUPP;
debounce = pinconf_to_config_argument(config);
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index b379969..16ff56f9 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -1000,11 +1000,6 @@
return -ENOMEM;
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- if (!res) {
- dev_err(&pdev->dev, "Unable to get eint resource\n");
- return -ENODEV;
- }
-
pctl->eint->base = devm_ioremap_resource(&pdev->dev, res);
if (IS_ERR(pctl->eint->base))
return PTR_ERR(pctl->eint->base);
diff --git a/drivers/pinctrl/pinctrl-single.c b/drivers/pinctrl/pinctrl-single.c
index b3153c0..e5647da 100644
--- a/drivers/pinctrl/pinctrl-single.c
+++ b/drivers/pinctrl/pinctrl-single.c
@@ -1590,8 +1590,11 @@
mux_bytes = pcs->width / BITS_PER_BYTE;
- if (!pcs->saved_vals)
+ if (!pcs->saved_vals) {
pcs->saved_vals = devm_kzalloc(pcs->dev, pcs->size, GFP_ATOMIC);
+ if (!pcs->saved_vals)
+ return -ENOMEM;
+ }
switch (pcs->width) {
case 64:
@@ -1651,8 +1654,13 @@
if (!pcs)
return -EINVAL;
- if (pcs->flags & PCS_CONTEXT_LOSS_OFF)
- pcs_save_context(pcs);
+ if (pcs->flags & PCS_CONTEXT_LOSS_OFF) {
+ int ret;
+
+ ret = pcs_save_context(pcs);
+ if (ret < 0)
+ return ret;
+ }
return pinctrl_force_sleep(pcs->pctl);
}
diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
index 1da3d71..1394810 100644
--- a/drivers/scsi/scsi_transport_fc.c
+++ b/drivers/scsi/scsi_transport_fc.c
@@ -3592,7 +3592,7 @@
/* the blk_end_sync_io() doesn't check the error */
if (inflight)
- blk_mq_complete_request(req);
+ __blk_complete_request(req);
return BLK_EH_DONE;
}
diff --git a/drivers/scsi/xen-scsifront.c b/drivers/scsi/xen-scsifront.c
index 36f59a1..61389bd 100644
--- a/drivers/scsi/xen-scsifront.c
+++ b/drivers/scsi/xen-scsifront.c
@@ -654,10 +654,17 @@
static int scsifront_sdev_configure(struct scsi_device *sdev)
{
struct vscsifrnt_info *info = shost_priv(sdev->host);
+ int err;
- if (info && current == info->curr)
- xenbus_printf(XBT_NIL, info->dev->nodename,
+ if (info && current == info->curr) {
+ err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateConnected);
+ if (err) {
+ xenbus_dev_error(info->dev, err,
+ "%s: writing dev_state_path", __func__);
+ return err;
+ }
+ }
return 0;
}
@@ -665,10 +672,15 @@
static void scsifront_sdev_destroy(struct scsi_device *sdev)
{
struct vscsifrnt_info *info = shost_priv(sdev->host);
+ int err;
- if (info && current == info->curr)
- xenbus_printf(XBT_NIL, info->dev->nodename,
+ if (info && current == info->curr) {
+ err = xenbus_printf(XBT_NIL, info->dev->nodename,
info->dev_state_path, "%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(info->dev, err,
+ "%s: writing dev_state_path", __func__);
+ }
}
static struct scsi_host_template scsifront_sht = {
@@ -1003,9 +1015,12 @@
if (scsi_add_device(info->host, chn, tgt, lun)) {
dev_err(&dev->dev, "scsi_add_device\n");
- xenbus_printf(XBT_NIL, dev->nodename,
+ err = xenbus_printf(XBT_NIL, dev->nodename,
info->dev_state_path,
"%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(dev, err,
+ "%s: writing dev_state_path", __func__);
}
break;
case VSCSIFRONT_OP_DEL_LUN:
@@ -1019,10 +1034,14 @@
}
break;
case VSCSIFRONT_OP_READD_LUN:
- if (device_state == XenbusStateConnected)
- xenbus_printf(XBT_NIL, dev->nodename,
+ if (device_state == XenbusStateConnected) {
+ err = xenbus_printf(XBT_NIL, dev->nodename,
info->dev_state_path,
"%d", XenbusStateConnected);
+ if (err)
+ xenbus_dev_error(dev, err,
+ "%s: writing dev_state_path", __func__);
+ }
break;
default:
break;
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 451e833..48b1542 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -41,4 +41,4 @@
xen-evtchn-y := evtchn.o
xen-gntdev-y := gntdev.o
xen-gntalloc-y := gntalloc.o
-xen-privcmd-y := privcmd.o
+xen-privcmd-y := privcmd.o privcmd-buf.o
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 762378f..08e4af0 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -628,8 +628,6 @@
xen_irq_info_cleanup(info);
}
- BUG_ON(info_for_irq(irq)->type == IRQT_UNBOUND);
-
xen_free_irq(irq);
}
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index 2473b0a..ba9f3ee 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -799,7 +799,7 @@
return 0;
}
-EXPORT_SYMBOL(gnttab_alloc_pages);
+EXPORT_SYMBOL_GPL(gnttab_alloc_pages);
/**
* gnttab_free_pages - free pages allocated by gnttab_alloc_pages()
@@ -820,7 +820,7 @@
}
free_xenballooned_pages(nr_pages, pages);
}
-EXPORT_SYMBOL(gnttab_free_pages);
+EXPORT_SYMBOL_GPL(gnttab_free_pages);
/* Handling of paged out grant targets (GNTST_eagain) */
#define MAX_DELAY 256
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 8835065..c93d8ef 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -289,8 +289,15 @@
return;
}
- if (sysrq_key != '\0')
- xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+ if (sysrq_key != '\0') {
+ err = xenbus_printf(xbt, "control", "sysrq", "%c", '\0');
+ if (err) {
+ pr_err("%s: Error %d writing sysrq in control/sysrq\n",
+ __func__, err);
+ xenbus_transaction_end(xbt, 1);
+ return;
+ }
+ }
err = xenbus_transaction_end(xbt, 0);
if (err == -EAGAIN)
@@ -342,7 +349,12 @@
continue;
snprintf(node, FEATURE_PATH_SIZE, "feature-%s",
shutdown_handlers[idx].command);
- xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+ err = xenbus_printf(XBT_NIL, "control", node, "%u", 1);
+ if (err) {
+ pr_err("%s: Error %d writing %s\n", __func__,
+ err, node);
+ return err;
+ }
}
return 0;
diff --git a/drivers/xen/privcmd-buf.c b/drivers/xen/privcmd-buf.c
new file mode 100644
index 0000000..df1ed37
--- /dev/null
+++ b/drivers/xen/privcmd-buf.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0 OR MIT
+
+/******************************************************************************
+ * privcmd-buf.c
+ *
+ * Mmap of hypercall buffers.
+ *
+ * Copyright (c) 2018 Juergen Gross
+ */
+
+#define pr_fmt(fmt) "xen:" KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/miscdevice.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+
+#include "privcmd.h"
+
+MODULE_LICENSE("GPL");
+
+static unsigned int limit = 64;
+module_param(limit, uint, 0644);
+MODULE_PARM_DESC(limit, "Maximum number of pages that may be allocated by "
+ "the privcmd-buf device per open file");
+
+struct privcmd_buf_private {
+ struct mutex lock;
+ struct list_head list;
+ unsigned int allocated;
+};
+
+struct privcmd_buf_vma_private {
+ struct privcmd_buf_private *file_priv;
+ struct list_head list;
+ unsigned int users;
+ unsigned int n_pages;
+ struct page *pages[];
+};
+
+static int privcmd_buf_open(struct inode *ino, struct file *file)
+{
+ struct privcmd_buf_private *file_priv;
+
+ file_priv = kzalloc(sizeof(*file_priv), GFP_KERNEL);
+ if (!file_priv)
+ return -ENOMEM;
+
+ mutex_init(&file_priv->lock);
+ INIT_LIST_HEAD(&file_priv->list);
+
+ file->private_data = file_priv;
+
+ return 0;
+}
+
+static void privcmd_buf_vmapriv_free(struct privcmd_buf_vma_private *vma_priv)
+{
+ unsigned int i;
+
+ vma_priv->file_priv->allocated -= vma_priv->n_pages;
+
+ list_del(&vma_priv->list);
+
+ for (i = 0; i < vma_priv->n_pages; i++)
+ if (vma_priv->pages[i])
+ __free_page(vma_priv->pages[i]);
+
+ kfree(vma_priv);
+}
+
+static int privcmd_buf_release(struct inode *ino, struct file *file)
+{
+ struct privcmd_buf_private *file_priv = file->private_data;
+ struct privcmd_buf_vma_private *vma_priv;
+
+ mutex_lock(&file_priv->lock);
+
+ while (!list_empty(&file_priv->list)) {
+ vma_priv = list_first_entry(&file_priv->list,
+ struct privcmd_buf_vma_private,
+ list);
+ privcmd_buf_vmapriv_free(vma_priv);
+ }
+
+ mutex_unlock(&file_priv->lock);
+
+ kfree(file_priv);
+
+ return 0;
+}
+
+static void privcmd_buf_vma_open(struct vm_area_struct *vma)
+{
+ struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+
+ if (!vma_priv)
+ return;
+
+ mutex_lock(&vma_priv->file_priv->lock);
+ vma_priv->users++;
+ mutex_unlock(&vma_priv->file_priv->lock);
+}
+
+static void privcmd_buf_vma_close(struct vm_area_struct *vma)
+{
+ struct privcmd_buf_vma_private *vma_priv = vma->vm_private_data;
+ struct privcmd_buf_private *file_priv;
+
+ if (!vma_priv)
+ return;
+
+ file_priv = vma_priv->file_priv;
+
+ mutex_lock(&file_priv->lock);
+
+ vma_priv->users--;
+ if (!vma_priv->users)
+ privcmd_buf_vmapriv_free(vma_priv);
+
+ mutex_unlock(&file_priv->lock);
+}
+
+static vm_fault_t privcmd_buf_vma_fault(struct vm_fault *vmf)
+{
+ pr_debug("fault: vma=%p %lx-%lx, pgoff=%lx, uv=%p\n",
+ vmf->vma, vmf->vma->vm_start, vmf->vma->vm_end,
+ vmf->pgoff, (void *)vmf->address);
+
+ return VM_FAULT_SIGBUS;
+}
+
+static const struct vm_operations_struct privcmd_buf_vm_ops = {
+ .open = privcmd_buf_vma_open,
+ .close = privcmd_buf_vma_close,
+ .fault = privcmd_buf_vma_fault,
+};
+
+static int privcmd_buf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ struct privcmd_buf_private *file_priv = file->private_data;
+ struct privcmd_buf_vma_private *vma_priv;
+ unsigned long count = vma_pages(vma);
+ unsigned int i;
+ int ret = 0;
+
+ if (!(vma->vm_flags & VM_SHARED) || count > limit ||
+ file_priv->allocated + count > limit)
+ return -EINVAL;
+
+ vma_priv = kzalloc(sizeof(*vma_priv) + count * sizeof(void *),
+ GFP_KERNEL);
+ if (!vma_priv)
+ return -ENOMEM;
+
+ vma_priv->n_pages = count;
+ count = 0;
+ for (i = 0; i < vma_priv->n_pages; i++) {
+ vma_priv->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
+ if (!vma_priv->pages[i])
+ break;
+ count++;
+ }
+
+ mutex_lock(&file_priv->lock);
+
+ file_priv->allocated += count;
+
+ vma_priv->file_priv = file_priv;
+ vma_priv->users = 1;
+
+ vma->vm_flags |= VM_IO | VM_DONTEXPAND;
+ vma->vm_ops = &privcmd_buf_vm_ops;
+ vma->vm_private_data = vma_priv;
+
+ list_add(&vma_priv->list, &file_priv->list);
+
+ if (vma_priv->n_pages != count)
+ ret = -ENOMEM;
+ else
+ for (i = 0; i < vma_priv->n_pages; i++) {
+ ret = vm_insert_page(vma, vma->vm_start + i * PAGE_SIZE,
+ vma_priv->pages[i]);
+ if (ret)
+ break;
+ }
+
+ if (ret)
+ privcmd_buf_vmapriv_free(vma_priv);
+
+ mutex_unlock(&file_priv->lock);
+
+ return ret;
+}
+
+const struct file_operations xen_privcmdbuf_fops = {
+ .owner = THIS_MODULE,
+ .open = privcmd_buf_open,
+ .release = privcmd_buf_release,
+ .mmap = privcmd_buf_mmap,
+};
+EXPORT_SYMBOL_GPL(xen_privcmdbuf_fops);
+
+struct miscdevice xen_privcmdbuf_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "xen/hypercall",
+ .fops = &xen_privcmdbuf_fops,
+};
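A sketch of how user space is expected to consume the new device: map whole pages from /dev/xen/hypercall (the miscdevice name registered above) instead of using anonymous memory for hypercall buffers. privcmd_buf_mmap() insists on MAP_SHARED and enforces the per-file page limit; error handling is trimmed here:

    #include <fcntl.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/dev/xen/hypercall", O_RDWR | O_CLOEXEC);
        void *buf;

        if (fd < 0)
            return 1;

        /* one page; MAP_SHARED is required by privcmd_buf_mmap() */
        buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (buf == MAP_FAILED)
            return 1;

        /* ... hand buf to privcmd hypercall ioctls ... */

        munmap(buf, 4096);
        close(fd);
        return 0;
    }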
diff --git a/drivers/xen/privcmd.c b/drivers/xen/privcmd.c
index 8ae0349..7e6e6821 100644
--- a/drivers/xen/privcmd.c
+++ b/drivers/xen/privcmd.c
@@ -1007,12 +1007,21 @@
pr_err("Could not register Xen privcmd device\n");
return err;
}
+
+ err = misc_register(&xen_privcmdbuf_dev);
+ if (err != 0) {
+ pr_err("Could not register Xen hypercall-buf device\n");
+ misc_deregister(&privcmd_dev);
+ return err;
+ }
+
return 0;
}
static void __exit privcmd_exit(void)
{
misc_deregister(&privcmd_dev);
+ misc_deregister(&xen_privcmdbuf_dev);
}
module_init(privcmd_init);
diff --git a/drivers/xen/privcmd.h b/drivers/xen/privcmd.h
index 14facae..0dd9f8f 100644
--- a/drivers/xen/privcmd.h
+++ b/drivers/xen/privcmd.h
@@ -1,3 +1,6 @@
#include <linux/fs.h>
extern const struct file_operations xen_privcmd_fops;
+extern const struct file_operations xen_privcmdbuf_fops;
+
+extern struct miscdevice xen_privcmdbuf_dev;
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index 7bc88fd..e2f3e8b 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -1012,6 +1012,7 @@
{
struct v2p_entry *entry;
unsigned long flags;
+ int err;
if (try) {
spin_lock_irqsave(&info->v2p_lock, flags);
@@ -1027,8 +1028,11 @@
scsiback_del_translation_entry(info, vir);
}
} else if (!try) {
- xenbus_printf(XBT_NIL, info->dev->nodename, state,
+ err = xenbus_printf(XBT_NIL, info->dev->nodename, state,
"%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(info->dev, err,
+ "%s: writing %s", __func__, state);
}
}
@@ -1067,8 +1071,11 @@
snprintf(str, sizeof(str), "vscsi-devs/%s/p-dev", ent);
val = xenbus_read(XBT_NIL, dev->nodename, str, NULL);
if (IS_ERR(val)) {
- xenbus_printf(XBT_NIL, dev->nodename, state,
+ err = xenbus_printf(XBT_NIL, dev->nodename, state,
"%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(info->dev, err,
+ "%s: writing %s", __func__, state);
return;
}
strlcpy(phy, val, VSCSI_NAMELEN);
@@ -1079,8 +1086,11 @@
err = xenbus_scanf(XBT_NIL, dev->nodename, str, "%u:%u:%u:%u",
&vir.hst, &vir.chn, &vir.tgt, &vir.lun);
if (XENBUS_EXIST_ERR(err)) {
- xenbus_printf(XBT_NIL, dev->nodename, state,
+ err = xenbus_printf(XBT_NIL, dev->nodename, state,
"%d", XenbusStateClosed);
+ if (err)
+ xenbus_dev_error(info->dev, err,
+ "%s: writing %s", __func__, state);
return;
}
diff --git a/fs/ext2/ext2.h b/fs/ext2/ext2.h
index cc40802..00e759f 100644
--- a/fs/ext2/ext2.h
+++ b/fs/ext2/ext2.h
@@ -748,7 +748,6 @@
unsigned long);
extern unsigned long ext2_count_free_blocks (struct super_block *);
extern unsigned long ext2_count_dirs (struct super_block *);
-extern void ext2_check_blocks_bitmap (struct super_block *);
extern struct ext2_group_desc * ext2_get_group_desc(struct super_block * sb,
unsigned int block_group,
struct buffer_head ** bh);
@@ -771,7 +770,6 @@
extern struct inode * ext2_new_inode (struct inode *, umode_t, const struct qstr *);
extern void ext2_free_inode (struct inode *);
extern unsigned long ext2_count_free_inodes (struct super_block *);
-extern void ext2_check_inodes_bitmap (struct super_block *);
extern unsigned long ext2_count_free (struct buffer_head *, unsigned);
/* inode.c */
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 25ab127..8ff53f8 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -557,6 +557,9 @@
set_opt (opts->s_mount_opt, NO_UID32);
break;
case Opt_nocheck:
+ ext2_msg(sb, KERN_WARNING,
+ "Option nocheck/check=none is deprecated and"
+ " will be removed in June 2020.");
clear_opt (opts->s_mount_opt, CHECK);
break;
case Opt_debug:
@@ -1335,9 +1338,6 @@
new_opts.s_resgid = sbi->s_resgid;
spin_unlock(&sbi->s_lock);
- /*
- * Allow the "check" option to be passed as a remount option.
- */
if (!parse_options(data, sb, &new_opts))
return -EINVAL;
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index bbd0465..f033f3a 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -883,8 +883,10 @@
rcu_read_lock();
list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
res = nfs_delegation_find_inode_server(server, fhandle);
- if (res != ERR_PTR(-ENOENT))
+ if (res != ERR_PTR(-ENOENT)) {
+ rcu_read_unlock();
return res;
+ }
}
rcu_read_unlock();
return ERR_PTR(-ENOENT);
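The NFS hunk fixes an early return that leaked the RCU read lock. The invariant, that every exit path inside the critical section drops the lock, is modeled below with a pthread rwlock, since plain user-space C has no RCU:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t lock = PTHREAD_RWLOCK_INITIALIZER;
    static int table[3] = { -1, 7, -1 };

    static int find_first_nonneg(void)
    {
        pthread_rwlock_rdlock(&lock);
        for (int i = 0; i < 3; i++) {
            if (table[i] >= 0) {
                pthread_rwlock_unlock(&lock);  /* the unlock the bug omitted */
                return table[i];
            }
        }
        pthread_rwlock_unlock(&lock);
        return -1;
    }

    int main(void)
    {
        printf("%d\n", find_first_nonneg());
        return 0;
    }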
diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c
index d4a07ac..8f00379 100644
--- a/fs/nfs/flexfilelayout/flexfilelayout.c
+++ b/fs/nfs/flexfilelayout/flexfilelayout.c
@@ -1243,17 +1243,18 @@
hdr->ds_clp, hdr->lseg,
hdr->pgio_mirror_idx);
+ clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+ clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
switch (err) {
case -NFS4ERR_RESET_TO_PNFS:
if (ff_layout_choose_best_ds_for_read(hdr->lseg,
hdr->pgio_mirror_idx + 1,
&hdr->pgio_mirror_idx))
goto out_eagain;
- ff_layout_read_record_layoutstats_done(task, hdr);
- pnfs_read_resend_pnfs(hdr);
+ set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
return task->tk_status;
case -NFS4ERR_RESET_TO_MDS:
- ff_layout_reset_read(hdr);
+ set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
return task->tk_status;
case -EAGAIN:
goto out_eagain;
@@ -1403,6 +1404,10 @@
struct nfs_pgio_header *hdr = data;
ff_layout_read_record_layoutstats_done(&hdr->task, hdr);
+ if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+ pnfs_read_resend_pnfs(hdr);
+ else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+ ff_layout_reset_read(hdr);
pnfs_generic_rw_release(data);
}
@@ -1423,12 +1428,14 @@
hdr->ds_clp, hdr->lseg,
hdr->pgio_mirror_idx);
+ clear_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
+ clear_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
switch (err) {
case -NFS4ERR_RESET_TO_PNFS:
- ff_layout_reset_write(hdr, true);
+ set_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags);
return task->tk_status;
case -NFS4ERR_RESET_TO_MDS:
- ff_layout_reset_write(hdr, false);
+ set_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags);
return task->tk_status;
case -EAGAIN:
return -EAGAIN;
@@ -1575,6 +1582,10 @@
struct nfs_pgio_header *hdr = data;
ff_layout_write_record_layoutstats_done(&hdr->task, hdr);
+ if (test_bit(NFS_IOHDR_RESEND_PNFS, &hdr->flags))
+ ff_layout_reset_write(hdr, true);
+ else if (test_bit(NFS_IOHDR_RESEND_MDS, &hdr->flags))
+ ff_layout_reset_write(hdr, false);
pnfs_generic_rw_release(data);
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ed45090..6dd1468 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -3294,6 +3294,7 @@
struct nfs4_closedata *calldata = data;
struct nfs4_state *state = calldata->state;
struct inode *inode = calldata->inode;
+ struct pnfs_layout_hdr *lo;
bool is_rdonly, is_wronly, is_rdwr;
int call_close = 0;
@@ -3337,6 +3338,12 @@
goto out_wait;
}
+ lo = calldata->arg.lr_args ? calldata->arg.lr_args->layout : NULL;
+ if (lo && !pnfs_layout_is_valid(lo)) {
+ calldata->arg.lr_args = NULL;
+ calldata->res.lr_res = NULL;
+ }
+
if (calldata->arg.fmode == 0)
task->tk_msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CLOSE];
@@ -5972,12 +5979,19 @@
static void nfs4_delegreturn_prepare(struct rpc_task *task, void *data)
{
struct nfs4_delegreturndata *d_data;
+ struct pnfs_layout_hdr *lo;
d_data = (struct nfs4_delegreturndata *)data;
if (!d_data->lr.roc && nfs4_wait_on_layoutreturn(d_data->inode, task))
return;
+ lo = d_data->args.lr_args ? d_data->args.lr_args->layout : NULL;
+ if (lo && !pnfs_layout_is_valid(lo)) {
+ d_data->args.lr_args = NULL;
+ d_data->res.lr_res = NULL;
+ }
+
nfs4_setup_sequence(d_data->res.server->nfs_client,
&d_data->args.seq_args,
&d_data->res.seq_res,
@@ -8650,6 +8664,8 @@
dprintk("--> %s tk_status => %d\n", __func__, -task->tk_status);
+ nfs4_sequence_free_slot(&lgp->res.seq_res);
+
switch (nfs4err) {
case 0:
goto out;
@@ -8714,7 +8730,6 @@
goto out;
}
- nfs4_sequence_free_slot(&lgp->res.seq_res);
err = nfs4_handle_exception(server, nfs4err, exception);
if (!status) {
if (exception->retry)
@@ -8786,20 +8801,22 @@
if (IS_ERR(task))
return ERR_CAST(task);
status = rpc_wait_for_completion_task(task);
- if (status == 0) {
+ if (status != 0)
+ goto out;
+
+ /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
+ if (task->tk_status < 0 || lgp->res.layoutp->len == 0) {
status = nfs4_layoutget_handle_exception(task, lgp, &exception);
*timeout = exception.timeout;
- }
-
+ } else
+ lseg = pnfs_layout_process(lgp);
+out:
trace_nfs4_layoutget(lgp->args.ctx,
&lgp->args.range,
&lgp->res.range,
&lgp->res.stateid,
status);
- /* if layoutp->len is 0, nfs4_layoutget_prepare called rpc_exit */
- if (status == 0 && lgp->res.layoutp->len)
- lseg = pnfs_layout_process(lgp);
rpc_put_task(task);
dprintk("<-- %s status=%d\n", __func__, status);
if (status)
@@ -8817,6 +8834,8 @@
&lrp->args.seq_args,
&lrp->res.seq_res,
task);
+ if (!pnfs_layout_is_valid(lrp->args.layout))
+ rpc_exit(task, 0);
}
static void nfs4_layoutreturn_done(struct rpc_task *task, void *calldata)
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index a8f5e6b..3fe8142 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -801,6 +801,11 @@
{
}
+static inline bool pnfs_layout_is_valid(const struct pnfs_layout_hdr *lo)
+{
+ return false;
+}
+
#endif /* CONFIG_NFS_V4_1 */
#if IS_ENABLED(CONFIG_NFS_V4_2)
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index d88231e..fc20e06 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -711,21 +711,18 @@
static unsigned long
dqcache_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- struct list_head *head;
struct dquot *dquot;
unsigned long freed = 0;
spin_lock(&dq_list_lock);
- head = free_dquots.prev;
- while (head != &free_dquots && sc->nr_to_scan) {
- dquot = list_entry(head, struct dquot, dq_free);
+ while (!list_empty(&free_dquots) && sc->nr_to_scan) {
+ dquot = list_first_entry(&free_dquots, struct dquot, dq_free);
remove_dquot_hash(dquot);
remove_free_dquot(dquot);
remove_inuse(dquot);
do_destroy_dquot(dquot);
sc->nr_to_scan--;
freed++;
- head = free_dquots.prev;
}
spin_unlock(&dq_list_lock);
return freed;
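The dquot shrinker rewrite replaces a manually maintained prev pointer with the take-the-first-entry-while-non-empty idiom, which stays correct even if the destroy step reshuffles the list. A minimal model with a hand-rolled list rather than the kernel's list.h:

    #include <stdio.h>
    #include <stdlib.h>

    struct node { struct node *next; };

    static struct node *head;

    static unsigned long shrink_scan(unsigned long nr_to_scan)
    {
        unsigned long freed = 0;

        while (head && nr_to_scan) {
            struct node *n = head;  /* list_first_entry() analogue  */

            head = n->next;         /* remove_free_dquot() analogue */
            free(n);                /* do_destroy_dquot() analogue  */
            nr_to_scan--;
            freed++;
        }
        return freed;
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++) {
            struct node *n = malloc(sizeof(*n));
            n->next = head;
            head = n;
        }
        printf("freed %lu\n", shrink_scan(3));
        return 0;
    }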
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 1b961b1..fcda0fc 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -533,8 +533,7 @@
udf_write_aext(table, &epos, &eloc,
(etype << 30) | elen, 1);
} else
- udf_delete_aext(table, epos, eloc,
- (etype << 30) | elen);
+ udf_delete_aext(table, epos);
} else {
alloc_count = 0;
}
@@ -630,7 +629,7 @@
if (goal_elen)
udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
else
- udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
+ udf_delete_aext(table, goal_epos);
brelse(goal_epos.bh);
udf_add_free_space(sb, partition, -1);
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 0a98a23..d952301 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -141,10 +141,7 @@
fibh->ebh->b_data,
sizeof(struct fileIdentDesc) + fibh->soffset);
- fi_len = (sizeof(struct fileIdentDesc) +
- cfi->lengthFileIdent +
- le16_to_cpu(cfi->lengthOfImpUse) + 3) & ~3;
-
+ fi_len = udf_dir_entry_len(cfi);
*nf_pos += fi_len - (fibh->eoffset - fibh->soffset);
fibh->eoffset = fibh->soffset + fi_len;
} else {
@@ -152,6 +149,9 @@
sizeof(struct fileIdentDesc));
}
}
+ /* Got last entry outside of dir size - fs is corrupted! */
+ if (*nf_pos > dir->i_size)
+ return NULL;
return fi;
}
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 7f39d17..9915a58 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -1147,8 +1147,7 @@
if (startnum > endnum) {
for (i = 0; i < (startnum - endnum); i++)
- udf_delete_aext(inode, *epos, laarr[i].extLocation,
- laarr[i].extLength);
+ udf_delete_aext(inode, *epos);
} else if (startnum < endnum) {
for (i = 0; i < (endnum - startnum); i++) {
udf_insert_aext(inode, *epos, laarr[i].extLocation,
@@ -2176,14 +2175,15 @@
return (nelen >> 30);
}
-int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
- struct kernel_lb_addr eloc, uint32_t elen)
+int8_t udf_delete_aext(struct inode *inode, struct extent_position epos)
{
struct extent_position oepos;
int adsize;
int8_t etype;
struct allocExtDesc *aed;
struct udf_inode_info *iinfo;
+ struct kernel_lb_addr eloc;
+ uint32_t elen;
if (epos.bh) {
get_bh(epos.bh);
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index c586026..06f37dd 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -351,8 +351,6 @@
loff_t f_pos;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
int nfidlen;
- uint8_t lfi;
- uint16_t liu;
udf_pblk_t block;
struct kernel_lb_addr eloc;
uint32_t elen = 0;
@@ -383,7 +381,7 @@
namelen = 0;
}
- nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3;
+ nfidlen = ALIGN(sizeof(struct fileIdentDesc) + namelen, UDF_NAME_PAD);
f_pos = udf_ext0_offset(dir);
@@ -424,12 +422,8 @@
goto out_err;
}
- liu = le16_to_cpu(cfi->lengthOfImpUse);
- lfi = cfi->lengthFileIdent;
-
if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) {
- if (((sizeof(struct fileIdentDesc) +
- liu + lfi + 3) & ~3) == nfidlen) {
+ if (udf_dir_entry_len(cfi) == nfidlen) {
cfi->descTag.tagSerialNum = cpu_to_le16(1);
cfi->fileVersionNum = cpu_to_le16(1);
cfi->fileCharacteristics = 0;
@@ -1201,9 +1195,7 @@
if (dir_fi) {
dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location);
- udf_update_tag((char *)dir_fi,
- (sizeof(struct fileIdentDesc) +
- le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3);
+ udf_update_tag((char *)dir_fi, udf_dir_entry_len(dir_fi));
if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB)
mark_inode_dirty(old_inode);
else
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index bae311b..84c47dd 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -132,6 +132,12 @@
extern int udf_write_fi(struct inode *inode, struct fileIdentDesc *,
struct fileIdentDesc *, struct udf_fileident_bh *,
uint8_t *, uint8_t *);
+static inline unsigned int udf_dir_entry_len(struct fileIdentDesc *cfi)
+{
+ return ALIGN(sizeof(struct fileIdentDesc) +
+ le16_to_cpu(cfi->lengthOfImpUse) + cfi->lengthFileIdent,
+ UDF_NAME_PAD);
+}
/* file.c */
extern long udf_ioctl(struct file *, unsigned int, unsigned long);
@@ -167,8 +173,7 @@
struct kernel_lb_addr *, uint32_t, int);
extern void udf_write_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t, int);
-extern int8_t udf_delete_aext(struct inode *, struct extent_position,
- struct kernel_lb_addr, uint32_t);
+extern int8_t udf_delete_aext(struct inode *, struct extent_position);
extern int8_t udf_next_aext(struct inode *, struct extent_position *,
struct kernel_lb_addr *, uint32_t *, int);
extern int8_t udf_current_aext(struct inode *, struct extent_position *,
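udf_dir_entry_len() centralises a computation that was open-coded (with the (x + 3) & ~3 rounding) in directory.c and namei.c: descriptor size plus both variable-length fields, rounded up to UDF_NAME_PAD (4 bytes). Self-contained model, with ALIGN() reimplemented:

    #include <stdio.h>

    #define ALIGN(x, a)     (((x) + (a) - 1) & ~((a) - 1))
    #define UDF_NAME_PAD    4

    static unsigned int dir_entry_len(unsigned int desc_len,
                                      unsigned int imp_use_len,
                                      unsigned int ident_len)
    {
        return ALIGN(desc_len + imp_use_len + ident_len, UDF_NAME_PAD);
    }

    int main(void)
    {
        /* e.g. a 38-byte descriptor + 5-byte name, no impUse -> padded to 44 */
        printf("%u\n", dir_entry_len(38, 0, 5));
        return 0;
    }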
diff --git a/include/acpi/processor.h b/include/acpi/processor.h
index 40a916e..1194a4c 100644
--- a/include/acpi/processor.h
+++ b/include/acpi/processor.h
@@ -309,7 +309,7 @@
{
return;
}
-static inline int acpi_processor_ppc_has_changed(struct acpi_processor *pr,
+static inline void acpi_processor_ppc_has_changed(struct acpi_processor *pr,
int event_flag)
{
static unsigned int printout = 1;
@@ -320,7 +320,6 @@
"Consider compiling CPUfreq support into your kernel.\n");
printout = 0;
}
- return 0;
}
static inline int acpi_processor_get_bios_limit(int cpu, unsigned int *limit)
{
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
index 0763f06..d10f1e7 100644
--- a/include/asm-generic/qspinlock_types.h
+++ b/include/asm-generic/qspinlock_types.h
@@ -63,7 +63,7 @@
/*
* Initializier
*/
-#define __ARCH_SPIN_LOCK_UNLOCKED { .val = ATOMIC_INIT(0) }
+#define __ARCH_SPIN_LOCK_UNLOCKED { { .val = ATOMIC_INIT(0) } }
/*
* Bitfields in the atomic value:
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index 0bd432a..2425176 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -22,7 +22,6 @@
*/
enum wb_state {
WB_registered, /* bdi_register() was done */
- WB_shutting_down, /* wb_shutdown() in progress */
WB_writeback_running, /* Writeback is in progress */
WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
WB_start_all, /* nr_pages == 0 (all) work pending */
@@ -189,6 +188,7 @@
#ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */
+ struct mutex cgwb_release_mutex; /* protect shutdown of wb structs */
#else
struct bdi_writeback_congested *wb_congested;
#endif
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 4bd2f34..201de12 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -503,6 +503,7 @@
* IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip
* IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask
* IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode
* IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs
*/
enum {
IRQCHIP_SET_TYPE_MASKED = (1 << 0),
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 25b33b6..dd1e40d 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -145,11 +145,6 @@
return desc->irq_common_data.handler_data;
}
-static inline struct msi_desc *irq_desc_get_msi_desc(struct irq_desc *desc)
-{
- return desc->irq_common_data.msi_desc;
-}
-
/*
* Architectures call this to let the generic IRQ layer
* handle an interrupt.
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index d231232..941dc0a 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -666,7 +666,7 @@
* your code. (Extra memory is used for special buffers that are
* allocated when trace_printk() is used.)
*
- * A little optization trick is done here. If there's only one
+ * A little optimization trick is done here. If there's only one
* argument, there's no need to scan the string for printf formats.
* The trace_puts() will suffice. But how can we take advantage of
* using trace_puts() when trace_printk() has only one argument?
diff --git a/include/linux/memory.h b/include/linux/memory.h
index 31ca3e2..a6ddefc 100644
--- a/include/linux/memory.h
+++ b/include/linux/memory.h
@@ -38,6 +38,7 @@
int arch_get_memory_phys_device(unsigned long start_pfn);
unsigned long memory_block_size_bytes(void);
+int set_memory_block_size_order(unsigned int order);
/* These states are exposed to userspace as text strings in sysfs */
#define MEM_ONLINE (1<<0) /* exposed to userspace */
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 9dee3c2..712eed1 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1438,6 +1438,8 @@
NFS_IOHDR_EOF,
NFS_IOHDR_REDO,
NFS_IOHDR_STAT,
+ NFS_IOHDR_RESEND_PNFS,
+ NFS_IOHDR_RESEND_MDS,
};
struct nfs_io_completion;
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 4193c41..a685da2 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -98,5 +98,7 @@
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock);
-
+extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
+ spinlock_t *lock,
+ unsigned long *flags);
#endif /* _LINUX_REFCOUNT_H */
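refcount_dec_and_lock_irqsave() extends the dec-and-lock idiom to IRQ-unsafe locks: decrement, and only when the count hits zero, return with the lock held and interrupts disabled. There is no interrupt state in user space, so the model below uses a mutex and C11 atomics; the lock-then-recheck slow path is the essential part:

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool dec_and_lock(atomic_int *ref, pthread_mutex_t *lock)
    {
        int old = atomic_load(ref);

        /* fast path: provably not the last reference */
        while (old > 1) {
            if (atomic_compare_exchange_weak(ref, &old, old - 1))
                return false;
        }

        /* might be the last one: take the lock, then decrement */
        pthread_mutex_lock(lock);
        if (atomic_fetch_sub(ref, 1) == 1)
            return true;    /* reached zero: caller holds the lock */
        pthread_mutex_unlock(lock);
        return false;
    }

    int main(void)
    {
        pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
        atomic_int ref = 1;

        if (dec_and_lock(&ref, &lock)) {
            puts("last ref: tearing down under lock");
            pthread_mutex_unlock(&lock);
        }
        return 0;
    }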
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 87bf02d..9256118 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1799,20 +1799,22 @@
set_tsk_thread_flag(t, TIF_NOTIFY_RESUME);
}
-void __rseq_handle_notify_resume(struct pt_regs *regs);
+void __rseq_handle_notify_resume(struct ksignal *sig, struct pt_regs *regs);
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
{
if (current->rseq)
- __rseq_handle_notify_resume(regs);
+ __rseq_handle_notify_resume(ksig, regs);
}
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
{
preempt_disable();
__set_bit(RSEQ_EVENT_SIGNAL_BIT, &current->rseq_event_mask);
preempt_enable();
- rseq_handle_notify_resume(regs);
+ rseq_handle_notify_resume(ksig, regs);
}
/* rseq_preempt() requires preemption to be disabled. */
@@ -1831,9 +1833,7 @@
/*
* If parent process has a registered restartable sequences area, the
- * child inherits. Only applies when forking a process, not a thread. In
- * case a parent fork() in the middle of a restartable sequence, set the
- * resume notifier to force the child to retry.
+ * child inherits. Only applies when forking a process, not a thread.
*/
static inline void rseq_fork(struct task_struct *t, unsigned long clone_flags)
{
@@ -1847,7 +1847,6 @@
t->rseq_len = current->rseq_len;
t->rseq_sig = current->rseq_sig;
t->rseq_event_mask = current->rseq_event_mask;
- rseq_preempt(t);
}
}
@@ -1864,10 +1863,12 @@
static inline void rseq_set_notify_resume(struct task_struct *t)
{
}
-static inline void rseq_handle_notify_resume(struct pt_regs *regs)
+static inline void rseq_handle_notify_resume(struct ksignal *ksig,
+ struct pt_regs *regs)
{
}
-static inline void rseq_signal_deliver(struct pt_regs *regs)
+static inline void rseq_signal_deliver(struct ksignal *ksig,
+ struct pt_regs *regs)
{
}
static inline void rseq_preempt(struct task_struct *t)
diff --git a/include/linux/spinlock.h b/include/linux/spinlock.h
index 1e8a464..fd57888 100644
--- a/include/linux/spinlock.h
+++ b/include/linux/spinlock.h
@@ -427,6 +427,11 @@
#define atomic_dec_and_lock(atomic, lock) \
__cond_lock(lock, _atomic_dec_and_lock(atomic, lock))
+extern int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+ unsigned long *flags);
+#define atomic_dec_and_lock_irqsave(atomic, lock, flags) \
+ __cond_lock(lock, _atomic_dec_and_lock_irqsave(atomic, lock, &(flags)))
+
int alloc_bucket_spinlocks(spinlock_t **locks, unsigned int *lock_mask,
size_t max_size, unsigned int cpu_mult,
gfp_t gfp);
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index 85a3fb6..20d6cc9 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -53,6 +53,9 @@
/* These are client behavior specific flags. */
#define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on
disconnect. */
+#define NBD_CFLAG_DISCONNECT_ON_CLOSE (1 << 1) /* disconnect the nbd device on
+ * close by last opener.
+ */
/* userspace doesn't need the nbd_device structure */
diff --git a/include/xen/xen.h b/include/xen/xen.h
index 9d4340c..1e1d9bd 100644
--- a/include/xen/xen.h
+++ b/include/xen/xen.h
@@ -25,12 +25,16 @@
#define xen_hvm_domain() (xen_domain_type == XEN_HVM_DOMAIN)
#define xen_pvh_domain() (xen_pvh)
+#include <linux/types.h>
+
+extern uint32_t xen_start_flags;
+
#ifdef CONFIG_XEN_DOM0
#include <xen/interface/xen.h>
#include <asm/xen/hypervisor.h>
#define xen_initial_domain() (xen_domain() && \
- xen_start_info && xen_start_info->flags & SIF_INITDOMAIN)
+ (xen_start_flags & SIF_INITDOMAIN))
#else /* !CONFIG_XEN_DOM0 */
#define xen_initial_domain() (0)
#endif /* CONFIG_XEN_DOM0 */
diff --git a/kernel/events/ring_buffer.c b/kernel/events/ring_buffer.c
index 045a37e..5d3cf40 100644
--- a/kernel/events/ring_buffer.c
+++ b/kernel/events/ring_buffer.c
@@ -103,7 +103,7 @@
preempt_enable();
}
-static bool __always_inline
+static __always_inline bool
ring_buffer_has_space(unsigned long head, unsigned long tail,
unsigned long data_size, unsigned int size,
bool backward)
@@ -114,7 +114,7 @@
return CIRC_SPACE(tail, head, data_size) >= size;
}
-static int __always_inline
+static __always_inline int
__perf_output_begin(struct perf_output_handle *handle,
struct perf_event *event, unsigned int size,
bool backward)
@@ -414,7 +414,7 @@
}
EXPORT_SYMBOL_GPL(perf_aux_output_begin);
-static bool __always_inline rb_need_aux_wakeup(struct ring_buffer *rb)
+static __always_inline bool rb_need_aux_wakeup(struct ring_buffer *rb)
{
if (rb->aux_overwrite)
return false;
diff --git a/kernel/irq/debugfs.c b/kernel/irq/debugfs.c
index 4dadeb3..6f63613 100644
--- a/kernel/irq/debugfs.c
+++ b/kernel/irq/debugfs.c
@@ -55,6 +55,7 @@
BIT_MASK_DESCR(IRQCHIP_SKIP_SET_WAKE),
BIT_MASK_DESCR(IRQCHIP_ONESHOT_SAFE),
BIT_MASK_DESCR(IRQCHIP_EOI_THREADED),
+ BIT_MASK_DESCR(IRQCHIP_SUPPORTS_LEVEL_MSI),
};
static void
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index edcac5d..5fa4d31 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -1265,11 +1265,11 @@
this.parent = NULL;
this.class = class;
- local_irq_save(flags);
+ raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_forward_deps(&this);
arch_spin_unlock(&lockdep_lock);
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
return ret;
}
@@ -1292,11 +1292,11 @@
this.parent = NULL;
this.class = class;
- local_irq_save(flags);
+ raw_local_irq_save(flags);
arch_spin_lock(&lockdep_lock);
ret = __lockdep_count_backward_deps(&this);
arch_spin_unlock(&lockdep_lock);
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
return ret;
}
@@ -4411,7 +4411,7 @@
if (unlikely(!debug_locks))
return;
- local_irq_save(flags);
+ raw_local_irq_save(flags);
for (i = 0; i < curr->lockdep_depth; i++) {
hlock = curr->held_locks + i;
@@ -4422,7 +4422,7 @@
print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
break;
}
- local_irq_restore(flags);
+ raw_local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
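For context: with CONFIG_TRACE_IRQFLAGS enabled, local_irq_save()/local_irq_restore() invoke the trace_hardirqs_off()/trace_hardirqs_on() hooks, and those can re-enter lockdep while it already holds its internal arch_spinlock_t. The raw_ variants skip the hooks, which is the point of the substitution above. The resulting pattern in these paths looks like this (a sketch of the code as changed, not a new API):

	unsigned long flags;

	raw_local_irq_save(flags);	/* disables IRQs, no trace_hardirqs_off() */
	arch_spin_lock(&lockdep_lock);	/* lockdep-free low-level spinlock */
	/* ... walk the dependency graph ... */
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);	/* no trace_hardirqs_on() re-entry */
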
diff --git a/kernel/locking/rwsem.c b/kernel/locking/rwsem.c
index bc1e507..776308d 100644
--- a/kernel/locking/rwsem.c
+++ b/kernel/locking/rwsem.c
@@ -181,6 +181,7 @@
might_sleep();
__down_read(sem);
+ rwsem_set_reader_owned(sem);
}
EXPORT_SYMBOL(down_read_non_owner);
diff --git a/kernel/rseq.c b/kernel/rseq.c
index ae306f9..22b6acf 100644
--- a/kernel/rseq.c
+++ b/kernel/rseq.c
@@ -251,10 +251,10 @@
* respect to other threads scheduled on the same CPU, and with respect
* to signal handlers.
*/
-void __rseq_handle_notify_resume(struct pt_regs *regs)
+void __rseq_handle_notify_resume(struct ksignal *ksig, struct pt_regs *regs)
{
struct task_struct *t = current;
- int ret;
+ int ret, sig;
if (unlikely(t->flags & PF_EXITING))
return;
@@ -268,7 +268,8 @@
return;
error:
- force_sig(SIGSEGV, t);
+ sig = ksig ? ksig->sig : 0;
+ force_sigsegv(sig, t);
}
#ifdef CONFIG_DEBUG_RSEQ
diff --git a/kernel/softirq.c b/kernel/softirq.c
index de2f57f..900dcfe 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -139,9 +139,13 @@
{
lockdep_assert_irqs_disabled();
+ if (preempt_count() == cnt)
+ trace_preempt_on(CALLER_ADDR0, get_lock_parent_ip());
+
if (softirq_count() == (cnt & SOFTIRQ_MASK))
trace_softirqs_on(_RET_IP_);
- preempt_count_sub(cnt);
+
+ __preempt_count_sub(cnt);
}
/*
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 055a4a7..3e93c54 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1659,7 +1659,7 @@
int nanosleep_copyout(struct restart_block *restart, struct timespec64 *ts)
{
switch(restart->nanosleep.type) {
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
case TT_COMPAT:
if (compat_put_timespec64(ts, restart->nanosleep.compat_rmtp))
return -EFAULT;
diff --git a/kernel/time/posix-cpu-timers.c b/kernel/time/posix-cpu-timers.c
index 5a6251ac..9cdf54b 100644
--- a/kernel/time/posix-cpu-timers.c
+++ b/kernel/time/posix-cpu-timers.c
@@ -604,7 +604,6 @@
/*
* Disarm any old timer after extracting its expiry time.
*/
- lockdep_assert_irqs_disabled();
ret = 0;
old_incr = timer->it.cpu.incr;
@@ -1049,7 +1048,6 @@
/*
* Now re-arm for the new expiry time.
*/
- lockdep_assert_irqs_disabled();
arm_timer(timer);
unlock:
unlock_task_sighand(p, &flags);
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 6fa9921..2b41e8e 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -28,6 +28,7 @@
*/
#include <linux/export.h>
+#include <linux/kernel.h>
#include <linux/timex.h>
#include <linux/capability.h>
#include <linux/timekeeper_internal.h>
@@ -314,9 +315,10 @@
return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC);
#else
# if BITS_PER_LONG == 32
- return (HZ_TO_MSEC_MUL32 * j) >> HZ_TO_MSEC_SHR32;
+ return (HZ_TO_MSEC_MUL32 * j + (1ULL << HZ_TO_MSEC_SHR32) - 1) >>
+ HZ_TO_MSEC_SHR32;
# else
- return (j * HZ_TO_MSEC_NUM) / HZ_TO_MSEC_DEN;
+ return DIV_ROUND_UP(j * HZ_TO_MSEC_NUM, HZ_TO_MSEC_DEN);
# endif
#endif
}
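A worked example of why the rounding matters: with CONFIG_HZ=300 the reduced fraction MSEC_PER_SEC/HZ is HZ_TO_MSEC_NUM/HZ_TO_MSEC_DEN = 10/3, i.e. one jiffy is 3.33 ms. Truncating division under-reports elapsed time, while rounding up guarantees jiffies_to_msecs(j) never undershoots:

	/* HZ = 300, so HZ_TO_MSEC_NUM/HZ_TO_MSEC_DEN = 10/3 */
	jiffies_to_msecs(1);	/* before: (1 * 10) / 3 = 3 ms, less than one jiffy */
				/* after:  DIV_ROUND_UP(1 * 10, 3) = 4 ms           */
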
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index c9336e9..a0079b4 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1360,8 +1360,6 @@
void
update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu)
{
- struct ring_buffer *buf;
-
if (tr->stop_count)
return;
@@ -1375,9 +1373,7 @@
arch_spin_lock(&tr->max_lock);
- buf = tr->trace_buffer.buffer;
- tr->trace_buffer.buffer = tr->max_buffer.buffer;
- tr->max_buffer.buffer = buf;
+ swap(tr->trace_buffer.buffer, tr->max_buffer.buffer);
__update_max_tr(tr, tsk, cpu);
arch_spin_unlock(&tr->max_lock);
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index e1c818d..0dceb77 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -78,7 +78,8 @@
C(TOO_MANY_PREDS, "Too many terms in predicate expression"), \
C(INVALID_FILTER, "Meaningless filter expression"), \
C(IP_FIELD_ONLY, "Only 'ip' field is supported for function trace"), \
- C(INVALID_VALUE, "Invalid value (did you forget quotes)?"),
+ C(INVALID_VALUE, "Invalid value (did you forget quotes)?"), \
+ C(NO_FILTER, "No filter found"),
#undef C
#define C(a, b) FILT_ERR_##a
@@ -550,6 +551,13 @@
goto out_free;
}
+ if (!N) {
+ /* No program? */
+ ret = -EINVAL;
+ parse_error(pe, FILT_ERR_NO_FILTER, ptr - str);
+ goto out_free;
+ }
+
prog[N].pred = NULL; /* #13 */
prog[N].target = 1; /* TRUE */
prog[N+1].pred = NULL;
diff --git a/lib/Makefile b/lib/Makefile
index 8153fda..90dc552 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -23,7 +23,7 @@
sha1.o chacha20.o irq_regs.o argv_split.o \
flex_proportions.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o kobject_uevent.o \
- earlycpio.o seq_buf.o siphash.o \
+ earlycpio.o seq_buf.o siphash.o dec_and_lock.o \
nmi_backtrace.o nodemask.o win_minmax.o
lib-$(CONFIG_PRINTK) += dump_stack.o
@@ -95,10 +95,6 @@
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
-ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
- lib-y += dec_and_lock.o
-endif
-
obj-$(CONFIG_BITREVERSE) += bitrev.o
obj-$(CONFIG_RATIONAL) += rational.o
obj-$(CONFIG_CRC_CCITT) += crc-ccitt.o
diff --git a/lib/dec_and_lock.c b/lib/dec_and_lock.c
index 347fa7a..9555b68 100644
--- a/lib/dec_and_lock.c
+++ b/lib/dec_and_lock.c
@@ -33,3 +33,19 @@
}
EXPORT_SYMBOL(_atomic_dec_and_lock);
+
+int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
+ unsigned long *flags)
+{
+ /* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
+ if (atomic_add_unless(atomic, -1, 1))
+ return 0;
+
+ /* Otherwise do it the slow way */
+ spin_lock_irqsave(lock, *flags);
+ if (atomic_dec_and_test(atomic))
+ return 1;
+ spin_unlock_irqrestore(lock, *flags);
+ return 0;
+}
+EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);
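Two details worth noting about this addition: the fast path uses atomic_add_unless(atomic, -1, 1), which decrements only while the counter is not 1, so the spinlock (and the IRQ save) is paid only for the final 1 -> 0 transition; and, per the lib/Makefile hunk above, dec_and_lock.o is now built unconditionally rather than only when CONFIG_HAVE_DEC_LOCK is unset, since this generic file is the only home of the new irqsave variant.
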
diff --git a/lib/refcount.c b/lib/refcount.c
index 0eb4835..d3b81ce 100644
--- a/lib/refcount.c
+++ b/lib/refcount.c
@@ -350,3 +350,31 @@
}
EXPORT_SYMBOL(refcount_dec_and_lock);
+/**
+ * refcount_dec_and_lock_irqsave - return holding spinlock with disabled
+ * interrupts if able to decrement refcount to 0
+ * @r: the refcount
+ * @lock: the spinlock to be locked
+ * @flags: saved IRQ flags if the lock is acquired
+ *
+ * Same as refcount_dec_and_lock() above except that the spinlock is acquired
+ * with disabled interrupts.
+ *
+ * Return: true and hold spinlock if able to decrement refcount to 0, false
+ * otherwise
+ */
+bool refcount_dec_and_lock_irqsave(refcount_t *r, spinlock_t *lock,
+ unsigned long *flags)
+{
+ if (refcount_dec_not_one(r))
+ return false;
+
+ spin_lock_irqsave(lock, *flags);
+ if (!refcount_dec_and_test(r)) {
+ spin_unlock_irqrestore(lock, *flags);
+ return false;
+ }
+
+ return true;
+}
+EXPORT_SYMBOL(refcount_dec_and_lock_irqsave);
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 347cc83..2e5d3df 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -359,15 +359,8 @@
spin_lock_bh(&wb->work_lock);
if (!test_and_clear_bit(WB_registered, &wb->state)) {
spin_unlock_bh(&wb->work_lock);
- /*
- * Wait for wb shutdown to finish if someone else is just
- * running wb_shutdown(). Otherwise we could proceed to wb /
- * bdi destruction before wb_shutdown() is finished.
- */
- wait_on_bit(&wb->state, WB_shutting_down, TASK_UNINTERRUPTIBLE);
return;
}
- set_bit(WB_shutting_down, &wb->state);
spin_unlock_bh(&wb->work_lock);
cgwb_remove_from_bdi_list(wb);
@@ -379,12 +372,6 @@
mod_delayed_work(bdi_wq, &wb->dwork, 0);
flush_delayed_work(&wb->dwork);
WARN_ON(!list_empty(&wb->work_list));
- /*
- * Make sure bit gets cleared after shutdown is finished. Matches with
- * the barrier provided by test_and_clear_bit() above.
- */
- smp_wmb();
- clear_and_wake_up_bit(WB_shutting_down, &wb->state);
}
static void wb_exit(struct bdi_writeback *wb)
@@ -508,10 +495,12 @@
struct bdi_writeback *wb = container_of(work, struct bdi_writeback,
release_work);
+ mutex_lock(&wb->bdi->cgwb_release_mutex);
wb_shutdown(wb);
css_put(wb->memcg_css);
css_put(wb->blkcg_css);
+ mutex_unlock(&wb->bdi->cgwb_release_mutex);
fprop_local_destroy_percpu(&wb->memcg_completions);
percpu_ref_exit(&wb->refcnt);
@@ -697,6 +686,7 @@
INIT_RADIX_TREE(&bdi->cgwb_tree, GFP_ATOMIC);
bdi->cgwb_congested_tree = RB_ROOT;
+ mutex_init(&bdi->cgwb_release_mutex);
ret = wb_init(&bdi->wb, bdi, 1, GFP_KERNEL);
if (!ret) {
@@ -717,7 +707,10 @@
spin_lock_irq(&cgwb_lock);
radix_tree_for_each_slot(slot, &bdi->cgwb_tree, &iter, 0)
cgwb_kill(*slot);
+ spin_unlock_irq(&cgwb_lock);
+ mutex_lock(&bdi->cgwb_release_mutex);
+ spin_lock_irq(&cgwb_lock);
while (!list_empty(&bdi->wb_list)) {
wb = list_first_entry(&bdi->wb_list, struct bdi_writeback,
bdi_node);
@@ -726,6 +719,7 @@
spin_lock_irq(&cgwb_lock);
}
spin_unlock_irq(&cgwb_lock);
+ mutex_unlock(&bdi->cgwb_release_mutex);
}
/**
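Taken together, the hunks above replace the old WB_shutting_down bit plus wait_on_bit() handshake with a plain mutex: each release work item holds bdi->cgwb_release_mutex across wb_shutdown(), and cgwb_bdi_unregister() first kills every cgwb under cgwb_lock, then takes the mutex (thereby waiting out any release work still in flight) before re-acquiring cgwb_lock to shut down whatever remains. Roughly (a sketch of the ordering, not new code):

/*
 * cgwb_release_workfn():                cgwb_bdi_unregister():
 *   mutex_lock(cgwb_release_mutex)        spin_lock_irq(cgwb_lock)
 *   wb_shutdown(wb)                       cgwb_kill() each wb
 *   css_put() refs                        spin_unlock_irq(cgwb_lock)
 *   mutex_unlock(cgwb_release_mutex)      mutex_lock(cgwb_release_mutex)
 *                                         spin_lock_irq(cgwb_lock)
 *                                         wb_shutdown() remaining wbs
 *                                         spin_unlock_irq(cgwb_lock)
 *                                         mutex_unlock(cgwb_release_mutex)
 */
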
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 3c85af0..3fabf9f6 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -987,8 +987,6 @@
task->tk_status = -EAGAIN;
goto out_unlock;
}
- if (!bc_prealloc(req) && !req->rq_xmit_bytes_sent)
- req->rq_xid = xprt_alloc_xid(xprt);
ret = true;
out_unlock:
spin_unlock_bh(&xprt->transport_lock);
@@ -1298,7 +1296,12 @@
static inline __be32 xprt_alloc_xid(struct rpc_xprt *xprt)
{
- return (__force __be32)xprt->xid++;
+ __be32 xid;
+
+ spin_lock(&xprt->reserve_lock);
+ xid = (__force __be32)xprt->xid++;
+ spin_unlock(&xprt->reserve_lock);
+ return xid;
}
static inline void xprt_init_xid(struct rpc_xprt *xprt)
@@ -1316,6 +1319,7 @@
req->rq_task = task;
req->rq_xprt = xprt;
req->rq_buffer = NULL;
+ req->rq_xid = xprt_alloc_xid(xprt);
req->rq_connect_cookie = xprt->connect_cookie - 1;
req->rq_bytes_sent = 0;
req->rq_snd_buf.len = 0;
diff --git a/scripts/Makefile.build b/scripts/Makefile.build
index 34d9e9c..e7889f4 100644
--- a/scripts/Makefile.build
+++ b/scripts/Makefile.build
@@ -239,6 +239,7 @@
"$(CC_FLAGS_FTRACE)" ]; then \
$(sub_cmd_record_mcount) \
fi;
+endif # -record-mcount
endif # CONFIG_FTRACE_MCOUNT_RECORD
ifdef CONFIG_STACK_VALIDATION
@@ -263,7 +264,6 @@
objtool_args += --retpoline
endif
endif
-endif
ifdef CONFIG_MODVERSIONS
diff --git a/tools/objtool/check.c b/tools/objtool/check.c
index 38047c6..f4a25bd 100644
--- a/tools/objtool/check.c
+++ b/tools/objtool/check.c
@@ -164,6 +164,7 @@
"lbug_with_loc",
"fortify_panic",
"usercopy_abort",
+ "machine_real_restart",
};
if (func->bind == STB_WEAK)
diff --git a/tools/perf/Documentation/perf-stat.txt b/tools/perf/Documentation/perf-stat.txt
index 5dfe102..b10a90b 100644
--- a/tools/perf/Documentation/perf-stat.txt
+++ b/tools/perf/Documentation/perf-stat.txt
@@ -178,6 +178,9 @@
This option should be used together with "-I" option.
example: 'perf stat -I 1000 --interval-count 2 -e cycles -a'
+--interval-clear::
+Clear the screen before the next interval.
+
--timeout msecs::
Stop the 'perf stat' session and print count deltas after N milliseconds (minimum: 10 ms).
This option is not supported with the "-I" option.
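Following the example style used for --interval-count above, a typical invocation of the new option would be:
example: 'perf stat -I 1000 --interval-clear -e cycles -a'
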
diff --git a/tools/perf/builtin-c2c.c b/tools/perf/builtin-c2c.c
index 307b359..6a8738f 100644
--- a/tools/perf/builtin-c2c.c
+++ b/tools/perf/builtin-c2c.c
@@ -56,16 +56,16 @@
struct compute_stats cstats;
+ unsigned long paddr;
+ unsigned long paddr_cnt;
+ bool paddr_zero;
+ char *nodestr;
+
/*
* must be at the end,
* because of its callchain dynamic entry
*/
struct hist_entry he;
-
- unsigned long paddr;
- unsigned long paddr_cnt;
- bool paddr_zero;
- char *nodestr;
};
static char const *coalesce_default = "pid,iaddr";
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index b3bf355..a31d708 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -180,6 +180,18 @@
PERF_OUTPUT_EVNAME | PERF_OUTPUT_TRACE
},
+ [PERF_TYPE_HW_CACHE] = {
+ .user_set = false,
+
+ .fields = PERF_OUTPUT_COMM | PERF_OUTPUT_TID |
+ PERF_OUTPUT_CPU | PERF_OUTPUT_TIME |
+ PERF_OUTPUT_EVNAME | PERF_OUTPUT_IP |
+ PERF_OUTPUT_SYM | PERF_OUTPUT_SYMOFFSET |
+ PERF_OUTPUT_DSO | PERF_OUTPUT_PERIOD,
+
+ .invalid_fields = PERF_OUTPUT_TRACE | PERF_OUTPUT_BPF_OUTPUT,
+ },
+
[PERF_TYPE_RAW] = {
.user_set = false,
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 096ccb2..22547a4 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -65,6 +65,7 @@
#include "util/tool.h"
#include "util/string2.h"
#include "util/metricgroup.h"
+#include "util/top.h"
#include "asm/bug.h"
#include <linux/time64.h>
@@ -144,6 +145,8 @@
typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);
+#define METRIC_ONLY_LEN 20
+
static int run_count = 1;
static bool no_inherit = false;
static volatile pid_t child_pid = -1;
@@ -173,6 +176,7 @@
static aggr_get_id_t aggr_get_id;
static bool append_file;
static bool interval_count;
+static bool interval_clear;
static const char *output_name;
static int output_fd;
static int print_free_counters_hint;
@@ -180,6 +184,7 @@
static u64 *walltime_run;
static bool ru_display = false;
static struct rusage ru_data;
+static unsigned int metric_only_len = METRIC_ONLY_LEN;
struct perf_stat {
bool record;
@@ -967,8 +972,6 @@
fprintf(out, "%s%s%s%s", csv_sep, vals, csv_sep, unit);
}
-#define METRIC_ONLY_LEN 20
-
/* Filter out some columns that don't work well in metrics only mode */
static bool valid_only_metric(const char *unit)
@@ -999,22 +1002,20 @@
{
struct outstate *os = ctx;
FILE *out = os->fh;
- int n;
- char buf[1024];
- unsigned mlen = METRIC_ONLY_LEN;
+ char buf[1024], str[1024];
+ unsigned mlen = metric_only_len;
if (!valid_only_metric(unit))
return;
unit = fixunit(buf, os->evsel, unit);
- if (color)
- n = color_fprintf(out, color, fmt, val);
- else
- n = fprintf(out, fmt, val);
- if (n > METRIC_ONLY_LEN)
- n = METRIC_ONLY_LEN;
if (mlen < strlen(unit))
mlen = strlen(unit) + 1;
- fprintf(out, "%*s", mlen - n, "");
+
+ if (color)
+ mlen += strlen(color) + sizeof(PERF_COLOR_RESET) - 1;
+
+ color_snprintf(str, sizeof(str), color ?: "", fmt, val);
+ fprintf(out, "%*s ", mlen, str);
}
static void print_metric_only_csv(void *ctx, const char *color __maybe_unused,
@@ -1054,7 +1055,7 @@
if (csv_output)
fprintf(os->fh, "%s%s", unit, csv_sep);
else
- fprintf(os->fh, "%-*s ", METRIC_ONLY_LEN, unit);
+ fprintf(os->fh, "%*s ", metric_only_len, unit);
}
static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
@@ -1704,9 +1705,12 @@
FILE *output = stat_config.output;
static int num_print_interval;
+ if (interval_clear)
+ puts(CONSOLE_CLEAR);
+
sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);
- if (num_print_interval == 0 && !csv_output) {
+ if ((num_print_interval == 0 && !csv_output) || interval_clear) {
switch (stat_config.aggr_mode) {
case AGGR_SOCKET:
fprintf(output, "# time socket cpus");
@@ -1719,7 +1723,7 @@
fprintf(output, " counts %*s events\n", unit_width, "unit");
break;
case AGGR_NONE:
- fprintf(output, "# time CPU");
+ fprintf(output, "# time CPU ");
if (!metric_only)
fprintf(output, " counts %*s events\n", unit_width, "unit");
break;
@@ -1738,7 +1742,7 @@
}
}
- if (num_print_interval == 0 && metric_only)
+ if ((num_print_interval == 0 && metric_only) || interval_clear)
print_metric_headers(" ", true);
if (++num_print_interval == 25)
num_print_interval = 0;
@@ -2057,6 +2061,8 @@
"(overhead is possible for values <= 100ms)"),
OPT_INTEGER(0, "interval-count", &stat_config.times,
"print counts for fixed number of times"),
+ OPT_BOOLEAN(0, "interval-clear", &interval_clear,
+ "clear screen in between new interval"),
OPT_UINTEGER(0, "timeout", &stat_config.timeout,
"stop workload and print counts after a timeout period in ms (>= 10ms)"),
OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
@@ -2436,14 +2442,13 @@
(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};
+ struct parse_events_error errinfo;
/* Set attrs if no event is selected and !null_run: */
if (null_run)
return 0;
if (transaction_run) {
- struct parse_events_error errinfo;
-
if (pmu_have_event("cpu", "cycles-ct") &&
pmu_have_event("cpu", "el-start"))
err = parse_events(evsel_list, transaction_attrs,
@@ -2454,6 +2459,7 @@
&errinfo);
if (err) {
fprintf(stderr, "Cannot set up transaction events\n");
+ parse_events_print_error(&errinfo, transaction_attrs);
return -1;
}
return 0;
@@ -2479,10 +2485,11 @@
pmu_have_event("msr", "smi")) {
if (!force_metric_only)
metric_only = true;
- err = parse_events(evsel_list, smi_cost_attrs, NULL);
+ err = parse_events(evsel_list, smi_cost_attrs, &errinfo);
} else {
fprintf(stderr, "To measure SMI cost, it needs "
"msr/aperf/, msr/smi/ and cpu/cycles/ support\n");
+ parse_events_print_error(&errinfo, smi_cost_attrs);
return -1;
}
if (err) {
@@ -2517,12 +2524,13 @@
if (topdown_attrs[0] && str) {
if (warn)
arch_topdown_group_warn();
- err = parse_events(evsel_list, str, NULL);
+ err = parse_events(evsel_list, str, &errinfo);
if (err) {
fprintf(stderr,
"Cannot set up top down events %s: %d\n",
str, err);
free(str);
+ parse_events_print_error(&errinfo, str);
return -1;
}
} else {
diff --git a/tools/perf/ui/gtk/hists.c b/tools/perf/ui/gtk/hists.c
index b085f1b..4ab663e 100644
--- a/tools/perf/ui/gtk/hists.c
+++ b/tools/perf/ui/gtk/hists.c
@@ -382,7 +382,7 @@
gtk_tree_store_set(store, &iter, col_idx++, s, -1);
}
- if (hists__has_callchains(hists) &&
+ if (hist_entry__has_callchains(h) &&
symbol_conf.use_callchain && hists__has(hists, sym)) {
if (callchain_param.mode == CHAIN_GRAPH_REL)
total = symbol_conf.cumulate_callchain ?
diff --git a/tools/perf/util/hist.c b/tools/perf/util/hist.c
index 52e8fda..828cb97 100644
--- a/tools/perf/util/hist.c
+++ b/tools/perf/util/hist.c
@@ -370,9 +370,11 @@
static int hist_entry__init(struct hist_entry *he,
struct hist_entry *template,
- bool sample_self)
+ bool sample_self,
+ size_t callchain_size)
{
*he = *template;
+ he->callchain_size = callchain_size;
if (symbol_conf.cumulate_callchain) {
he->stat_acc = malloc(sizeof(he->stat));
@@ -473,7 +475,7 @@
he = ops->new(callchain_size);
if (he) {
- err = hist_entry__init(he, template, sample_self);
+ err = hist_entry__init(he, template, sample_self, callchain_size);
if (err) {
ops->free(he);
he = NULL;
@@ -619,9 +621,11 @@
.raw_data = sample->raw_data,
.raw_size = sample->raw_size,
.ops = ops,
- };
+ }, *he = hists__findnew_entry(hists, &entry, al, sample_self);
- return hists__findnew_entry(hists, &entry, al, sample_self);
+ if (!hists->has_callchains && he && he->callchain_size != 0)
+ hists->has_callchains = true;
+ return he;
}
struct hist_entry *hists__add_entry(struct hists *hists,
diff --git a/tools/perf/util/hist.h b/tools/perf/util/hist.h
index 06607c4..73049f7 100644
--- a/tools/perf/util/hist.h
+++ b/tools/perf/util/hist.h
@@ -85,6 +85,7 @@
struct events_stats stats;
u64 event_stream;
u16 col_len[HISTC_NR_COLS];
+ bool has_callchains;
int socket_filter;
struct perf_hpp_list *hpp_list;
struct list_head hpp_formats;
@@ -222,8 +223,7 @@
static __pure inline bool hists__has_callchains(struct hists *hists)
{
- const struct perf_evsel *evsel = hists_to_evsel(hists);
- return evsel__has_callchain(evsel);
+ return hists->has_callchains;
}
int hists__init(void);
diff --git a/tools/perf/util/parse-events.y b/tools/perf/util/parse-events.y
index 155d257..da8fe57 100644
--- a/tools/perf/util/parse-events.y
+++ b/tools/perf/util/parse-events.y
@@ -227,11 +227,16 @@
event_pmu:
PE_NAME opt_pmu_config
{
+ struct parse_events_state *parse_state = _parse_state;
+ struct parse_events_error *error = parse_state->error;
struct list_head *list, *orig_terms, *terms;
if (parse_events_copy_term_list($2, &orig_terms))
YYABORT;
+ if (error)
+ error->idx = @1.first_column;
+
ALLOC_LIST(list);
if (parse_events_add_pmu(_parse_state, list, $1, $2, false, false)) {
struct perf_pmu *pmu = NULL;
diff --git a/tools/perf/util/sort.h b/tools/perf/util/sort.h
index 7cf2d5c..8bf302c 100644
--- a/tools/perf/util/sort.h
+++ b/tools/perf/util/sort.h
@@ -112,6 +112,8 @@
char level;
u8 filtered;
+
+ u16 callchain_size;
union {
/*
* Since perf diff only supports the stdio output, TUI
@@ -153,7 +155,7 @@
static __pure inline bool hist_entry__has_callchains(struct hist_entry *he)
{
- return hists__has_callchains(he->hists);
+ return he->callchain_size != 0;
}
static inline bool hist_entry__has_pairs(struct hist_entry *he)
diff --git a/tools/power/x86/turbostat/turbostat.8 b/tools/power/x86/turbostat/turbostat.8
index ca9ef70..d39e4ff 100644
--- a/tools/power/x86/turbostat/turbostat.8
+++ b/tools/power/x86/turbostat/turbostat.8
@@ -56,7 +56,7 @@
.PP
\fB--hide column\fP do not show the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--hide sysfs" to hide the sysfs statistics columns as a group.
.PP
-\fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default. Currently the only built-in counters disabled by default are "usec" and "Time_Of_Day_Seconds".
+\fB--enable column\fP show the specified built-in columns, which are otherwise disabled, by default. Currently the only built-in counters disabled by default are "usec", "Time_Of_Day_Seconds", "APIC" and "X2APIC".
The column name "all" can be used to enable all disabled-by-default built-in counters.
.PP
\fB--show column\fP show only the specified built-in columns. May be invoked multiple times, or with a comma-separated list of column names. Use "--show sysfs" to show the sysfs statistics columns as a group.
diff --git a/tools/power/x86/turbostat/turbostat.c b/tools/power/x86/turbostat/turbostat.c
index d6cff30..4d14bbb 100644
--- a/tools/power/x86/turbostat/turbostat.c
+++ b/tools/power/x86/turbostat/turbostat.c
@@ -109,6 +109,7 @@
unsigned int has_hwp_epp; /* IA32_HWP_REQUEST[bits 31:24] */
unsigned int has_hwp_pkg; /* IA32_HWP_REQUEST_PKG */
unsigned int has_misc_feature_control;
+unsigned int first_counter_read = 1;
#define RAPL_PKG (1 << 0)
/* 0x610 MSR_PKG_POWER_LIMIT */
@@ -170,6 +171,8 @@
unsigned long long irq_count;
unsigned int smi_count;
unsigned int cpu_id;
+ unsigned int apic_id;
+ unsigned int x2apic_id;
unsigned int flags;
#define CPU_IS_FIRST_THREAD_IN_CORE 0x2
#define CPU_IS_FIRST_CORE_IN_PACKAGE 0x4
@@ -381,19 +384,23 @@
}
/*
- * Each string in this array is compared in --show and --hide cmdline.
- * Thus, strings that are proper sub-sets must follow their more specific peers.
+ * This list matches the column headers, except:
+ * 1. built-in counters only; the sysfs counters are not here -- we learn of those at run-time
+ * 2. Core and CPU are moved to the end, so that strings which merely contain them
+ * do not match on them for --show and --hide.
*/
struct msr_counter bic[] = {
{ 0x0, "usec" },
{ 0x0, "Time_Of_Day_Seconds" },
{ 0x0, "Package" },
+ { 0x0, "Node" },
{ 0x0, "Avg_MHz" },
+ { 0x0, "Busy%" },
{ 0x0, "Bzy_MHz" },
{ 0x0, "TSC_MHz" },
{ 0x0, "IRQ" },
{ 0x0, "SMI", "", 32, 0, FORMAT_DELTA, NULL},
- { 0x0, "Busy%" },
+ { 0x0, "sysfs" },
{ 0x0, "CPU%c1" },
{ 0x0, "CPU%c3" },
{ 0x0, "CPU%c6" },
@@ -424,73 +431,73 @@
{ 0x0, "Cor_J" },
{ 0x0, "GFX_J" },
{ 0x0, "RAM_J" },
- { 0x0, "Core" },
- { 0x0, "CPU" },
{ 0x0, "Mod%c6" },
- { 0x0, "sysfs" },
{ 0x0, "Totl%C0" },
{ 0x0, "Any%C0" },
{ 0x0, "GFX%C0" },
{ 0x0, "CPUGFX%" },
- { 0x0, "Node%" },
+ { 0x0, "Core" },
+ { 0x0, "CPU" },
+ { 0x0, "APIC" },
+ { 0x0, "X2APIC" },
};
-
-
#define MAX_BIC (sizeof(bic) / sizeof(struct msr_counter))
#define BIC_USEC (1ULL << 0)
#define BIC_TOD (1ULL << 1)
#define BIC_Package (1ULL << 2)
-#define BIC_Avg_MHz (1ULL << 3)
-#define BIC_Bzy_MHz (1ULL << 4)
-#define BIC_TSC_MHz (1ULL << 5)
-#define BIC_IRQ (1ULL << 6)
-#define BIC_SMI (1ULL << 7)
-#define BIC_Busy (1ULL << 8)
-#define BIC_CPU_c1 (1ULL << 9)
-#define BIC_CPU_c3 (1ULL << 10)
-#define BIC_CPU_c6 (1ULL << 11)
-#define BIC_CPU_c7 (1ULL << 12)
-#define BIC_ThreadC (1ULL << 13)
-#define BIC_CoreTmp (1ULL << 14)
-#define BIC_CoreCnt (1ULL << 15)
-#define BIC_PkgTmp (1ULL << 16)
-#define BIC_GFX_rc6 (1ULL << 17)
-#define BIC_GFXMHz (1ULL << 18)
-#define BIC_Pkgpc2 (1ULL << 19)
-#define BIC_Pkgpc3 (1ULL << 20)
-#define BIC_Pkgpc6 (1ULL << 21)
-#define BIC_Pkgpc7 (1ULL << 22)
-#define BIC_Pkgpc8 (1ULL << 23)
-#define BIC_Pkgpc9 (1ULL << 24)
-#define BIC_Pkgpc10 (1ULL << 25)
-#define BIC_CPU_LPI (1ULL << 26)
-#define BIC_SYS_LPI (1ULL << 27)
-#define BIC_PkgWatt (1ULL << 26)
-#define BIC_CorWatt (1ULL << 27)
-#define BIC_GFXWatt (1ULL << 28)
-#define BIC_PkgCnt (1ULL << 29)
-#define BIC_RAMWatt (1ULL << 30)
-#define BIC_PKG__ (1ULL << 31)
-#define BIC_RAM__ (1ULL << 32)
-#define BIC_Pkg_J (1ULL << 33)
-#define BIC_Cor_J (1ULL << 34)
-#define BIC_GFX_J (1ULL << 35)
-#define BIC_RAM_J (1ULL << 36)
-#define BIC_Core (1ULL << 37)
-#define BIC_CPU (1ULL << 38)
-#define BIC_Mod_c6 (1ULL << 39)
-#define BIC_sysfs (1ULL << 40)
-#define BIC_Totl_c0 (1ULL << 41)
-#define BIC_Any_c0 (1ULL << 42)
-#define BIC_GFX_c0 (1ULL << 43)
-#define BIC_CPUGFX (1ULL << 44)
-#define BIC_Node (1ULL << 45)
+#define BIC_Node (1ULL << 3)
+#define BIC_Avg_MHz (1ULL << 4)
+#define BIC_Busy (1ULL << 5)
+#define BIC_Bzy_MHz (1ULL << 6)
+#define BIC_TSC_MHz (1ULL << 7)
+#define BIC_IRQ (1ULL << 8)
+#define BIC_SMI (1ULL << 9)
+#define BIC_sysfs (1ULL << 10)
+#define BIC_CPU_c1 (1ULL << 11)
+#define BIC_CPU_c3 (1ULL << 12)
+#define BIC_CPU_c6 (1ULL << 13)
+#define BIC_CPU_c7 (1ULL << 14)
+#define BIC_ThreadC (1ULL << 15)
+#define BIC_CoreTmp (1ULL << 16)
+#define BIC_CoreCnt (1ULL << 17)
+#define BIC_PkgTmp (1ULL << 18)
+#define BIC_GFX_rc6 (1ULL << 19)
+#define BIC_GFXMHz (1ULL << 20)
+#define BIC_Pkgpc2 (1ULL << 21)
+#define BIC_Pkgpc3 (1ULL << 22)
+#define BIC_Pkgpc6 (1ULL << 23)
+#define BIC_Pkgpc7 (1ULL << 24)
+#define BIC_Pkgpc8 (1ULL << 25)
+#define BIC_Pkgpc9 (1ULL << 26)
+#define BIC_Pkgpc10 (1ULL << 27)
+#define BIC_CPU_LPI (1ULL << 28)
+#define BIC_SYS_LPI (1ULL << 29)
+#define BIC_PkgWatt (1ULL << 30)
+#define BIC_CorWatt (1ULL << 31)
+#define BIC_GFXWatt (1ULL << 32)
+#define BIC_PkgCnt (1ULL << 33)
+#define BIC_RAMWatt (1ULL << 34)
+#define BIC_PKG__ (1ULL << 35)
+#define BIC_RAM__ (1ULL << 36)
+#define BIC_Pkg_J (1ULL << 37)
+#define BIC_Cor_J (1ULL << 38)
+#define BIC_GFX_J (1ULL << 39)
+#define BIC_RAM_J (1ULL << 40)
+#define BIC_Mod_c6 (1ULL << 41)
+#define BIC_Totl_c0 (1ULL << 42)
+#define BIC_Any_c0 (1ULL << 43)
+#define BIC_GFX_c0 (1ULL << 44)
+#define BIC_CPUGFX (1ULL << 45)
+#define BIC_Core (1ULL << 46)
+#define BIC_CPU (1ULL << 47)
+#define BIC_APIC (1ULL << 48)
+#define BIC_X2APIC (1ULL << 49)
-#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD)
+#define BIC_DISABLED_BY_DEFAULT (BIC_USEC | BIC_TOD | BIC_APIC | BIC_X2APIC)
unsigned long long bic_enabled = (0xFFFFFFFFFFFFFFFFULL & ~BIC_DISABLED_BY_DEFAULT);
-unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs;
+unsigned long long bic_present = BIC_USEC | BIC_TOD | BIC_sysfs | BIC_APIC | BIC_X2APIC;
#define DO_BIC(COUNTER_NAME) (bic_enabled & bic_present & COUNTER_NAME)
#define ENABLE_BIC(COUNTER_NAME) (bic_enabled |= COUNTER_NAME)
@@ -517,17 +524,34 @@
"when COMMAND completes.\n"
"If no COMMAND is specified, turbostat wakes every 5-seconds\n"
"to print statistics, until interrupted.\n"
- "--add add a counter\n"
- " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
- "--cpu cpu-set limit output to summary plus cpu-set:\n"
- " {core | package | j,k,l..m,n-p }\n"
- "--quiet skip decoding system configuration header\n"
- "--interval sec.subsec Override default 5-second measurement interval\n"
- "--help print this help message\n"
- "--list list column headers only\n"
- "--num_iterations num number of the measurement iterations\n"
- "--out file create or truncate \"file\" for all output\n"
- "--version print version information\n"
+ " -a, --add add a counter\n"
+ " eg. --add msr0x10,u64,cpu,delta,MY_TSC\n"
+ " -c, --cpu cpu-set limit output to summary plus cpu-set:\n"
+ " {core | package | j,k,l..m,n-p }\n"
+ " -d, --debug displays usec, Time_Of_Day_Seconds and more debugging\n"
+ " -D, --Dump displays the raw counter values\n"
+ " -e, --enable [all | column]\n"
+ " shows all or the specified disabled column\n"
+ " -H, --hide [column|column,column,...]\n"
+ " hide the specified column(s)\n"
+ " -i, --interval sec.subsec\n"
+ " Override default 5-second measurement interval\n"
+ " -J, --Joules displays energy in Joules instead of Watts\n"
+ " -l, --list list column headers only\n"
+ " -n, --num_iterations num\n"
+ " number of the measurement iterations\n"
+ " -o, --out file\n"
+ " create or truncate \"file\" for all output\n"
+ " -q, --quiet skip decoding system configuration header\n"
+ " -s, --show [column|column,column,...]\n"
+ " show only the specified column(s)\n"
+ " -S, --Summary\n"
+ " limits output to 1-line system summary per interval\n"
+ " -T, --TCC temperature\n"
+ " sets the Thermal Control Circuit temperature in\n"
+ " degrees Celsius\n"
+ " -h, --help print this help message\n"
+ " -v, --version print version information\n"
"\n"
"For more help, run \"man turbostat\"\n");
}
@@ -601,6 +625,10 @@
outp += sprintf(outp, "%sCore", (printed++ ? delim : ""));
if (DO_BIC(BIC_CPU))
outp += sprintf(outp, "%sCPU", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_APIC))
+ outp += sprintf(outp, "%sAPIC", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_X2APIC))
+ outp += sprintf(outp, "%sX2APIC", (printed++ ? delim : ""));
if (DO_BIC(BIC_Avg_MHz))
outp += sprintf(outp, "%sAvg_MHz", (printed++ ? delim : ""));
if (DO_BIC(BIC_Busy))
@@ -880,6 +908,10 @@
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
if (DO_BIC(BIC_CPU))
outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_APIC))
+ outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
+ if (DO_BIC(BIC_X2APIC))
+ outp += sprintf(outp, "%s-", (printed++ ? delim : ""));
} else {
if (DO_BIC(BIC_Package)) {
if (p)
@@ -904,6 +936,10 @@
}
if (DO_BIC(BIC_CPU))
outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->cpu_id);
+ if (DO_BIC(BIC_APIC))
+ outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->apic_id);
+ if (DO_BIC(BIC_X2APIC))
+ outp += sprintf(outp, "%s%d", (printed++ ? delim : ""), t->x2apic_id);
}
if (DO_BIC(BIC_Avg_MHz))
@@ -1231,6 +1267,12 @@
int i;
struct msr_counter *mp;
+ /* we run cpuid just the first time; copy the results */
+ if (DO_BIC(BIC_APIC))
+ new->apic_id = old->apic_id;
+ if (DO_BIC(BIC_X2APIC))
+ new->x2apic_id = old->x2apic_id;
+
/*
* the timestamps from start of measurement interval are in "old"
* the timestamp from end of measurement interval are in "new"
@@ -1393,6 +1435,12 @@
int i;
struct msr_counter *mp;
+ /* copy the unchanging apic_ids */
+ if (DO_BIC(BIC_APIC))
+ average.threads.apic_id = t->apic_id;
+ if (DO_BIC(BIC_X2APIC))
+ average.threads.x2apic_id = t->x2apic_id;
+
/* remember first tv_begin */
if (average.threads.tv_begin.tv_sec == 0)
average.threads.tv_begin = t->tv_begin;
@@ -1619,6 +1667,34 @@
return 0;
}
+void get_apic_id(struct thread_data *t)
+{
+ unsigned int eax, ebx, ecx, edx, max_level;
+
+ eax = ebx = ecx = edx = 0;
+
+ if (!genuine_intel)
+ return;
+
+ __cpuid(0, max_level, ebx, ecx, edx);
+
+ __cpuid(1, eax, ebx, ecx, edx);
+ t->apic_id = (ebx >> 24) & 0xf;
+
+ if (max_level < 0xb)
+ return;
+
+ if (!DO_BIC(BIC_X2APIC))
+ return;
+
+ ecx = 0;
+ __cpuid(0xb, eax, ebx, ecx, edx);
+ t->x2apic_id = edx;
+
+ if (debug && (t->apic_id != t->x2apic_id))
+ fprintf(stderr, "cpu%d: apic 0x%x x2apic 0x%x\n", t->cpu_id, t->apic_id, t->x2apic_id);
+}
+
/*
* get_counters(...)
* migrate to cpu
@@ -1632,7 +1708,6 @@
struct msr_counter *mp;
int i;
-
gettimeofday(&t->tv_begin, (struct timezone *)NULL);
if (cpu_migrate(cpu)) {
@@ -1640,6 +1715,8 @@
return -1;
}
+ if (first_counter_read)
+ get_apic_id(t);
retry:
t->tsc = rdtsc(); /* we are running on local CPU of interest */
@@ -2432,6 +2509,12 @@
if (pni[pkg].count > topo.nodes_per_pkg)
topo.nodes_per_pkg = pni[0].count;
+ /* Fake 1 node per pkg for machines that don't
+ * expose nodes, thus avoiding -nan results
+ */
+ if (topo.nodes_per_pkg == 0)
+ topo.nodes_per_pkg = 1;
+
for (cpu = 0; cpu < topo.num_cpus; cpu++) {
pkg = cpus[cpu].physical_package_id;
node = cpus[cpu].physical_node_id;
@@ -2879,6 +2962,7 @@
}
}
+
void turbostat_loop()
{
int retval;
@@ -2892,6 +2976,7 @@
snapshot_proc_sysfs_files();
retval = for_all_cpus(get_counters, EVEN_COUNTERS);
+ first_counter_read = 0;
if (retval < -1) {
exit(retval);
} else if (retval == -1) {
@@ -4392,7 +4477,7 @@
if (!quiet) {
fprintf(outf, "%d CPUID levels; family:model:stepping 0x%x:%x:%x (%d:%d:%d)\n",
max_level, family, model, stepping, family, model, stepping);
- fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s\n",
+ fprintf(outf, "CPUID(1): %s %s %s %s %s %s %s %s %s %s\n",
ecx & (1 << 0) ? "SSE3" : "-",
ecx & (1 << 3) ? "MONITOR" : "-",
ecx & (1 << 6) ? "SMX" : "-",
@@ -4401,6 +4486,7 @@
edx & (1 << 4) ? "TSC" : "-",
edx & (1 << 5) ? "MSR" : "-",
edx & (1 << 22) ? "ACPI-TM" : "-",
+ edx & (1 << 28) ? "HT" : "-",
edx & (1 << 29) ? "TM" : "-");
}
@@ -4652,7 +4738,6 @@
return;
}
-
/*
* in /dev/cpu/ return success for names that are numbers
* ie. filter out ".", "..", "microcode".
@@ -4842,6 +4927,13 @@
struct core_data *c;
struct pkg_data *p;
+
+ /* Workaround for systems where physical_node_id==-1
+ * and logical_node_id==(-1 - topo.num_cpus)
+ */
+ if (node_id < 0)
+ node_id = 0;
+
t = GET_THREAD(thread_base, thread_id, core_id, node_id, pkg_id);
c = GET_CORE(core_base, core_id, node_id, pkg_id);
p = GET_PKG(pkg_base, pkg_id);
@@ -4946,6 +5038,7 @@
snapshot_proc_sysfs_files();
status = for_all_cpus(get_counters, EVEN_COUNTERS);
+ first_counter_read = 0;
if (status)
exit(status);
/* clear affinity side-effect of get_counters() */
@@ -5009,7 +5102,7 @@
}
void print_version() {
- fprintf(outf, "turbostat version 18.06.01"
+ fprintf(outf, "turbostat version 18.06.20"
" - Len Brown <lenb@kernel.org>\n");
}
@@ -5381,7 +5474,7 @@
break;
case 'e':
/* --enable specified counter */
- bic_enabled |= bic_lookup(optarg, SHOW_LIST);
+ bic_enabled = bic_enabled | bic_lookup(optarg, SHOW_LIST);
break;
case 'd':
debug++;
@@ -5465,7 +5558,6 @@
int main(int argc, char **argv)
{
outf = stderr;
-
cmdline(argc, argv);
if (!quiet)
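The enumeration in get_apic_id() above follows the standard CPUID recipe: leaf 1 returns the initial 8-bit APIC ID in EBX[31:24], and when the maximum basic leaf is at least 0xb, leaf 0xB returns the full 32-bit x2APIC ID in EDX. A standalone user-space sketch using GCC's <cpuid.h> (illustration only, not part of the patch; pin the process to one CPU, e.g. with taskset, for a meaningful answer):

#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx, max_level;

	__cpuid(0, max_level, ebx, ecx, edx);	/* EAX: highest basic leaf */

	__cpuid(1, eax, ebx, ecx, edx);
	printf("initial APIC id: %u\n", (ebx >> 24) & 0xff);

	if (max_level >= 0xb) {
		__cpuid_count(0xb, 0, eax, ebx, ecx, edx);
		printf("x2APIC id: %u\n", edx);	/* full 32-bit id */
	}
	return 0;
}
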
diff --git a/tools/testing/selftests/pstore/pstore_post_reboot_tests b/tools/testing/selftests/pstore/pstore_post_reboot_tests
index 6ccb154..22f8df1 100755
--- a/tools/testing/selftests/pstore/pstore_post_reboot_tests
+++ b/tools/testing/selftests/pstore/pstore_post_reboot_tests
@@ -7,13 +7,16 @@
#
# Released under the terms of the GPL v2.
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
. ./common_tests
if [ -e $REBOOT_FLAG ]; then
rm $REBOOT_FLAG
else
prlog "pstore_crash_test has not been executed yet. we skip further tests."
- exit 0
+ exit $ksft_skip
fi
prlog -n "Mounting pstore filesystem ... "
diff --git a/tools/testing/selftests/rseq/param_test.c b/tools/testing/selftests/rseq/param_test.c
index 6a9f602..6152523 100644
--- a/tools/testing/selftests/rseq/param_test.c
+++ b/tools/testing/selftests/rseq/param_test.c
@@ -137,6 +137,30 @@
"subic. %%" INJECT_ASM_REG ", %%" INJECT_ASM_REG ", 1\n\t" \
"bne 222b\n\t" \
"333:\n\t"
+
+#elif defined(__mips__)
+
+#define RSEQ_INJECT_INPUT \
+ , [loop_cnt_1]"m"(loop_cnt[1]) \
+ , [loop_cnt_2]"m"(loop_cnt[2]) \
+ , [loop_cnt_3]"m"(loop_cnt[3]) \
+ , [loop_cnt_4]"m"(loop_cnt[4]) \
+ , [loop_cnt_5]"m"(loop_cnt[5]) \
+ , [loop_cnt_6]"m"(loop_cnt[6])
+
+#define INJECT_ASM_REG "$5"
+
+#define RSEQ_INJECT_CLOBBER \
+ , INJECT_ASM_REG
+
+#define RSEQ_INJECT_ASM(n) \
+ "lw " INJECT_ASM_REG ", %[loop_cnt_" #n "]\n\t" \
+ "beqz " INJECT_ASM_REG ", 333f\n\t" \
+ "222:\n\t" \
+ "addiu " INJECT_ASM_REG ", -1\n\t" \
+ "bnez " INJECT_ASM_REG ", 222b\n\t" \
+ "333:\n\t"
+
#else
#error unsupported target
#endif
diff --git a/tools/testing/selftests/rseq/rseq-arm.h b/tools/testing/selftests/rseq/rseq-arm.h
index 3b055f9..3cea198 100644
--- a/tools/testing/selftests/rseq/rseq-arm.h
+++ b/tools/testing/selftests/rseq/rseq-arm.h
@@ -57,6 +57,7 @@
#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
abort_label, version, flags, \
start_ip, post_commit_offset, abort_ip) \
+ ".balign 32\n\t" \
__rseq_str(table_label) ":\n\t" \
".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
".word " __rseq_str(start_ip) ", 0x0, " __rseq_str(post_commit_offset) ", 0x0, " __rseq_str(abort_ip) ", 0x0\n\t" \
diff --git a/tools/testing/selftests/rseq/rseq-mips.h b/tools/testing/selftests/rseq/rseq-mips.h
new file mode 100644
index 0000000..7f48ecf
--- /dev/null
+++ b/tools/testing/selftests/rseq/rseq-mips.h
@@ -0,0 +1,725 @@
+/* SPDX-License-Identifier: LGPL-2.1 OR MIT */
+/*
+ * Author: Paul Burton <paul.burton@mips.com>
+ * (C) Copyright 2018 MIPS Tech LLC
+ *
+ * Based on rseq-arm.h:
+ * (C) Copyright 2016-2018 - Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
+ */
+
+#define RSEQ_SIG 0x53053053
+
+#define rseq_smp_mb() __asm__ __volatile__ ("sync" ::: "memory")
+#define rseq_smp_rmb() rseq_smp_mb()
+#define rseq_smp_wmb() rseq_smp_mb()
+
+#define rseq_smp_load_acquire(p) \
+__extension__ ({ \
+ __typeof(*p) ____p1 = RSEQ_READ_ONCE(*p); \
+ rseq_smp_mb(); \
+ ____p1; \
+})
+
+#define rseq_smp_acquire__after_ctrl_dep() rseq_smp_rmb()
+
+#define rseq_smp_store_release(p, v) \
+do { \
+ rseq_smp_mb(); \
+ RSEQ_WRITE_ONCE(*p, v); \
+} while (0)
+
+#ifdef RSEQ_SKIP_FASTPATH
+#include "rseq-skip.h"
+#else /* !RSEQ_SKIP_FASTPATH */
+
+#if _MIPS_SZLONG == 64
+# define LONG ".dword"
+# define LONG_LA "dla"
+# define LONG_L "ld"
+# define LONG_S "sd"
+# define LONG_ADDI "daddiu"
+# define U32_U64_PAD(x) x
+#elif _MIPS_SZLONG == 32
+# define LONG ".word"
+# define LONG_LA "la"
+# define LONG_L "lw"
+# define LONG_S "sw"
+# define LONG_ADDI "addiu"
+# ifdef __BIG_ENDIAN
+# define U32_U64_PAD(x) "0x0, " x
+# else
+# define U32_U64_PAD(x) x ", 0x0"
+# endif
+#else
+# error unsupported _MIPS_SZLONG
+#endif
+
+#define __RSEQ_ASM_DEFINE_TABLE(version, flags, start_ip, \
+ post_commit_offset, abort_ip) \
+ ".pushsection __rseq_table, \"aw\"\n\t" \
+ ".balign 32\n\t" \
+ ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+ ".popsection\n\t"
+
+#define RSEQ_ASM_DEFINE_TABLE(start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_TABLE(0x0, 0x0, start_ip, \
+ (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_STORE_RSEQ_CS(label, cs_label, rseq_cs) \
+ RSEQ_INJECT_ASM(1) \
+ LONG_LA " $4, " __rseq_str(cs_label) "\n\t" \
+ LONG_S " $4, %[" __rseq_str(rseq_cs) "]\n\t" \
+ __rseq_str(label) ":\n\t"
+
+#define RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, label) \
+ RSEQ_INJECT_ASM(2) \
+ "lw $4, %[" __rseq_str(current_cpu_id) "]\n\t" \
+ "bne $4, %[" __rseq_str(cpu_id) "], " __rseq_str(label) "\n\t"
+
+#define __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+ abort_label, version, flags, \
+ start_ip, post_commit_offset, abort_ip) \
+ ".balign 32\n\t" \
+ __rseq_str(table_label) ":\n\t" \
+ ".word " __rseq_str(version) ", " __rseq_str(flags) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(start_ip)) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(post_commit_offset)) "\n\t" \
+ LONG " " U32_U64_PAD(__rseq_str(abort_ip)) "\n\t" \
+ ".word " __rseq_str(RSEQ_SIG) "\n\t" \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "b %l[" __rseq_str(abort_label) "]\n\t"
+
+#define RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, abort_label, \
+ start_ip, post_commit_ip, abort_ip) \
+ __RSEQ_ASM_DEFINE_ABORT(table_label, label, teardown, \
+ abort_label, 0x0, 0x0, start_ip, \
+ (post_commit_ip - start_ip), abort_ip)
+
+#define RSEQ_ASM_DEFINE_CMPFAIL(label, teardown, cmpfail_label) \
+ __rseq_str(label) ":\n\t" \
+ teardown \
+ "b %l[" __rseq_str(cmpfail_label) "]\n\t"
+
+#define rseq_workaround_gcc_asm_size_guess() __asm__ __volatile__("")
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_storev(intptr_t *v, intptr_t expect, intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[error2]\n\t"
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpnev_storeoffp_load(intptr_t *v, intptr_t expectnot,
+ off_t voffp, intptr_t *load, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "beq $4, %[expectnot], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "beq $4, %[expectnot], %l[error2]\n\t"
+#endif
+ LONG_S " $4, %[load]\n\t"
+ LONG_ADDI " $4, %[voffp]\n\t"
+ LONG_L " $4, 0($4)\n\t"
+ /* final store */
+ LONG_S " $4, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(5)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expectnot] "r" (expectnot),
+ [voffp] "Ir" (voffp),
+ [load] "m" (*load)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_addv(intptr_t *v, intptr_t count, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+#endif
+ LONG_L " $4, %[v]\n\t"
+ LONG_ADDI " $4, %[count]\n\t"
+ /* final store */
+ LONG_S " $4, %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(4)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ [v] "m" (*v),
+ [count] "Ir" (count)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort
+#ifdef RSEQ_COMPARE_TWICE
+ , error1
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[error2]\n\t"
+#endif
+ /* try store */
+ LONG_S " %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trystorev_storev_release(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t newv2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[error2]\n\t"
+#endif
+ /* try store */
+ LONG_S " %[newv2], %[v2]\n\t"
+ RSEQ_INJECT_ASM(5)
+ "sync\n\t" /* full sync provides store-release */
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* try store input */
+ [v2] "m" (*v2),
+ [newv2] "r" (newv2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_cmpeqv_storev(intptr_t *v, intptr_t expect,
+ intptr_t *v2, intptr_t expect2,
+ intptr_t newv, int cpu)
+{
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(4)
+ LONG_L " $4, %[v2]\n\t"
+ "bne $4, %[expect2], %l[cmpfail]\n\t"
+ RSEQ_INJECT_ASM(5)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, %l[error1])
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], %l[error2]\n\t"
+ LONG_L " $4, %[v2]\n\t"
+ "bne $4, %[expect2], %l[error3]\n\t"
+#endif
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ "b 5f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4, "", abort, 1b, 2b, 4f)
+ "5:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* cmp2 input */
+ [v2] "m" (*v2),
+ [expect2] "r" (expect2),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv)
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2, error3
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_bug("1st expected value comparison failed");
+error3:
+ rseq_bug("2nd expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uintptr_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ LONG_S " %[src], %[rseq_scratch0]\n\t"
+ LONG_S " %[dst], %[rseq_scratch1]\n\t"
+ LONG_S " %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], 7f\n\t"
+#endif
+ /* try memcpy */
+ "beqz %[len], 333f\n\t" \
+ "222:\n\t" \
+ "lb $4, 0(%[src])\n\t" \
+ "sb $4, 0(%[dst])\n\t" \
+ LONG_ADDI " %[src], 1\n\t" \
+ LONG_ADDI " %[dst], 1\n\t" \
+ LONG_ADDI " %[len], -1\n\t" \
+ "bnez %[len], 222b\n\t" \
+ "333:\n\t" \
+ RSEQ_INJECT_ASM(5)
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t"
+ "b 8f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ abort, 1b, 2b, 4f)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error2)
+#endif
+ "8:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_workaround_gcc_asm_size_guess();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_workaround_gcc_asm_size_guess();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+static inline __attribute__((always_inline))
+int rseq_cmpeqv_trymemcpy_storev_release(intptr_t *v, intptr_t expect,
+ void *dst, void *src, size_t len,
+ intptr_t newv, int cpu)
+{
+ uintptr_t rseq_scratch[3];
+
+ RSEQ_INJECT_C(9)
+
+ rseq_workaround_gcc_asm_size_guess();
+ __asm__ __volatile__ goto (
+ RSEQ_ASM_DEFINE_TABLE(1f, 2f, 4f) /* start, commit, abort */
+ LONG_S " %[src], %[rseq_scratch0]\n\t"
+ LONG_S " %[dst], %[rseq_scratch1]\n\t"
+ LONG_S " %[len], %[rseq_scratch2]\n\t"
+ /* Start rseq by storing table entry pointer into rseq_cs. */
+ RSEQ_ASM_STORE_RSEQ_CS(1, 3f, rseq_cs)
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 4f)
+ RSEQ_INJECT_ASM(3)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], 5f\n\t"
+ RSEQ_INJECT_ASM(4)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_CMP_CPU_ID(cpu_id, current_cpu_id, 6f)
+ LONG_L " $4, %[v]\n\t"
+ "bne $4, %[expect], 7f\n\t"
+#endif
+ /* try memcpy */
+ "beqz %[len], 333f\n\t" \
+ "222:\n\t" \
+ "lb $4, 0(%[src])\n\t" \
+ "sb $4, 0(%[dst])\n\t" \
+ LONG_ADDI " %[src], 1\n\t" \
+ LONG_ADDI " %[dst], 1\n\t" \
+ LONG_ADDI " %[len], -1\n\t" \
+ "bnez %[len], 222b\n\t" \
+ "333:\n\t" \
+ RSEQ_INJECT_ASM(5)
+ "sync\n\t" /* full sync provides store-release */
+ /* final store */
+ LONG_S " %[newv], %[v]\n\t"
+ "2:\n\t"
+ RSEQ_INJECT_ASM(6)
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t"
+ "b 8f\n\t"
+ RSEQ_ASM_DEFINE_ABORT(3, 4,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ abort, 1b, 2b, 4f)
+ RSEQ_ASM_DEFINE_CMPFAIL(5,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ cmpfail)
+#ifdef RSEQ_COMPARE_TWICE
+ RSEQ_ASM_DEFINE_CMPFAIL(6,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error1)
+ RSEQ_ASM_DEFINE_CMPFAIL(7,
+ /* teardown */
+ LONG_L " %[len], %[rseq_scratch2]\n\t"
+ LONG_L " %[dst], %[rseq_scratch1]\n\t"
+ LONG_L " %[src], %[rseq_scratch0]\n\t",
+ error2)
+#endif
+ "8:\n\t"
+ : /* gcc asm goto does not allow outputs */
+ : [cpu_id] "r" (cpu),
+ [current_cpu_id] "m" (__rseq_abi.cpu_id),
+ [rseq_cs] "m" (__rseq_abi.rseq_cs),
+ /* final store input */
+ [v] "m" (*v),
+ [expect] "r" (expect),
+ [newv] "r" (newv),
+ /* try memcpy input */
+ [dst] "r" (dst),
+ [src] "r" (src),
+ [len] "r" (len),
+ [rseq_scratch0] "m" (rseq_scratch[0]),
+ [rseq_scratch1] "m" (rseq_scratch[1]),
+ [rseq_scratch2] "m" (rseq_scratch[2])
+ RSEQ_INJECT_INPUT
+ : "$4", "memory"
+ RSEQ_INJECT_CLOBBER
+ : abort, cmpfail
+#ifdef RSEQ_COMPARE_TWICE
+ , error1, error2
+#endif
+ );
+ rseq_workaround_gcc_asm_size_guess();
+ return 0;
+abort:
+ rseq_workaround_gcc_asm_size_guess();
+ RSEQ_INJECT_FAILED
+ return -1;
+cmpfail:
+ rseq_workaround_gcc_asm_size_guess();
+ return 1;
+#ifdef RSEQ_COMPARE_TWICE
+error1:
+ rseq_workaround_gcc_asm_size_guess();
+ rseq_bug("cpu_id comparison failed");
+error2:
+ rseq_workaround_gcc_asm_size_guess();
+ rseq_bug("expected value comparison failed");
+#endif
+}
+
+#endif /* !RSEQ_SKIP_FASTPATH */
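
The helpers above are bare primitives; a caller is expected to loop until the critical section commits. A minimal sketch for orientation (struct percpu_buf, buf_push() and the slot capacity are illustrative assumptions; rseq_cpu_start() and a prior rseq_register_current_thread() are assumed to come from rseq.h; only rseq_cmpeqv_trymemcpy_storev() and its return convention, 0 committed / 1 compare failed / -1 aborted, are taken from the header above):

	#include "rseq.h"

	struct percpu_buf {
		intptr_t count;		/* published element count */
		char slots[16][32];	/* payload storage */
	};

	static int buf_push(struct percpu_buf *bufs, void *item)
	{
		for (;;) {
			int cpu = rseq_cpu_start();	/* may be stale; rseq rechecks */
			struct percpu_buf *b = &bufs[cpu];
			intptr_t old = b->count;

			if (old >= 16)
				return -1;		/* full */
			switch (rseq_cmpeqv_trymemcpy_storev(&b->count, old,
							     b->slots[old], item,
							     sizeof(b->slots[0]),
							     old + 1, cpu)) {
			case 0:
				return 0;	/* copy and publish committed */
			case 1:
				continue;	/* count changed: recompute */
			default:
				continue;	/* aborted (migration/signal) */
			}
		}
	}
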
diff --git a/tools/testing/selftests/rseq/rseq.h b/tools/testing/selftests/rseq/rseq.h
index 0a80857..a468411 100644
--- a/tools/testing/selftests/rseq/rseq.h
+++ b/tools/testing/selftests/rseq/rseq.h
@@ -73,6 +73,8 @@
#include <rseq-arm.h>
#elif defined(__PPC__)
#include <rseq-ppc.h>
+#elif defined(__mips__)
+#include <rseq-mips.h>
#else
#error unsupported target
#endif
diff --git a/tools/testing/selftests/rseq/run_param_test.sh b/tools/testing/selftests/rseq/run_param_test.sh
old mode 100644
new mode 100755
diff --git a/tools/testing/selftests/sparc64/Makefile b/tools/testing/selftests/sparc64/Makefile
index 2082eef..a19531d 100644
--- a/tools/testing/selftests/sparc64/Makefile
+++ b/tools/testing/selftests/sparc64/Makefile
@@ -1,7 +1,18 @@
+# SPDX-License-Identifier: GPL-2.0
+uname_M := $(shell uname -m 2>/dev/null || echo not)
+ARCH ?= $(shell echo $(uname_M) | sed -e s/x86_64/x86/)
+
+ifneq ($(ARCH),sparc64)
+nothing:
+.PHONY: all clean run_tests install
+.SILENT:
+else
+
SUBDIRS := drivers
TEST_PROGS := run.sh
+
.PHONY: all clean
include ../lib.mk
@@ -18,10 +29,6 @@
fi \
done
-override define RUN_TESTS
- @cd $(OUTPUT); ./run.sh
-endef
-
override define INSTALL_RULE
mkdir -p $(INSTALL_PATH)
install -t $(INSTALL_PATH) $(TEST_PROGS) $(TEST_PROGS_EXTENDED) $(TEST_FILES)
@@ -33,10 +40,6 @@
done;
endef
-override define EMIT_TESTS
- echo "./run.sh"
-endef
-
override define CLEAN
@for DIR in $(SUBDIRS); do \
BUILD_TARGET=$(OUTPUT)/$$DIR; \
@@ -44,3 +47,4 @@
make OUTPUT=$$BUILD_TARGET -C $$DIR $@;\
done
endef
+endif
diff --git a/tools/testing/selftests/sparc64/drivers/Makefile b/tools/testing/selftests/sparc64/drivers/Makefile
index 6264f40..deb0df4 100644
--- a/tools/testing/selftests/sparc64/drivers/Makefile
+++ b/tools/testing/selftests/sparc64/drivers/Makefile
@@ -1,4 +1,4 @@
-
+# SPDX-License-Identifier: GPL-2.0
INCLUDEDIR := -I.
CFLAGS := $(CFLAGS) $(INCLUDEDIR) -Wall -O2 -g
diff --git a/tools/testing/selftests/static_keys/test_static_keys.sh b/tools/testing/selftests/static_keys/test_static_keys.sh
index 24cff49..fc9f8cd 100755
--- a/tools/testing/selftests/static_keys/test_static_keys.sh
+++ b/tools/testing/selftests/static_keys/test_static_keys.sh
@@ -2,6 +2,19 @@
# SPDX-License-Identifier: GPL-2.0
# Runs static keys kernel module tests
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_static_key_base; then
+ echo "static_key: module test_static_key_base is not found [SKIP]"
+ exit $ksft_skip
+fi
+
+if ! /sbin/modprobe -q -n test_static_keys; then
+ echo "static_key: module test_static_keys is not found [SKIP]"
+ exit $ksft_skip
+fi
+
if /sbin/modprobe -q test_static_key_base; then
if /sbin/modprobe -q test_static_keys; then
echo "static_key: ok"
diff --git a/tools/testing/selftests/sync/config b/tools/testing/selftests/sync/config
new file mode 100644
index 0000000..1ab7e81
--- /dev/null
+++ b/tools/testing/selftests/sync/config
@@ -0,0 +1,4 @@
+CONFIG_STAGING=y
+CONFIG_ANDROID=y
+CONFIG_SYNC=y
+CONFIG_SW_SYNC=y
diff --git a/tools/testing/selftests/sysctl/sysctl.sh b/tools/testing/selftests/sysctl/sysctl.sh
index ec232c3..584eb8e 100755
--- a/tools/testing/selftests/sysctl/sysctl.sh
+++ b/tools/testing/selftests/sysctl/sysctl.sh
@@ -14,6 +14,9 @@
# This performs a series of tests against the proc sysctl interface.
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
TEST_NAME="sysctl"
TEST_DRIVER="test_${TEST_NAME}"
TEST_DIR=$(dirname $0)
@@ -41,7 +44,7 @@
echo "$0: $DIR not present" >&2
echo "You must have the following enabled in your kernel:" >&2
cat $TEST_DIR/config >&2
- exit 1
+ exit $ksft_skip
fi
}
@@ -98,28 +101,30 @@
uid=$(id -u)
if [ $uid -ne 0 ]; then
echo $msg must be run as root >&2
- exit 0
+ exit $ksft_skip
fi
if ! which perl 2> /dev/null > /dev/null; then
echo "$0: You need perl installed"
- exit 1
+ exit $ksft_skip
fi
if ! which getconf 2> /dev/null > /dev/null; then
echo "$0: You need getconf installed"
- exit 1
+ exit $ksft_skip
fi
if ! which diff 2> /dev/null > /dev/null; then
echo "$0: You need diff installed"
- exit 1
+ exit $ksft_skip
fi
}
function load_req_mod()
{
- trap "test_modprobe" EXIT
-
if [ ! -d $DIR ]; then
+ if ! modprobe -q -n $TEST_DRIVER; then
+ echo "$0: module $TEST_DRIVER not found [SKIP]"
+ exit $ksft_skip
+ fi
modprobe $TEST_DRIVER
if [ $? -ne 0 ]; then
exit
@@ -765,6 +770,7 @@
test_reqs
allow_user_defaults
check_production_sysctl_writes_strict
+test_modprobe
load_req_mod
trap "test_finish" EXIT
diff --git a/tools/testing/selftests/user/test_user_copy.sh b/tools/testing/selftests/user/test_user_copy.sh
index d60506f..f9b31a5 100755
--- a/tools/testing/selftests/user/test_user_copy.sh
+++ b/tools/testing/selftests/user/test_user_copy.sh
@@ -2,6 +2,13 @@
# SPDX-License-Identifier: GPL-2.0
# Runs copy_to/from_user infrastructure using test_user_copy kernel module
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
+if ! /sbin/modprobe -q -n test_user_copy; then
+ echo "user: module test_user_copy is not found [SKIP]"
+ exit $ksft_skip
+fi
if /sbin/modprobe -q test_user_copy; then
/sbin/modprobe -q -r test_user_copy
echo "user_copy: ok"
diff --git a/tools/testing/selftests/vm/compaction_test.c b/tools/testing/selftests/vm/compaction_test.c
index 1097f04..bcec712 100644
--- a/tools/testing/selftests/vm/compaction_test.c
+++ b/tools/testing/selftests/vm/compaction_test.c
@@ -16,6 +16,8 @@
#include <unistd.h>
#include <string.h>
+#include "../kselftest.h"
+
#define MAP_SIZE 1048576
struct map_list {
@@ -169,7 +171,7 @@
printf("Either the sysctl compact_unevictable_allowed is not\n"
"set to 1 or couldn't read the proc file.\n"
"Skipping the test\n");
- return 0;
+ return KSFT_SKIP;
}
lim.rlim_cur = RLIM_INFINITY;
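
The hunk above is the first of several identical conversions in the C selftests (mlock2-tests.c and userfaultfd.c below follow the same pattern): prerequisite failures now exit with KSFT_SKIP so the framework records a skip instead of a silent pass. As a self-contained sketch, with prereq_available() a hypothetical probe (KSFT_PASS is 0 and KSFT_SKIP is 4 in kselftest.h):

	#include "../kselftest.h"

	static int prereq_available(void)	/* hypothetical prerequisite probe */
	{
		return 0;			/* pretend the feature is missing */
	}

	int main(void)
	{
		if (!prereq_available())
			return KSFT_SKIP;	/* reported as a skip, not a pass */
		return KSFT_PASS;
	}
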
diff --git a/tools/testing/selftests/vm/mlock2-tests.c b/tools/testing/selftests/vm/mlock2-tests.c
index 4997b92..637b6d0 100644
--- a/tools/testing/selftests/vm/mlock2-tests.c
+++ b/tools/testing/selftests/vm/mlock2-tests.c
@@ -9,6 +9,8 @@
#include <stdbool.h>
#include "mlock2.h"
+#include "../kselftest.h"
+
struct vm_boundaries {
unsigned long start;
unsigned long end;
@@ -303,7 +305,7 @@
if (mlock2_(map, 2 * page_size, 0)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
- _exit(0);
+ _exit(KSFT_SKIP);
}
perror("mlock2(0)");
goto unmap;
@@ -412,7 +414,7 @@
if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
- _exit(0);
+ _exit(KSFT_SKIP);
}
perror("mlock2(MLOCK_ONFAULT)");
goto unmap;
@@ -425,7 +427,7 @@
if (munlock(map, 2 * page_size)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
- _exit(0);
+ _exit(KSFT_SKIP);
}
perror("munlock()");
goto unmap;
@@ -457,7 +459,7 @@
if (mlock2_(map, 2 * page_size, MLOCK_ONFAULT)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
- _exit(0);
+ _exit(KSFT_SKIP);
}
perror("mlock2(MLOCK_ONFAULT)");
goto unmap;
@@ -583,7 +585,7 @@
if (call_mlock && mlock2_(map, 3 * page_size, MLOCK_ONFAULT)) {
if (errno == ENOSYS) {
printf("Cannot call new mlock family, skipping test\n");
- _exit(0);
+ _exit(KSFT_SKIP);
}
perror("mlock(ONFAULT)\n");
goto out;
diff --git a/tools/testing/selftests/vm/run_vmtests b/tools/testing/selftests/vm/run_vmtests
index 22d5646..88cbe55 100755
--- a/tools/testing/selftests/vm/run_vmtests
+++ b/tools/testing/selftests/vm/run_vmtests
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: GPL-2.0
#please run as root
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
mnt=./huge
exitcode=0
@@ -36,7 +39,7 @@
echo $(( $lackpgs + $nr_hugepgs )) > /proc/sys/vm/nr_hugepages
if [ $? -ne 0 ]; then
echo "Please run this test as root"
- exit 1
+ exit $ksft_skip
fi
while read name size unit; do
if [ "$name" = "HugePages_Free:" ]; then
diff --git a/tools/testing/selftests/vm/userfaultfd.c b/tools/testing/selftests/vm/userfaultfd.c
index de2f9ec..7b8171e 100644
--- a/tools/testing/selftests/vm/userfaultfd.c
+++ b/tools/testing/selftests/vm/userfaultfd.c
@@ -69,6 +69,8 @@
#include <setjmp.h>
#include <stdbool.h>
+#include "../kselftest.h"
+
#ifdef __NR_userfaultfd
static unsigned long nr_cpus, nr_pages, nr_pages_per_cpu, page_size;
@@ -1322,7 +1324,7 @@
int main(void)
{
printf("skip: Skipping userfaultfd test (missing __NR_userfaultfd)\n");
- return 0;
+ return KSFT_SKIP;
}
#endif /* __NR_userfaultfd */
diff --git a/tools/testing/selftests/zram/zram.sh b/tools/testing/selftests/zram/zram.sh
index 754de7d..232e958 100755
--- a/tools/testing/selftests/zram/zram.sh
+++ b/tools/testing/selftests/zram/zram.sh
@@ -2,6 +2,9 @@
# SPDX-License-Identifier: GPL-2.0
TCID="zram.sh"
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
. ./zram_lib.sh
run_zram () {
@@ -24,5 +27,5 @@
else
echo "$TCID : No zram.ko module or /dev/zram0 device file not found"
echo "$TCID : CONFIG_ZRAM is not set"
- exit 1
+ exit $ksft_skip
fi
diff --git a/tools/testing/selftests/zram/zram_lib.sh b/tools/testing/selftests/zram/zram_lib.sh
index f6a9c73..9e73a4f 100755
--- a/tools/testing/selftests/zram/zram_lib.sh
+++ b/tools/testing/selftests/zram/zram_lib.sh
@@ -18,6 +18,9 @@
dev_makeswap=-1
dev_mounted=-1
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+
trap INT
check_prereqs()
@@ -27,7 +30,7 @@
if [ $uid -ne 0 ]; then
echo $msg must be run as root >&2
- exit 0
+ exit $ksft_skip
fi
}
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index 72143cf..ea434dd 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -47,7 +47,7 @@
config KVM_COMPAT
def_bool y
- depends on KVM && COMPAT && !S390
+ depends on KVM && COMPAT && !(S390 || ARM64)
config HAVE_KVM_IRQ_BYPASS
bool
diff --git a/virt/kvm/arm/mmu.c b/virt/kvm/arm/mmu.c
index 8d90de2..1d90d79 100644
--- a/virt/kvm/arm/mmu.c
+++ b/virt/kvm/arm/mmu.c
@@ -297,6 +297,8 @@
phys_addr_t next;
assert_spin_locked(&kvm->mmu_lock);
+ WARN_ON(size & ~PAGE_MASK);
+
pgd = kvm->arch.pgd + stage2_pgd_index(addr);
do {
/*
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index ff7dc89..cdce653 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -617,11 +617,6 @@
pr_warn("GICV physical address 0x%llx not page aligned\n",
(unsigned long long)info->vcpu.start);
kvm_vgic_global_state.vcpu_base = 0;
- } else if (!PAGE_ALIGNED(resource_size(&info->vcpu))) {
- pr_warn("GICV size 0x%llx not a multiple of page size 0x%lx\n",
- (unsigned long long)resource_size(&info->vcpu),
- PAGE_SIZE);
- kvm_vgic_global_state.vcpu_base = 0;
} else {
kvm_vgic_global_state.vcpu_base = info->vcpu.start;
kvm_vgic_global_state.can_emulate_gicv2 = true;
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index ada21f4..8b47507f 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -116,6 +116,11 @@
#ifdef CONFIG_KVM_COMPAT
static long kvm_vcpu_compat_ioctl(struct file *file, unsigned int ioctl,
unsigned long arg);
+#define KVM_COMPAT(c) .compat_ioctl = (c)
+#else
+static long kvm_no_compat_ioctl(struct file *file, unsigned int ioctl,
+ unsigned long arg) { return -EINVAL; }
+#define KVM_COMPAT(c) .compat_ioctl = kvm_no_compat_ioctl
#endif
static int hardware_enable_all(void);
static void hardware_disable_all(void);
@@ -2396,11 +2401,9 @@
static struct file_operations kvm_vcpu_fops = {
.release = kvm_vcpu_release,
.unlocked_ioctl = kvm_vcpu_ioctl,
-#ifdef CONFIG_KVM_COMPAT
- .compat_ioctl = kvm_vcpu_compat_ioctl,
-#endif
.mmap = kvm_vcpu_mmap,
.llseek = noop_llseek,
+ KVM_COMPAT(kvm_vcpu_compat_ioctl),
};
/*
@@ -2824,10 +2827,8 @@
static const struct file_operations kvm_device_fops = {
.unlocked_ioctl = kvm_device_ioctl,
-#ifdef CONFIG_KVM_COMPAT
- .compat_ioctl = kvm_device_ioctl,
-#endif
.release = kvm_device_release,
+ KVM_COMPAT(kvm_device_ioctl),
};
struct kvm_device *kvm_device_from_filp(struct file *filp)
@@ -3165,10 +3166,8 @@
static struct file_operations kvm_vm_fops = {
.release = kvm_vm_release,
.unlocked_ioctl = kvm_vm_ioctl,
-#ifdef CONFIG_KVM_COMPAT
- .compat_ioctl = kvm_vm_compat_ioctl,
-#endif
.llseek = noop_llseek,
+ KVM_COMPAT(kvm_vm_compat_ioctl),
};
static int kvm_dev_ioctl_create_vm(unsigned long type)
@@ -3259,8 +3258,8 @@
static struct file_operations kvm_chardev_ops = {
.unlocked_ioctl = kvm_dev_ioctl,
- .compat_ioctl = kvm_dev_ioctl,
.llseek = noop_llseek,
+ KVM_COMPAT(kvm_dev_ioctl),
};
static struct miscdevice kvm_dev = {