Merge tag 'kvm-s390-next-4.6-2' of git://git.kernel.org/pub/scm/linux/kernel/git/kvms390/linux into HEAD
KVM: s390: Fixes and features for kvm/next (4.6) part 2
- add watchdog diagnose to trace event decoder
- better handle the cpu timer when not inside the guest
- only provide STFLE if the CPU model has STFLE
- reduce DMA page usage
diff --git a/Documentation/cgroup-v2.txt b/Documentation/cgroup-v2.txt
index e8d25e7..ff49cf9 100644
--- a/Documentation/cgroup-v2.txt
+++ b/Documentation/cgroup-v2.txt
@@ -7,7 +7,7 @@
conventions of cgroup v2. It describes all userland-visible aspects
of cgroup including core and specific controller behaviors. All
future changes must be reflected in this document. Documentation for
-v1 is available under Documentation/cgroup-legacy/.
+v1 is available under Documentation/cgroup-v1/.
CONTENTS
diff --git a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
index ace0599..20df350 100644
--- a/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
+++ b/Documentation/devicetree/bindings/clock/rockchip,rk3036-cru.txt
@@ -30,7 +30,7 @@
clock-output-names:
- "xin24m" - crystal input - required,
- "ext_i2s" - external I2S clock - optional,
- - "ext_gmac" - external GMAC clock - optional
+ - "rmii_clkin" - external EMAC clock - optional
Example: Clock controller node:
diff --git a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
index 7803e77..007a5b4 100644
--- a/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
+++ b/Documentation/devicetree/bindings/interrupt-controller/arm,gic-v3.txt
@@ -24,9 +24,8 @@
1 = edge triggered
4 = level triggered
- Cells 4 and beyond are reserved for future use. When the 1st cell
- has a value of 0 or 1, cells 4 and beyond act as padding, and may be
- ignored. It is recommended that padding cells have a value of 0.
+ Cells 4 and beyond are reserved for future use and must have a value
+ of 0 if present.
- reg : Specifies base physical address(s) and size of the GIC
registers, in the following order:
diff --git a/Documentation/devicetree/bindings/net/renesas,ravb.txt b/Documentation/devicetree/bindings/net/renesas,ravb.txt
index 81a9f9e..c8ac222 100644
--- a/Documentation/devicetree/bindings/net/renesas,ravb.txt
+++ b/Documentation/devicetree/bindings/net/renesas,ravb.txt
@@ -82,8 +82,8 @@
"ch16", "ch17", "ch18", "ch19",
"ch20", "ch21", "ch22", "ch23",
"ch24";
- clocks = <&mstp8_clks R8A7795_CLK_ETHERAVB>;
- power-domains = <&cpg_clocks>;
+ clocks = <&cpg CPG_MOD 812>;
+ power-domains = <&cpg>;
phy-mode = "rgmii-id";
phy-handle = <&phy0>;
diff --git a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
index 4e8b90e..07a7509 100644
--- a/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
+++ b/Documentation/devicetree/bindings/pci/pci-rcar-gen2.txt
@@ -8,6 +8,7 @@
Required properties:
- compatible: "renesas,pci-r8a7790" for the R8A7790 SoC;
"renesas,pci-r8a7791" for the R8A7791 SoC;
+ "renesas,pci-r8a7793" for the R8A7793 SoC;
"renesas,pci-r8a7794" for the R8A7794 SoC;
"renesas,pci-rcar-gen2" for a generic R-Car Gen2 compatible device
diff --git a/Documentation/devicetree/bindings/pci/rcar-pci.txt b/Documentation/devicetree/bindings/pci/rcar-pci.txt
index 558fe52..6cf9969 100644
--- a/Documentation/devicetree/bindings/pci/rcar-pci.txt
+++ b/Documentation/devicetree/bindings/pci/rcar-pci.txt
@@ -4,6 +4,7 @@
compatible: "renesas,pcie-r8a7779" for the R8A7779 SoC;
"renesas,pcie-r8a7790" for the R8A7790 SoC;
"renesas,pcie-r8a7791" for the R8A7791 SoC;
+ "renesas,pcie-r8a7793" for the R8A7793 SoC;
"renesas,pcie-r8a7795" for the R8A7795 SoC;
"renesas,pcie-rcar-gen2" for a generic R-Car Gen2 compatible device.
diff --git a/Documentation/devicetree/bindings/regulator/tps65217.txt b/Documentation/devicetree/bindings/regulator/tps65217.txt
index d181096..4f05d20 100644
--- a/Documentation/devicetree/bindings/regulator/tps65217.txt
+++ b/Documentation/devicetree/bindings/regulator/tps65217.txt
@@ -26,11 +26,7 @@
ti,pmic-shutdown-controller;
regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
dcdc1_reg: dcdc1 {
- reg = <0>;
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1800000>;
regulator-boot-on;
@@ -38,7 +34,6 @@
};
dcdc2_reg: dcdc2 {
- reg = <1>;
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
@@ -46,7 +41,6 @@
};
dcdc3_reg: dcc3 {
- reg = <2>;
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <1500000>;
regulator-boot-on;
@@ -54,7 +48,6 @@
};
ldo1_reg: ldo1 {
- reg = <3>;
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
@@ -62,7 +55,6 @@
};
ldo2_reg: ldo2 {
- reg = <4>;
regulator-min-microvolt = <900000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
@@ -70,7 +62,6 @@
};
ldo3_reg: ldo3 {
- reg = <5>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
@@ -78,7 +69,6 @@
};
ldo4_reg: ldo4 {
- reg = <6>;
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <3300000>;
regulator-boot-on;
diff --git a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
index ac2fcd6..1068ffc 100644
--- a/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
+++ b/Documentation/devicetree/bindings/rtc/s3c-rtc.txt
@@ -14,6 +14,10 @@
interrupt number is the rtc alarm interrupt and second interrupt number
is the rtc tick interrupt. The number of cells representing an interrupt
depends on the parent interrupt controller.
+- clocks: Must contain a list of phandle and clock specifier for the rtc
+ and source clocks.
+- clock-names: Must contain "rtc" and "rtc_src" entries sorted in the
+ same order as the clocks property.
Example:
@@ -21,4 +25,6 @@
compatible = "samsung,s3c6410-rtc";
reg = <0x10070000 0x100>;
interrupts = <44 0 45 0>;
+ clocks = <&clock CLK_RTC>, <&s2mps11_osc S2MPS11_CLK_AP>;
+ clock-names = "rtc", "rtc_src";
};
diff --git a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
index 35ae1fb..ed94c21 100644
--- a/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/serial/fsl-imx-uart.txt
@@ -9,7 +9,7 @@
- fsl,uart-has-rtscts : Indicate the uart has rts and cts
- fsl,irda-mode : Indicate the uart supports irda mode
- fsl,dte-mode : Indicate the uart works in DTE mode. The uart works
- is DCE mode by default.
+ in DCE mode by default.
Note: Each uart controller should have an alias correctly numbered
in "aliases" node.
diff --git a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
index ce55c0a..4da41bf 100644
--- a/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
+++ b/Documentation/devicetree/bindings/sound/fsl-asoc-card.txt
@@ -30,6 +30,8 @@
"fsl,imx-audio-sgtl5000"
(compatible with Documentation/devicetree/bindings/sound/imx-audio-sgtl5000.txt)
+ "fsl,imx-audio-wm8960"
+
Required properties:
- compatible : Contains one of entries in the compatible list.
diff --git a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
index 332e625..e5ee3f1 100644
--- a/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
+++ b/Documentation/devicetree/bindings/thermal/rcar-thermal.txt
@@ -1,8 +1,9 @@
* Renesas R-Car Thermal
Required properties:
-- compatible : "renesas,thermal-<soctype>", "renesas,rcar-thermal"
- as fallback.
+- compatible : "renesas,thermal-<soctype>",
+ "renesas,rcar-gen2-thermal" (with thermal-zone) or
+ "renesas,rcar-thermal" (without thermal-zone) as fallback.
Examples with soctypes are:
- "renesas,thermal-r8a73a4" (R-Mobile APE6)
- "renesas,thermal-r8a7779" (R-Car H1)
@@ -36,3 +37,35 @@
0xe61f0300 0x38>;
interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
};
+
+Example (with thermal-zone):
+
+thermal-zones {
+ cpu_thermal: cpu-thermal {
+ polling-delay-passive = <1000>;
+ polling-delay = <5000>;
+
+ thermal-sensors = <&thermal>;
+
+ trips {
+ cpu-crit {
+ temperature = <115000>;
+ hysteresis = <0>;
+ type = "critical";
+ };
+ };
+ cooling-maps {
+ };
+ };
+};
+
+thermal: thermal@e61f0000 {
+ compatible = "renesas,thermal-r8a7790",
+ "renesas,rcar-gen2-thermal",
+ "renesas,rcar-thermal";
+ reg = <0 0xe61f0000 0 0x14>, <0 0xe61f0100 0 0x38>;
+ interrupts = <0 69 IRQ_TYPE_LEVEL_HIGH>;
+ clocks = <&mstp5_clks R8A7790_CLK_THERMAL>;
+ power-domains = <&cpg_clocks>;
+ #thermal-sensor-cells = <0>;
+};
diff --git a/Documentation/filesystems/efivarfs.txt b/Documentation/filesystems/efivarfs.txt
index c477af0..686a64b 100644
--- a/Documentation/filesystems/efivarfs.txt
+++ b/Documentation/filesystems/efivarfs.txt
@@ -14,3 +14,10 @@
efivarfs is typically mounted like this,
mount -t efivarfs none /sys/firmware/efi/efivars
+
+Due to the presence of numerous firmware bugs where removing non-standard
+UEFI variables causes the system firmware to fail to POST, efivarfs
+files that are not well-known standardized variables are created
+as immutable files. This doesn't prevent removal - "chattr -i" will work -
+but it does prevent this kind of failure from happening accidentally.
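
A minimal sketch of what "chattr -i" amounts to at the syscall level (the
path, helper name, and error handling are illustrative assumptions, not part
of this patch): clear FS_IMMUTABLE_FL via FS_IOC_SETFLAGS, then unlink the
variable's file.

    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/fs.h>

    /* Hypothetical helper: delete an efivarfs file despite the immutable bit */
    static int delete_efi_var(const char *path)
    {
            int fd = open(path, O_RDONLY);
            int flags;

            if (fd < 0)
                    return -1;
            if (ioctl(fd, FS_IOC_GETFLAGS, &flags) == 0) {
                    flags &= ~FS_IMMUTABLE_FL;      /* what "chattr -i" does */
                    ioctl(fd, FS_IOC_SETFLAGS, &flags);
            }
            close(fd);
            return unlink(path);                    /* removal now proceeds */
    }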
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 551ecf0..9a53c92 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -4235,6 +4235,17 @@
The default value of this parameter is determined by
the config option CONFIG_WQ_POWER_EFFICIENT_DEFAULT.
+ workqueue.debug_force_rr_cpu
+ Workqueue used to implicitly guarantee that work
+ items queued without explicit CPU specified are put
+ on the local CPU. This guarantee is no longer true
+ and while local CPU is still preferred work items
+ may be put on foreign CPUs. This debug option
+ forces round-robin CPU selection to flush out
+ usages which depend on the now broken guarantee.
+ When enabled, memory and cache locality will be
+ impacted.
+
x2apic_phys [X86-64,APIC] Use x2apic physical mode instead of
default x2apic cluster mode on platforms
supporting x2apic.
diff --git a/Documentation/timers/hpet.txt b/Documentation/timers/hpet.txt
index 767392f..a484d2c 100644
--- a/Documentation/timers/hpet.txt
+++ b/Documentation/timers/hpet.txt
@@ -1,9 +1,7 @@
High Precision Event Timer Driver for Linux
The High Precision Event Timer (HPET) hardware follows a specification
-by Intel and Microsoft which can be found at
-
- http://www.intel.com/hardwaredesign/hpetspec_1.pdf
+by Intel and Microsoft, revision 1.
Each HPET has one fixed-rate counter (at 10+ MHz, hence "High Precision")
and up to 32 comparators. Normally three or more comparators are provided,
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 07e4cdf..4d0542c 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2507,8 +2507,9 @@
4.80 KVM_SET_DEVICE_ATTR/KVM_GET_DEVICE_ATTR
-Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device
-Type: device ioctl, vm ioctl
+Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
+ KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+Type: device ioctl, vm ioctl, vcpu ioctl
Parameters: struct kvm_device_attr
Returns: 0 on success, -1 on error
Errors:
@@ -2533,8 +2534,9 @@
4.81 KVM_HAS_DEVICE_ATTR
-Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device
-Type: device ioctl, vm ioctl
+Capability: KVM_CAP_DEVICE_CTRL, KVM_CAP_VM_ATTRIBUTES for vm device,
+ KVM_CAP_VCPU_ATTRIBUTES for vcpu device
+Type: device ioctl, vm ioctl, vcpu ioctl
Parameters: struct kvm_device_attr
Returns: 0 on success, -1 on error
Errors:
@@ -2577,6 +2579,8 @@
Depends on KVM_CAP_ARM_EL1_32BIT (arm64 only).
- KVM_ARM_VCPU_PSCI_0_2: Emulate PSCI v0.2 for the CPU.
Depends on KVM_CAP_ARM_PSCI_0_2.
+ - KVM_ARM_VCPU_PMU_V3: Emulate PMUv3 for the CPU.
+ Depends on KVM_CAP_ARM_PMU_V3.
4.83 KVM_ARM_PREFERRED_TARGET
@@ -3035,6 +3039,87 @@
Queues an SMI on the thread's vcpu.
+4.97 KVM_CAP_PPC_MULTITCE
+
+Capability: KVM_CAP_PPC_MULTITCE
+Architectures: ppc
+Type: vm
+
+This capability means the kernel is capable of handling hypercalls
+H_PUT_TCE_INDIRECT and H_STUFF_TCE without passing those into user
+space. This significantly accelerates DMA operations for PPC KVM guests.
+User space should expect that its handlers for these hypercalls
+will not be called if user space previously registered the LIOBN
+in KVM (via KVM_CREATE_SPAPR_TCE or similar calls).
+
+In order to enable H_PUT_TCE_INDIRECT and H_STUFF_TCE use in the guest,
+user space might have to advertise them to the guest. For example, an
+IBM pSeries (sPAPR) guest starts using them if "hcall-multi-tce" is
+present in the "ibm,hypertas-functions" device-tree property.
+
+The hypercalls mentioned above may or may not be processed successfully
+in the kernel-based fast path. If they cannot be handled by the kernel,
+they are passed on to user space, so user space still has to implement
+them despite the in-kernel acceleration.
+
+This capability is always enabled.
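
A hedged sketch of how a VMM might still probe for the capability before
advertising "hcall-multi-tce" to the guest (kvm_fd, an open /dev/kvm
descriptor, and the advertise helper are assumptions):

    if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_MULTITCE) > 0)
            advertise_hcall_multi_tce();    /* hypothetical helper */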
+
+4.98 KVM_CREATE_SPAPR_TCE_64
+
+Capability: KVM_CAP_SPAPR_TCE_64
+Architectures: powerpc
+Type: vm ioctl
+Parameters: struct kvm_create_spapr_tce_64 (in)
+Returns: file descriptor for manipulating the created TCE table
+
+This is an extension of KVM_CAP_SPAPR_TCE, which only supports 32bit
+windows, described in 4.62 KVM_CREATE_SPAPR_TCE.
+
+This capability uses an extended struct in the ioctl interface:
+
+/* for KVM_CAP_SPAPR_TCE_64 */
+struct kvm_create_spapr_tce_64 {
+ __u64 liobn;
+ __u32 page_shift;
+ __u32 flags;
+ __u64 offset; /* in pages */
+ __u64 size; /* in pages */
+};
+
+The aim of this extension is to support an additional, bigger DMA window
+with a variable page size.
+KVM_CREATE_SPAPR_TCE_64 receives a 64bit window size, an IOMMU page shift
+and a bus offset of the corresponding DMA window; @size and @offset are
+expressed in IOMMU pages.
+
+@flags is not used at the moment.
+
+The rest of the functionality is identical to KVM_CREATE_SPAPR_TCE.
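
As a sketch (not part of the documented text), creating such a window from
user space might look like the following; vm_fd and all numeric values are
assumptions:

    struct kvm_create_spapr_tce_64 args = {
            .liobn      = 0x80000001,       /* example LIOBN */
            .page_shift = 16,               /* 64K IOMMU pages */
            .flags      = 0,                /* currently unused */
            .offset     = 0x8000000,        /* bus offset, in pages */
            .size       = 0x10000,          /* window size, in pages */
    };
    int tce_fd = ioctl(vm_fd, KVM_CREATE_SPAPR_TCE_64, &args);

    if (tce_fd < 0)
            /* fall back to the 32bit KVM_CREATE_SPAPR_TCE window */ ;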
+
+4.99 KVM_REINJECT_CONTROL
+
+Capability: KVM_CAP_REINJECT_CONTROL
+Architectures: x86
+Type: vm ioctl
+Parameters: struct kvm_reinject_control (in)
+Returns: 0 on success,
+ -EFAULT if struct kvm_reinject_control cannot be read,
+ -ENXIO if KVM_CREATE_PIT or KVM_CREATE_PIT2 didn't succeed earlier.
+
+i8254 (PIT) has two modes, reinject and !reinject. The default is reinject,
+where KVM queues elapsed i8254 ticks and monitors completion of interrupt from
+vector(s) that i8254 injects. Reinject mode dequeues a tick and injects its
+interrupt whenever there isn't a pending interrupt from i8254.
+!reinject mode injects an interrupt as soon as a tick arrives.
+
+struct kvm_reinject_control {
+ __u8 pit_reinject;
+ __u8 reserved[31];
+};
+
+pit_reinject = 0 (!reinject mode) is recommended, unless running an old
+operating system that uses the PIT for timing (e.g. Linux 2.4.x).
+
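A minimal sketch of acting on that recommendation (vm_fd is assumed to be a
VM descriptor on which KVM_CREATE_PIT2 already succeeded):

    struct kvm_reinject_control ctl = { .pit_reinject = 0 };

    if (ioctl(vm_fd, KVM_REINJECT_CONTROL, &ctl) < 0)
            perror("KVM_REINJECT_CONTROL");
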
5. The kvm_run structure
------------------------
@@ -3339,6 +3424,7 @@
struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC 1
+#define KVM_EXIT_HYPERV_HCALL 2
__u32 type;
union {
struct {
@@ -3347,6 +3433,11 @@
__u64 evt_page;
__u64 msg_page;
} synic;
+ struct {
+ __u64 input;
+ __u64 result;
+ __u64 params[2];
+ } hcall;
} u;
};
/* KVM_EXIT_HYPERV */
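
A sketch of how a VMM run loop might consume the new hcall exit type; the
field layout follows the struct above, while handle_hv_hcall() is a
hypothetical helper:

    static void handle_hyperv_exit(struct kvm_run *run)
    {
            struct kvm_hyperv_exit *hv = &run->hyperv;

            if (hv->type == KVM_EXIT_HYPERV_HCALL)
                    /* report the hypercall result back to the guest */
                    hv->u.hcall.result = handle_hv_hcall(hv->u.hcall.input,
                                                         hv->u.hcall.params);
    }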
diff --git a/Documentation/virtual/kvm/devices/vcpu.txt b/Documentation/virtual/kvm/devices/vcpu.txt
new file mode 100644
index 0000000..c041658
--- /dev/null
+++ b/Documentation/virtual/kvm/devices/vcpu.txt
@@ -0,0 +1,33 @@
+Generic vcpu interface
+====================================
+
+The virtual cpu "device" also accepts the ioctls KVM_SET_DEVICE_ATTR,
+KVM_GET_DEVICE_ATTR, and KVM_HAS_DEVICE_ATTR. The interface uses the same struct
+kvm_device_attr as other devices, but targets VCPU-wide settings and controls.
+
+The groups and attributes per virtual cpu, if any, are architecture specific.
+
+1. GROUP: KVM_ARM_VCPU_PMU_V3_CTRL
+Architectures: ARM64
+
+1.1. ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_IRQ
+Parameters: in kvm_device_attr.addr, the address for the PMU overflow
+            interrupt is a pointer to an int
+Returns: -EBUSY: The PMU overflow interrupt is already set
+ -ENXIO: The overflow interrupt not set when attempting to get it
+ -ENODEV: PMUv3 not supported
+ -EINVAL: Invalid PMU overflow interrupt number supplied
+
+A value describing the PMUv3 (Performance Monitor Unit v3) overflow interrupt
+number for this vcpu. This interrupt could be a PPI or SPI, but the interrupt
+type must be same for each vcpu. As a PPI, the interrupt number is the same for
+all vcpus, while as an SPI it must be a separate number per vcpu.
+
+1.2 ATTRIBUTE: KVM_ARM_VCPU_PMU_V3_INIT
+Parameters: no additional parameter in kvm_device_attr.addr
+Returns: -ENODEV: PMUv3 not supported
+ -ENXIO: PMUv3 not properly configured as required prior to calling this
+ attribute
+ -EBUSY: PMUv3 already initialized
+
+Request the initialization of the PMUv3.
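
As an illustrative sketch only (vcpu_fd and the interrupt number are
assumptions), the two attributes are typically exercised in order, setting
the IRQ before requesting init:

    int irq = 23;   /* e.g. a PPI, identical on every vcpu */
    struct kvm_device_attr attr = {
            .group = KVM_ARM_VCPU_PMU_V3_CTRL,
            .attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
            .addr  = (__u64)(unsigned long)&irq,
    };

    if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
            perror("PMU_V3_IRQ");

    attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
    attr.addr = 0;  /* no additional parameter */
    if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
            perror("PMU_V3_INIT");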
diff --git a/Documentation/virtual/kvm/mmu.txt b/Documentation/virtual/kvm/mmu.txt
index daf9c0f..dda2e93 100644
--- a/Documentation/virtual/kvm/mmu.txt
+++ b/Documentation/virtual/kvm/mmu.txt
@@ -391,11 +391,11 @@
write-protected pages
- the guest page must be wholly contained by a single memory slot
-To check the last two conditions, the mmu maintains a ->write_count set of
+To check the last two conditions, the mmu maintains a ->disallow_lpage set of
arrays for each memory slot and large page size. Every write protected page
-causes its write_count to be incremented, thus preventing instantiation of
+causes its disallow_lpage to be incremented, thus preventing instantiation of
a large spte. The frames at the end of an unaligned memory slot have
-artificially inflated ->write_counts so they can never be instantiated.
+artificially inflated ->disallow_lpages so they can never be instantiated.
Zapping all pages (page generation count)
=========================================
diff --git a/MAINTAINERS b/MAINTAINERS
index 7f1fa4f..da3e4d8 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -920,17 +920,24 @@
S: Maintained
F: drivers/clk/sunxi/
-ARM/Amlogic MesonX SoC support
+ARM/Amlogic Meson SoC support
M: Carlo Caione <carlo@caione.org>
L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+L: linux-meson@googlegroups.com
+W: http://linux-meson.com/
S: Maintained
-F: drivers/media/rc/meson-ir.c
-N: meson[x68]
+F: arch/arm/mach-meson/
+F: arch/arm/boot/dts/meson*
+N: meson
ARM/Annapurna Labs ALPINE ARCHITECTURE
M: Tsahee Zidenberg <tsahee@annapurnalabs.com>
+M: Antoine Tenart <antoine.tenart@free-electrons.com>
S: Maintained
F: arch/arm/mach-alpine/
+F: arch/arm/boot/dts/alpine*
+F: arch/arm64/boot/dts/al/
+F: drivers/*/*alpine*
ARM/ATMEL AT91RM9200, AT91SAM9 AND SAMA5 SOC SUPPORT
M: Nicolas Ferre <nicolas.ferre@atmel.com>
@@ -1442,8 +1449,8 @@
ARM/RENESAS ARM64 ARCHITECTURE
M: Simon Horman <horms@verge.net.au>
M: Magnus Damm <magnus.damm@gmail.com>
-L: linux-sh@vger.kernel.org
-Q: http://patchwork.kernel.org/project/linux-sh/list/
+L: linux-renesas-soc@vger.kernel.org
+Q: http://patchwork.kernel.org/project/linux-renesas-soc/list/
T: git git://git.kernel.org/pub/scm/linux/kernel/git/horms/renesas.git next
S: Supported
F: arch/arm64/boot/dts/renesas/
@@ -2362,14 +2369,6 @@
S: Maintained
N: bcm2835
-BROADCOM BCM33XX MIPS ARCHITECTURE
-M: Kevin Cernekee <cernekee@gmail.com>
-L: linux-mips@linux-mips.org
-S: Maintained
-F: arch/mips/bcm3384/*
-F: arch/mips/include/asm/mach-bcm3384/*
-F: arch/mips/kernel/*bmips*
-
BROADCOM BCM47XX MIPS ARCHITECTURE
M: Hauke Mehrtens <hauke@hauke-m.de>
M: Rafał Miłecki <zajec5@gmail.com>
@@ -3452,7 +3451,6 @@
DESIGNWARE USB3 DRD IP DRIVER
M: Felipe Balbi <balbi@kernel.org>
L: linux-usb@vger.kernel.org
-L: linux-omap@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
F: drivers/usb/dwc3/
@@ -6136,7 +6134,7 @@
KERNEL SELFTEST FRAMEWORK
M: Shuah Khan <shuahkh@osg.samsung.com>
-L: linux-api@vger.kernel.org
+L: linux-kselftest@vger.kernel.org
T: git git://git.kernel.org/pub/scm/shuah/linux-kselftest
S: Maintained
F: tools/testing/selftests
@@ -7362,7 +7360,7 @@
F: include/linux/isicom.h
MUSB MULTIPOINT HIGH SPEED DUAL-ROLE CONTROLLER
-M: Felipe Balbi <balbi@kernel.org>
+M: Bin Liu <b-liu@ti.com>
L: linux-usb@vger.kernel.org
T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
S: Maintained
@@ -7694,13 +7692,13 @@
F: arch/nios2/
NOKIA N900 POWER SUPPLY DRIVERS
-M: Pali Rohár <pali.rohar@gmail.com>
-S: Maintained
+R: Pali Rohár <pali.rohar@gmail.com>
F: include/linux/power/bq2415x_charger.h
F: include/linux/power/bq27xxx_battery.h
F: include/linux/power/isp1704_charger.h
F: drivers/power/bq2415x_charger.c
F: drivers/power/bq27xxx_battery.c
+F: drivers/power/bq27xxx_battery_i2c.c
F: drivers/power/isp1704_charger.c
F: drivers/power/rx51_battery.c
@@ -7931,11 +7929,9 @@
F: drivers/staging/media/omap4iss/
OMAP USB SUPPORT
-M: Felipe Balbi <balbi@kernel.org>
L: linux-usb@vger.kernel.org
L: linux-omap@vger.kernel.org
-T: git git://git.kernel.org/pub/scm/linux/kernel/git/balbi/usb.git
-S: Maintained
+S: Orphan
F: drivers/usb/*/*omap*
F: arch/arm/*omap*/usb*
@@ -9566,6 +9562,12 @@
S: Maintained
F: drivers/thunderbolt/
+TI BQ27XXX POWER SUPPLY DRIVER
+R: Andrew F. Davis <afd@ti.com>
+F: include/linux/power/bq27xxx_battery.h
+F: drivers/power/bq27xxx_battery.c
+F: drivers/power/bq27xxx_battery_i2c.c
+
TIMEKEEPING, CLOCKSOURCE CORE, NTP, ALARMTIMER
M: John Stultz <john.stultz@linaro.org>
M: Thomas Gleixner <tglx@linutronix.de>
@@ -9787,10 +9789,11 @@
F: drivers/scsi/be2iscsi/
Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
-M: Sathya Perla <sathya.perla@avagotech.com>
-M: Ajit Khaparde <ajit.khaparde@avagotech.com>
-M: Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
-M: Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
+M: Sathya Perla <sathya.perla@broadcom.com>
+M: Ajit Khaparde <ajit.khaparde@broadcom.com>
+M: Padmanabh Ratnakar <padmanabh.ratnakar@broadcom.com>
+M: Sriharsha Basavapatna <sriharsha.basavapatna@broadcom.com>
+M: Somnath Kotur <somnath.kotur@broadcom.com>
L: netdev@vger.kernel.org
W: http://www.emulex.com
S: Supported
@@ -12020,7 +12023,6 @@
F: arch/arm64/include/asm/xen/
XEN NETWORK BACKEND DRIVER
-M: Ian Campbell <ian.campbell@citrix.com>
M: Wei Liu <wei.liu2@citrix.com>
L: xen-devel@lists.xenproject.org (moderated for non-subscribers)
L: netdev@vger.kernel.org
diff --git a/Makefile b/Makefile
index 6828408..af6e5f8 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 4
PATCHLEVEL = 5
SUBLEVEL = 0
-EXTRAVERSION = -rc3
+EXTRAVERSION = -rc6
NAME = Blurry Fish Butt
# *DOCUMENTATION*
diff --git a/arch/arc/Kconfig b/arch/arc/Kconfig
index 76dde9d..8a188bc 100644
--- a/arch/arc/Kconfig
+++ b/arch/arc/Kconfig
@@ -12,8 +12,6 @@
select BUILDTIME_EXTABLE_SORT
select COMMON_CLK
select CLONE_BACKWARDS
- # ARC Busybox based initramfs absolutely relies on DEVTMPFS for /dev
- select DEVTMPFS if !INITRAMFS_SOURCE=""
select GENERIC_ATOMIC64
select GENERIC_CLOCKEVENTS
select GENERIC_FIND_FIRST_BIT
@@ -275,14 +273,6 @@
default "0xA0000000"
depends on ARC_HAS_DCCM
-config ARC_HAS_HW_MPY
- bool "Use Hardware Multiplier (Normal or Faster XMAC)"
- default y
- help
- Influences how gcc generates code for MPY operations.
- If enabled, MPYxx insns are generated, provided by Standard/XMAC
- Multipler. Otherwise software multipy lib is used
-
choice
prompt "MMU Version"
default ARC_MMU_V3 if ARC_CPU_770
@@ -338,6 +328,19 @@
endchoice
+choice
+ prompt "MMU Super Page Size"
+ depends on ISA_ARCV2 && TRANSPARENT_HUGEPAGE
+ default ARC_HUGEPAGE_2M
+
+config ARC_HUGEPAGE_2M
+ bool "2MB"
+
+config ARC_HUGEPAGE_16M
+ bool "16MB"
+
+endchoice
+
if ISA_ARCOMPACT
config ARC_COMPACT_IRQ_LEVELS
@@ -410,7 +413,7 @@
default n
depends on !SMP
-config ARC_HAS_GRTC
+config ARC_HAS_GFRC
bool "SMP synchronized 64-bit cycle counter"
default y
depends on SMP
@@ -529,14 +532,6 @@
Counts number of I and D TLB Misses and exports them via Debugfs
The counters can be cleared via Debugfs as well
-if SMP
-
-config ARC_IPI_DBG
- bool "Debug Inter Core interrupts"
- default n
-
-endif
-
endif
config ARC_UBOOT_SUPPORT
@@ -566,6 +561,12 @@
endmenu # "ARC Architecture Configuration"
source "mm/Kconfig"
+
+config FORCE_MAX_ZONEORDER
+ int "Maximum zone order"
+ default "12" if ARC_HUGEPAGE_16M
+ default "11"
+
source "net/Kconfig"
source "drivers/Kconfig"
source "fs/Kconfig"
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index aeb1902..c8230f3 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -74,10 +74,6 @@
# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
ldflags-$(upto_gcc44) += -marclinux
-ifndef CONFIG_ARC_HAS_HW_MPY
- cflags-y += -mno-mpy
-endif
-
LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
# Modules with short calls might break for calls into builtin-kernel
diff --git a/arch/arc/configs/axs101_defconfig b/arch/arc/configs/axs101_defconfig
index f1ac981..5d4e2a0 100644
--- a/arch/arc/configs/axs101_defconfig
+++ b/arch/arc/configs/axs101_defconfig
@@ -39,6 +39,7 @@
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -73,7 +74,6 @@
CONFIG_I2C_DESIGNWARE_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_LOGO=y
@@ -91,12 +91,10 @@
CONFIG_MMC_DW=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
-CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arc/configs/axs103_defconfig b/arch/arc/configs/axs103_defconfig
index 323486d..87ee46b 100644
--- a/arch/arc/configs/axs103_defconfig
+++ b/arch/arc/configs/axs103_defconfig
@@ -39,14 +39,10 @@
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_AXS=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
@@ -78,14 +74,12 @@
CONFIG_I2C_DESIGNWARE_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
@@ -97,12 +91,10 @@
CONFIG_MMC_DW=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
-CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arc/configs/axs103_smp_defconfig b/arch/arc/configs/axs103_smp_defconfig
index 66191cd..d80daf4 100644
--- a/arch/arc/configs/axs103_smp_defconfig
+++ b/arch/arc/configs/axs103_smp_defconfig
@@ -40,14 +40,10 @@
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_MTD=y
-CONFIG_MTD_CMDLINE_PARTS=y
-CONFIG_MTD_BLOCK=y
-CONFIG_MTD_NAND=y
-CONFIG_MTD_NAND_AXS=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_NETDEVICES=y
@@ -79,14 +75,12 @@
CONFIG_I2C_DESIGNWARE_PLATFORM=y
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
# CONFIG_LOGO_LINUX_CLUT224 is not set
-CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_OHCI_HCD=y
@@ -98,12 +92,10 @@
CONFIG_MMC_DW=y
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT3_FS=y
-CONFIG_EXT4_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_NTFS_FS=y
CONFIG_TMPFS=y
-CONFIG_JFFS2_FS=y
CONFIG_NFS_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
diff --git a/arch/arc/configs/nsim_700_defconfig b/arch/arc/configs/nsim_700_defconfig
index 138f9d8..f410953 100644
--- a/arch/arc/configs/nsim_700_defconfig
+++ b/arch/arc/configs/nsim_700_defconfig
@@ -4,6 +4,7 @@
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -26,7 +27,6 @@
CONFIG_ARC_BUILTIN_DTB_NAME="nsim_700"
CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -34,6 +34,7 @@
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -51,7 +52,6 @@
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
@@ -63,4 +63,3 @@
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
# CONFIG_DEBUG_PREEMPT is not set
-CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsim_hs_defconfig b/arch/arc/configs/nsim_hs_defconfig
index f68838e..cfaa33c 100644
--- a/arch/arc/configs/nsim_hs_defconfig
+++ b/arch/arc/configs/nsim_hs_defconfig
@@ -35,6 +35,7 @@
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -49,7 +50,6 @@
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
@@ -61,4 +61,3 @@
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
# CONFIG_DEBUG_PREEMPT is not set
-CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsim_hs_smp_defconfig b/arch/arc/configs/nsim_hs_smp_defconfig
index 96bd1c2..bb2a8dc 100644
--- a/arch/arc/configs/nsim_hs_smp_defconfig
+++ b/arch/arc/configs/nsim_hs_smp_defconfig
@@ -2,6 +2,7 @@
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
+# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
@@ -21,13 +22,11 @@
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARC_PLAT_SIM=y
-CONFIG_ARC_BOARD_ML509=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
CONFIG_ARC_BUILTIN_DTB_NAME="nsim_hs_idu"
CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -35,6 +34,7 @@
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -49,7 +49,6 @@
CONFIG_SERIAL_ARC_CONSOLE=y
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
-# CONFIG_VGA_CONSOLE is not set
# CONFIG_HID is not set
# CONFIG_USB_SUPPORT is not set
# CONFIG_IOMMU_SUPPORT is not set
@@ -60,4 +59,3 @@
CONFIG_NFS_FS=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
-CONFIG_XZ_DEC=y
diff --git a/arch/arc/configs/nsimosci_defconfig b/arch/arc/configs/nsimosci_defconfig
index 31e1d95..646182e 100644
--- a/arch/arc/configs/nsimosci_defconfig
+++ b/arch/arc/configs/nsimosci_defconfig
@@ -33,6 +33,7 @@
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -58,7 +59,6 @@
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
diff --git a/arch/arc/configs/nsimosci_hs_defconfig b/arch/arc/configs/nsimosci_hs_defconfig
index fcae666..ceca254 100644
--- a/arch/arc/configs/nsimosci_hs_defconfig
+++ b/arch/arc/configs/nsimosci_hs_defconfig
@@ -34,12 +34,12 @@
CONFIG_NET_KEY=y
CONFIG_INET=y
# CONFIG_IPV6 is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
# CONFIG_BLK_DEV is not set
CONFIG_NETDEVICES=y
-CONFIG_NET_OSCI_LAN=y
CONFIG_INPUT_EVDEV=y
# CONFIG_MOUSE_PS2_ALPS is not set
# CONFIG_MOUSE_PS2_LOGIPS2PP is not set
@@ -58,7 +58,6 @@
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_FB=y
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
diff --git a/arch/arc/configs/nsimosci_hs_smp_defconfig b/arch/arc/configs/nsimosci_hs_smp_defconfig
index b01b659..4b6da90 100644
--- a/arch/arc/configs/nsimosci_hs_smp_defconfig
+++ b/arch/arc/configs/nsimosci_hs_smp_defconfig
@@ -2,6 +2,7 @@
CONFIG_DEFAULT_HOSTNAME="ARCLinux"
# CONFIG_SWAP is not set
CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
@@ -18,15 +19,11 @@
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_ARC_PLAT_SIM=y
-CONFIG_ARC_BOARD_ML509=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
-CONFIG_ARC_HAS_LL64=y
-# CONFIG_ARC_HAS_RTSC is not set
CONFIG_ARC_BUILTIN_DTB_NAME="nsimosci_hs_idu"
CONFIG_PREEMPT=y
# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_PACKET_DIAG=y
@@ -40,6 +37,7 @@
# CONFIG_INET_LRO is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
# CONFIG_FIRMWARE_IN_KERNEL is not set
@@ -56,14 +54,11 @@
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_NET_VENDOR_WIZNET is not set
-CONFIG_NET_OSCI_LAN=y
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
CONFIG_MOUSE_PS2_TOUCHKIT=y
# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_LIBPS2=y
CONFIG_SERIO_ARC_PS2=y
-CONFIG_VT_HW_CONSOLE_BINDING=y
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVKMEM is not set
CONFIG_SERIAL_8250=y
@@ -75,9 +70,6 @@
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
CONFIG_FB=y
-CONFIG_ARCPGU_RGB888=y
-CONFIG_ARCPGU_DISPTYPE=0
-# CONFIG_VGA_CONSOLE is not set
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_LOGO=y
# CONFIG_HID is not set
diff --git a/arch/arc/configs/tb10x_defconfig b/arch/arc/configs/tb10x_defconfig
index 3b4dc9c..9b342ea 100644
--- a/arch/arc/configs/tb10x_defconfig
+++ b/arch/arc/configs/tb10x_defconfig
@@ -3,6 +3,7 @@
CONFIG_DEFAULT_HOSTNAME="tb10x"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
@@ -26,12 +27,10 @@
# CONFIG_BLOCK is not set
CONFIG_ARC_PLAT_TB10X=y
CONFIG_ARC_CACHE_LINE_SHIFT=5
-CONFIG_ARC_STACK_NONEXEC=y
CONFIG_HZ=250
CONFIG_ARC_BUILTIN_DTB_NAME="abilis_tb100_dvk"
CONFIG_PREEMPT_VOLUNTARY=y
# CONFIG_COMPACTION is not set
-# CONFIG_CROSS_MEMORY_ATTACH is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -44,8 +43,8 @@
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
# CONFIG_WIRELESS is not set
+CONFIG_DEVTMPFS=y
# CONFIG_FIRMWARE_IN_KERNEL is not set
-CONFIG_PROC_DEVICETREE=y
CONFIG_NETDEVICES=y
# CONFIG_NET_CADENCE is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
@@ -55,9 +54,6 @@
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_SEEQ is not set
CONFIG_STMMAC_ETH=y
-CONFIG_STMMAC_DEBUG_FS=y
-CONFIG_STMMAC_DA=y
-CONFIG_STMMAC_CHAINED=y
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_WLAN is not set
# CONFIG_INPUT is not set
@@ -91,7 +87,6 @@
CONFIG_LEDS_TRIGGER_TRANSIENT=y
CONFIG_DMADEVICES=y
CONFIG_DW_DMAC=y
-CONFIG_NET_DMA=y
CONFIG_ASYNC_TX_DMA=y
# CONFIG_IOMMU_SUPPORT is not set
# CONFIG_DNOTIFY is not set
@@ -100,17 +95,16 @@
CONFIG_CONFIGFS_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
# CONFIG_NETWORK_FILESYSTEMS is not set
+CONFIG_DEBUG_INFO=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
-CONFIG_MAGIC_SYSRQ=y
CONFIG_STRIP_ASM_SYMS=y
CONFIG_DEBUG_FS=y
CONFIG_HEADERS_CHECK=y
CONFIG_DEBUG_SECTION_MISMATCH=y
+CONFIG_MAGIC_SYSRQ=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DETECT_HUNG_TASK=y
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
-CONFIG_DEBUG_INFO=y
-CONFIG_DEBUG_MEMORY_INIT=y
-CONFIG_DEBUG_STACKOVERFLOW=y
-# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
diff --git a/arch/arc/configs/vdk_hs38_smp_defconfig b/arch/arc/configs/vdk_hs38_smp_defconfig
index f36c047..7359859 100644
--- a/arch/arc/configs/vdk_hs38_smp_defconfig
+++ b/arch/arc/configs/vdk_hs38_smp_defconfig
@@ -16,7 +16,7 @@
CONFIG_AXS103=y
CONFIG_ISA_ARCV2=y
CONFIG_SMP=y
-# CONFIG_ARC_HAS_GRTC is not set
+# CONFIG_ARC_HAS_GFRC is not set
CONFIG_ARC_UBOOT_SUPPORT=y
CONFIG_ARC_BUILTIN_DTB_NAME="vdk_hs38_smp"
CONFIG_PREEMPT=y
diff --git a/arch/arc/include/asm/arcregs.h b/arch/arc/include/asm/arcregs.h
index 7fac7d8..f9f4c6f 100644
--- a/arch/arc/include/asm/arcregs.h
+++ b/arch/arc/include/asm/arcregs.h
@@ -10,7 +10,8 @@
#define _ASM_ARC_ARCREGS_H
/* Build Configuration Registers */
-#define ARC_REG_DCCMBASE_BCR 0x61 /* DCCM Base Addr */
+#define ARC_REG_AUX_DCCM 0x18 /* DCCM Base Addr ARCv2 */
+#define ARC_REG_DCCM_BASE_BUILD 0x61 /* DCCM Base Addr ARCompact */
#define ARC_REG_CRC_BCR 0x62
#define ARC_REG_VECBASE_BCR 0x68
#define ARC_REG_PERIBASE_BCR 0x69
@@ -18,10 +19,10 @@
#define ARC_REG_DPFP_BCR 0x6C /* ARCompact: Dbl Precision FPU */
#define ARC_REG_FP_V2_BCR 0xc8 /* ARCv2 FPU */
#define ARC_REG_SLC_BCR 0xce
-#define ARC_REG_DCCM_BCR 0x74 /* DCCM Present + SZ */
+#define ARC_REG_DCCM_BUILD 0x74 /* DCCM size (common) */
#define ARC_REG_TIMERS_BCR 0x75
#define ARC_REG_AP_BCR 0x76
-#define ARC_REG_ICCM_BCR 0x78
+#define ARC_REG_ICCM_BUILD 0x78 /* ICCM size (common) */
#define ARC_REG_XY_MEM_BCR 0x79
#define ARC_REG_MAC_BCR 0x7a
#define ARC_REG_MUL_BCR 0x7b
@@ -36,6 +37,7 @@
#define ARC_REG_IRQ_BCR 0xF3
#define ARC_REG_SMART_BCR 0xFF
#define ARC_REG_CLUSTER_BCR 0xcf
+#define ARC_REG_AUX_ICCM 0x208 /* ICCM Base Addr (ARCv2) */
/* status32 Bits Positions */
#define STATUS_AE_BIT 5 /* Exception active */
@@ -246,7 +248,7 @@
#endif
};
-struct bcr_iccm {
+struct bcr_iccm_arcompact {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int base:16, pad:5, sz:3, ver:8;
#else
@@ -254,17 +256,15 @@
#endif
};
-/* DCCM Base Address Register: ARC_REG_DCCMBASE_BCR */
-struct bcr_dccm_base {
+struct bcr_iccm_arcv2 {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int addr:24, ver:8;
+ unsigned int pad:8, sz11:4, sz01:4, sz10:4, sz00:4, ver:8;
#else
- unsigned int ver:8, addr:24;
+ unsigned int ver:8, sz00:4, sz10:4, sz01:4, sz11:4, pad:8;
#endif
};
-/* DCCM RAM Configuration Register: ARC_REG_DCCM_BCR */
-struct bcr_dccm {
+struct bcr_dccm_arcompact {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int res:21, sz:3, ver:8;
#else
@@ -272,6 +272,14 @@
#endif
};
+struct bcr_dccm_arcv2 {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad2:12, cyc:3, pad1:1, sz1:4, sz0:4, ver:8;
+#else
+ unsigned int ver:8, sz0:4, sz1:4, pad1:1, cyc:3, pad2:12;
+#endif
+};
+
/* ARCompact: Both SP and DP FPU BCRs have same format */
struct bcr_fp_arcompact {
#ifdef CONFIG_CPU_BIG_ENDIAN
@@ -315,9 +323,9 @@
struct bcr_generic {
#ifdef CONFIG_CPU_BIG_ENDIAN
- unsigned int pad:24, ver:8;
+ unsigned int info:24, ver:8;
#else
- unsigned int ver:8, pad:24;
+ unsigned int ver:8, info:24;
#endif
};
@@ -349,14 +357,13 @@
struct cpuinfo_arc_bpu bpu;
struct bcr_identity core;
struct bcr_isa isa;
- struct bcr_timer timers;
unsigned int vec_base;
struct cpuinfo_arc_ccm iccm, dccm;
struct {
unsigned int swap:1, norm:1, minmax:1, barrel:1, crc:1, pad1:3,
fpu_sp:1, fpu_dp:1, pad2:6,
debug:1, ap:1, smart:1, rtt:1, pad3:4,
- pad4:8;
+ timer0:1, timer1:1, rtc:1, gfrc:1, pad4:4;
} extn;
struct bcr_mpy extn_mpy;
struct bcr_extn_xymem extn_xymem;
diff --git a/arch/arc/include/asm/irq.h b/arch/arc/include/asm/irq.h
index 4fd7d62..49014f0 100644
--- a/arch/arc/include/asm/irq.h
+++ b/arch/arc/include/asm/irq.h
@@ -16,11 +16,9 @@
#ifdef CONFIG_ISA_ARCOMPACT
#define TIMER0_IRQ 3
#define TIMER1_IRQ 4
-#define IPI_IRQ (NR_CPU_IRQS-1) /* dummy to enable SMP build for up hardware */
#else
#define TIMER0_IRQ 16
#define TIMER1_IRQ 17
-#define IPI_IRQ 19
#endif
#include <linux/interrupt.h>
diff --git a/arch/arc/include/asm/irqflags-arcv2.h b/arch/arc/include/asm/irqflags-arcv2.h
index 258b0e5..37c2f75 100644
--- a/arch/arc/include/asm/irqflags-arcv2.h
+++ b/arch/arc/include/asm/irqflags-arcv2.h
@@ -22,6 +22,7 @@
#define AUX_IRQ_CTRL 0x00E
#define AUX_IRQ_ACT 0x043 /* Active Intr across all levels */
#define AUX_IRQ_LVL_PEND 0x200 /* Pending Intr across all levels */
+#define AUX_IRQ_HINT 0x201 /* For generating Soft Interrupts */
#define AUX_IRQ_PRIORITY 0x206
#define ICAUSE 0x40a
#define AUX_IRQ_SELECT 0x40b
@@ -30,8 +31,11 @@
/* Was Intr taken in User Mode */
#define AUX_IRQ_ACT_BIT_U 31
-/* 0 is highest level, but taken by FIRQs, if present in design */
-#define ARCV2_IRQ_DEF_PRIO 0
+/*
+ * User space should be interruptible even by the lowest prio interrupt
+ * Safe even if the actual number of interrupt priorities is fewer, or even one
+ */
+#define ARCV2_IRQ_DEF_PRIO 15
/* seed value for status register */
#define ISA_INIT_STATUS_BITS (STATUS_IE_MASK | STATUS_AD_MASK | \
@@ -112,6 +116,16 @@
return arch_irqs_disabled_flags(arch_local_save_flags());
}
+static inline void arc_softirq_trigger(int irq)
+{
+ write_aux_reg(AUX_IRQ_HINT, irq);
+}
+
+static inline void arc_softirq_clear(int irq)
+{
+ write_aux_reg(AUX_IRQ_HINT, 0);
+}
+
#else
.macro IRQ_DISABLE scratch
diff --git a/arch/arc/include/asm/mcip.h b/arch/arc/include/asm/mcip.h
index 46f4e53..847e3bb 100644
--- a/arch/arc/include/asm/mcip.h
+++ b/arch/arc/include/asm/mcip.h
@@ -39,8 +39,8 @@
#define CMD_DEBUG_SET_MASK 0x34
#define CMD_DEBUG_SET_SELECT 0x36
-#define CMD_GRTC_READ_LO 0x42
-#define CMD_GRTC_READ_HI 0x43
+#define CMD_GFRC_READ_LO 0x42
+#define CMD_GFRC_READ_HI 0x43
#define CMD_IDU_ENABLE 0x71
#define CMD_IDU_DISABLE 0x72
diff --git a/arch/arc/include/asm/pgtable.h b/arch/arc/include/asm/pgtable.h
index 57af2f0..d426d42 100644
--- a/arch/arc/include/asm/pgtable.h
+++ b/arch/arc/include/asm/pgtable.h
@@ -179,37 +179,44 @@
#define __S111 PAGE_U_X_W_R
/****************************************************************
- * Page Table Lookup split
+ * 2 tier (PGD:PTE) software page walker
*
- * We implement 2 tier paging and since this is all software, we are free
- * to customize the span of a PGD / PTE entry to suit us
- *
- * 32 bit virtual address
+ * [31] 32 bit virtual address [0]
* -------------------------------------------------------
- * | BITS_FOR_PGD | BITS_FOR_PTE | BITS_IN_PAGE |
+ * | | <------------ PGDIR_SHIFT ----------> |
+ * | | |
+ * | BITS_FOR_PGD | BITS_FOR_PTE | <-- PAGE_SHIFT --> |
* -------------------------------------------------------
* | | |
* | | --> off in page frame
- * | |
* | ---> index into Page Table
- * |
* ----> index into Page Directory
+ *
+ * In a single page size configuration, only PAGE_SHIFT is fixed
+ * So both PGD and PTE sizing can be tweaked
+ * e.g. 8K page (PAGE_SHIFT 13) can have
+ * - PGDIR_SHIFT 21 -> 11:8:13 address split
+ * - PGDIR_SHIFT 24 -> 8:11:13 address split
+ *
+ * If Super Page is configured, PGDIR_SHIFT becomes fixed too,
+ * so the sizing flexibility is gone.
*/
-#define BITS_IN_PAGE PAGE_SHIFT
-
-/* Optimal Sizing of Pg Tbl - based on MMU page size */
-#if defined(CONFIG_ARC_PAGE_SIZE_8K)
-#define BITS_FOR_PTE 8 /* 11:8:13 */
-#elif defined(CONFIG_ARC_PAGE_SIZE_16K)
-#define BITS_FOR_PTE 8 /* 10:8:14 */
-#elif defined(CONFIG_ARC_PAGE_SIZE_4K)
-#define BITS_FOR_PTE 9 /* 11:9:12 */
+#if defined(CONFIG_ARC_HUGEPAGE_16M)
+#define PGDIR_SHIFT 24
+#elif defined(CONFIG_ARC_HUGEPAGE_2M)
+#define PGDIR_SHIFT 21
+#else
+/*
+ * Only Normal page support, so "hackable" (see comment above)
+ * Default value provides 11:8:13 (8K), 11:9:12 (4K)
+ */
+#define PGDIR_SHIFT 21
#endif
-#define BITS_FOR_PGD (32 - BITS_FOR_PTE - BITS_IN_PAGE)
+#define BITS_FOR_PTE (PGDIR_SHIFT - PAGE_SHIFT)
+#define BITS_FOR_PGD (32 - PGDIR_SHIFT)
-#define PGDIR_SHIFT (32 - BITS_FOR_PGD)
#define PGDIR_SIZE (1UL << PGDIR_SHIFT) /* vaddr span, not PDG sz */
#define PGDIR_MASK (~(PGDIR_SIZE-1))
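
For illustration (a sketch, not part of the patch), the 11:8:13 split that
the comment above derives for an 8K page with PGDIR_SHIFT 21 decodes a
virtual address as:

    /* assumes PAGE_SHIFT == 13 and PGDIR_SHIFT == 21 */
    static void decode_vaddr(unsigned long vaddr)
    {
            unsigned long pgd_idx = vaddr >> 21;                  /* 11 bits */
            unsigned long pte_idx = (vaddr >> 13) & 0xff;         /*  8 bits */
            unsigned long offset  = vaddr & ((1UL << 13) - 1);    /* 13 bits */

            pr_info("pgd %lu pte %lu off %lu\n", pgd_idx, pte_idx, offset);
    }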
diff --git a/arch/arc/kernel/entry-arcv2.S b/arch/arc/kernel/entry-arcv2.S
index cbfec79..c126460 100644
--- a/arch/arc/kernel/entry-arcv2.S
+++ b/arch/arc/kernel/entry-arcv2.S
@@ -45,11 +45,12 @@
VECTOR handle_interrupt ; (16) Timer0
VECTOR handle_interrupt ; unused (Timer1)
VECTOR handle_interrupt ; unused (WDT)
-VECTOR handle_interrupt ; (19) ICI (inter core interrupt)
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt
-VECTOR handle_interrupt ; (23) End of fixed IRQs
+VECTOR handle_interrupt ; (19) Inter core Interrupt (IPI)
+VECTOR handle_interrupt ; (20) perf Interrupt
+VECTOR handle_interrupt ; (21) Software Triggered Intr (Self IPI)
+VECTOR handle_interrupt ; unused
+VECTOR handle_interrupt ; (23) unused
+# End of fixed IRQs
.rept CONFIG_ARC_NUMBER_OF_INTERRUPTS - 8
VECTOR handle_interrupt
@@ -211,7 +212,11 @@
; (since IRQ NOT allowed in DS in ARCv2, this can only happen if orig
; entry was via Exception in DS which got preempted in kernel).
;
-; IRQ RTIE won't reliably restore DE bit and/or BTA, needs handling
+; IRQ RTIE won't reliably restore DE bit and/or BTA, needs workaround
+;
+; Solution is to return from the intr w/o any delay slot quirks into a kernel
+; trampoline, and from pure kernel mode return to the delay slot, which handles
+; the DS bit/BTA correctly
+
.Lintr_ret_to_delay_slot:
debug_marker_ds:
@@ -222,18 +227,23 @@
ld r2, [sp, PT_ret]
ld r3, [sp, PT_status32]
+ ; STAT32 for Int return created from scratch
+	; (No delay slot, disable further intr in trampoline)
+
bic r0, r3, STATUS_U_MASK|STATUS_DE_MASK|STATUS_IE_MASK|STATUS_L_MASK
st r0, [sp, PT_status32]
mov r1, .Lintr_ret_to_delay_slot_2
st r1, [sp, PT_ret]
+ ; Orig exception PC/STAT32 safekept @orig_r0 and @event stack slots
st r2, [sp, 0]
st r3, [sp, 4]
b .Lisr_ret_fast_path
.Lintr_ret_to_delay_slot_2:
+ ; Trampoline to restore orig exception PC/STAT32/BTA/AUX_USER_SP
sub sp, sp, SZ_PT_REGS
st r9, [sp, -4]
@@ -243,11 +253,19 @@
ld r9, [sp, 4]
sr r9, [erstatus]
+ ; restore AUX_USER_SP if returning to U mode
+ bbit0 r9, STATUS_U_BIT, 1f
+ ld r9, [sp, PT_sp]
+ sr r9, [AUX_USER_SP]
+
+1:
ld r9, [sp, 8]
sr r9, [erbta]
ld r9, [sp, -4]
add sp, sp, SZ_PT_REGS
+
+ ; return from pure kernel mode to delay slot
rtie
END(ret_from_exception)
diff --git a/arch/arc/kernel/intc-arcv2.c b/arch/arc/kernel/intc-arcv2.c
index 0394f9f..9425263 100644
--- a/arch/arc/kernel/intc-arcv2.c
+++ b/arch/arc/kernel/intc-arcv2.c
@@ -14,6 +14,8 @@
#include <linux/irqchip.h>
#include <asm/irq.h>
+static int irq_prio;
+
/*
* Early Hardware specific Interrupt setup
* -Called very early (start_kernel -> setup_arch -> setup_processor)
@@ -24,6 +26,14 @@
{
unsigned int tmp;
+ struct irq_build {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ unsigned int pad:3, firq:1, prio:4, exts:8, irqs:8, ver:8;
+#else
+ unsigned int ver:8, irqs:8, exts:8, prio:4, firq:1, pad:3;
+#endif
+ } irq_bcr;
+
struct aux_irq_ctrl {
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int res3:18, save_idx_regs:1, res2:1,
@@ -46,28 +56,25 @@
WRITE_AUX(AUX_IRQ_CTRL, ictrl);
- /* setup status32, don't enable intr yet as kernel doesn't want */
- tmp = read_aux_reg(0xa);
- tmp |= ISA_INIT_STATUS_BITS;
- tmp &= ~STATUS_IE_MASK;
- asm volatile("flag %0 \n"::"r"(tmp));
-
/*
 * ARCv2 core intc provides multiple interrupt priorities (up to 16).
* Typical builds though have only two levels (0-high, 1-low)
* Linux by default uses lower prio 1 for most irqs, reserving 0 for
* NMI style interrupts in future (say perf)
- *
- * Read the intc BCR to confirm that Linux default priority is avail
- * in h/w
- *
- * Note:
- * IRQ_BCR[27..24] contains N-1 (for N priority levels) and prio level
- * is 0 based.
*/
- tmp = (read_aux_reg(ARC_REG_IRQ_BCR) >> 24 ) & 0xF;
- if (ARCV2_IRQ_DEF_PRIO > tmp)
- panic("Linux default irq prio incorrect\n");
+
+ READ_BCR(ARC_REG_IRQ_BCR, irq_bcr);
+
+ irq_prio = irq_bcr.prio; /* Encoded as N-1 for N levels */
+ pr_info("archs-intc\t: %d priority levels (default %d)%s\n",
+ irq_prio + 1, irq_prio,
+ irq_bcr.firq ? " FIRQ (not used)":"");
+
+ /* setup status32, don't enable intr yet as kernel doesn't want */
+ tmp = read_aux_reg(0xa);
+ tmp |= STATUS_AD_MASK | (irq_prio << 1);
+ tmp &= ~STATUS_IE_MASK;
+ asm volatile("flag %0 \n"::"r"(tmp));
}
static void arcv2_irq_mask(struct irq_data *data)
@@ -86,7 +93,7 @@
{
/* set default priority */
write_aux_reg(AUX_IRQ_SELECT, data->irq);
- write_aux_reg(AUX_IRQ_PRIORITY, ARCV2_IRQ_DEF_PRIO);
+ write_aux_reg(AUX_IRQ_PRIORITY, irq_prio);
/*
* hw auto enables (linux unmask) all by default
diff --git a/arch/arc/kernel/intc-compact.c b/arch/arc/kernel/intc-compact.c
index 06bcedf..224d1c3 100644
--- a/arch/arc/kernel/intc-compact.c
+++ b/arch/arc/kernel/intc-compact.c
@@ -81,9 +81,6 @@
{
switch (irq) {
case TIMER0_IRQ:
-#ifdef CONFIG_SMP
- case IPI_IRQ:
-#endif
irq_set_chip_and_handler(irq, &onchip_intc, handle_percpu_irq);
break;
default:
diff --git a/arch/arc/kernel/mcip.c b/arch/arc/kernel/mcip.c
index bd237ac..c41c364 100644
--- a/arch/arc/kernel/mcip.c
+++ b/arch/arc/kernel/mcip.c
@@ -11,9 +11,13 @@
#include <linux/smp.h>
#include <linux/irq.h>
#include <linux/spinlock.h>
+#include <asm/irqflags-arcv2.h>
#include <asm/mcip.h>
#include <asm/setup.h>
+#define IPI_IRQ 19
+#define SOFTIRQ_IRQ 21
+
static char smp_cpuinfo_buf[128];
static int idu_detected;
@@ -22,6 +26,7 @@
static void mcip_setup_per_cpu(int cpu)
{
smp_ipi_irq_setup(cpu, IPI_IRQ);
+ smp_ipi_irq_setup(cpu, SOFTIRQ_IRQ);
}
static void mcip_ipi_send(int cpu)
@@ -29,46 +34,44 @@
unsigned long flags;
int ipi_was_pending;
+ /* ARConnect can only send IPI to others */
+ if (unlikely(cpu == raw_smp_processor_id())) {
+ arc_softirq_trigger(SOFTIRQ_IRQ);
+ return;
+ }
+
+ raw_spin_lock_irqsave(&mcip_lock, flags);
+
/*
- * NOTE: We must spin here if the other cpu hasn't yet
- * serviced a previous message. This can burn lots
- * of time, but we MUST follows this protocol or
- * ipi messages can be lost!!!
- * Also, we must release the lock in this loop because
- * the other side may get to this same loop and not
- * be able to ack -- thus causing deadlock.
+	 * If the receiver already has a pending interrupt, elide sending this
+	 * one. Linux cross-core calling works well with concurrent IPIs
+	 * coalesced into one;
+ * see arch/arc/kernel/smp.c: ipi_send_msg_one()
*/
+ __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
+ ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
+ if (!ipi_was_pending)
+ __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
- do {
- raw_spin_lock_irqsave(&mcip_lock, flags);
- __mcip_cmd(CMD_INTRPT_READ_STATUS, cpu);
- ipi_was_pending = read_aux_reg(ARC_REG_MCIP_READBACK);
- if (ipi_was_pending == 0)
- break; /* break out but keep lock */
- raw_spin_unlock_irqrestore(&mcip_lock, flags);
- } while (1);
-
- __mcip_cmd(CMD_INTRPT_GENERATE_IRQ, cpu);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
-
-#ifdef CONFIG_ARC_IPI_DBG
- if (ipi_was_pending)
- pr_info("IPI ACK delayed from cpu %d\n", cpu);
-#endif
}
static void mcip_ipi_clear(int irq)
{
unsigned int cpu, c;
unsigned long flags;
- unsigned int __maybe_unused copy;
+
+ if (unlikely(irq == SOFTIRQ_IRQ)) {
+ arc_softirq_clear(irq);
+ return;
+ }
raw_spin_lock_irqsave(&mcip_lock, flags);
/* Who sent the IPI */
__mcip_cmd(CMD_INTRPT_CHECK_SOURCE, 0);
- copy = cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
+ cpu = read_aux_reg(ARC_REG_MCIP_READBACK); /* 1,2,4,8... */
/*
* In rare case, multiple concurrent IPIs sent to same target can
@@ -82,12 +85,6 @@
} while (cpu);
raw_spin_unlock_irqrestore(&mcip_lock, flags);
-
-#ifdef CONFIG_ARC_IPI_DBG
- if (c != __ffs(copy))
- pr_info("IPIs from %x coalesced to %x\n",
- copy, raw_smp_processor_id());
-#endif
}
static void mcip_probe_n_setup(void)
@@ -96,13 +93,13 @@
#ifdef CONFIG_CPU_BIG_ENDIAN
unsigned int pad3:8,
idu:1, llm:1, num_cores:6,
- iocoh:1, grtc:1, dbg:1, pad2:1,
+ iocoh:1, gfrc:1, dbg:1, pad2:1,
msg:1, sem:1, ipi:1, pad:1,
ver:8;
#else
unsigned int ver:8,
pad:1, ipi:1, sem:1, msg:1,
- pad2:1, dbg:1, grtc:1, iocoh:1,
+ pad2:1, dbg:1, gfrc:1, iocoh:1,
num_cores:6, llm:1, idu:1,
pad3:8;
#endif
@@ -111,12 +108,13 @@
READ_BCR(ARC_REG_MCIP_BCR, mp);
sprintf(smp_cpuinfo_buf,
- "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s\n",
+ "Extn [SMP]\t: ARConnect (v%d): %d cores with %s%s%s%s%s\n",
mp.ver, mp.num_cores,
IS_AVAIL1(mp.ipi, "IPI "),
IS_AVAIL1(mp.idu, "IDU "),
+ IS_AVAIL1(mp.llm, "LLM "),
IS_AVAIL1(mp.dbg, "DEBUG "),
- IS_AVAIL1(mp.grtc, "GRTC"));
+ IS_AVAIL1(mp.gfrc, "GFRC"));
idu_detected = mp.idu;
@@ -125,8 +123,8 @@
__mcip_cmd_data(CMD_DEBUG_SET_MASK, 0xf, 0xf);
}
- if (IS_ENABLED(CONFIG_ARC_HAS_GRTC) && !mp.grtc)
- panic("kernel trying to use non-existent GRTC\n");
+ if (IS_ENABLED(CONFIG_ARC_HAS_GFRC) && !mp.gfrc)
+ panic("kernel trying to use non-existent GFRC\n");
}
struct plat_smp_ops plat_smp_ops = {
diff --git a/arch/arc/kernel/setup.c b/arch/arc/kernel/setup.c
index e1b8744..cdc821d 100644
--- a/arch/arc/kernel/setup.c
+++ b/arch/arc/kernel/setup.c
@@ -42,9 +42,57 @@
struct cpuinfo_arc cpuinfo_arc700[NR_CPUS];
+static void read_decode_ccm_bcr(struct cpuinfo_arc *cpu)
+{
+ if (is_isa_arcompact()) {
+ struct bcr_iccm_arcompact iccm;
+ struct bcr_dccm_arcompact dccm;
+
+ READ_BCR(ARC_REG_ICCM_BUILD, iccm);
+ if (iccm.ver) {
+ cpu->iccm.sz = 4096 << iccm.sz; /* 8K to 512K */
+ cpu->iccm.base_addr = iccm.base << 16;
+ }
+
+ READ_BCR(ARC_REG_DCCM_BUILD, dccm);
+ if (dccm.ver) {
+ unsigned long base;
+ cpu->dccm.sz = 2048 << dccm.sz; /* 2K to 256K */
+
+ base = read_aux_reg(ARC_REG_DCCM_BASE_BUILD);
+ cpu->dccm.base_addr = base & ~0xF;
+ }
+ } else {
+ struct bcr_iccm_arcv2 iccm;
+ struct bcr_dccm_arcv2 dccm;
+ unsigned long region;
+
+ READ_BCR(ARC_REG_ICCM_BUILD, iccm);
+ if (iccm.ver) {
+ cpu->iccm.sz = 256 << iccm.sz00; /* 512B to 16M */
+ if (iccm.sz00 == 0xF && iccm.sz01 > 0)
+ cpu->iccm.sz <<= iccm.sz01;
+
+ region = read_aux_reg(ARC_REG_AUX_ICCM);
+ cpu->iccm.base_addr = region & 0xF0000000;
+ }
+
+ READ_BCR(ARC_REG_DCCM_BUILD, dccm);
+ if (dccm.ver) {
+ cpu->dccm.sz = 256 << dccm.sz0;
+ if (dccm.sz0 == 0xF && dccm.sz1 > 0)
+ cpu->dccm.sz <<= dccm.sz1;
+
+ region = read_aux_reg(ARC_REG_AUX_DCCM);
+ cpu->dccm.base_addr = region & 0xF0000000;
+ }
+ }
+}
+
static void read_arc_build_cfg_regs(void)
{
struct bcr_perip uncached_space;
+ struct bcr_timer timer;
struct bcr_generic bcr;
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
unsigned long perip_space;
@@ -53,7 +101,11 @@
READ_BCR(AUX_IDENTITY, cpu->core);
READ_BCR(ARC_REG_ISA_CFG_BCR, cpu->isa);
- READ_BCR(ARC_REG_TIMERS_BCR, cpu->timers);
+ READ_BCR(ARC_REG_TIMERS_BCR, timer);
+ cpu->extn.timer0 = timer.t0;
+ cpu->extn.timer1 = timer.t1;
+ cpu->extn.rtc = timer.rtc;
+
cpu->vec_base = read_aux_reg(AUX_INTR_VEC_BASE);
READ_BCR(ARC_REG_D_UNCACH_BCR, uncached_space);
@@ -71,36 +123,11 @@
cpu->extn.swap = read_aux_reg(ARC_REG_SWAP_BCR) ? 1 : 0; /* 1,3 */
cpu->extn.crc = read_aux_reg(ARC_REG_CRC_BCR) ? 1 : 0;
cpu->extn.minmax = read_aux_reg(ARC_REG_MIXMAX_BCR) > 1 ? 1 : 0; /* 2 */
-
- /* Note that we read the CCM BCRs independent of kernel config
- * This is to catch the cases where user doesn't know that
- * CCMs are present in hardware build
- */
- {
- struct bcr_iccm iccm;
- struct bcr_dccm dccm;
- struct bcr_dccm_base dccm_base;
- unsigned int bcr_32bit_val;
-
- bcr_32bit_val = read_aux_reg(ARC_REG_ICCM_BCR);
- if (bcr_32bit_val) {
- iccm = *((struct bcr_iccm *)&bcr_32bit_val);
- cpu->iccm.base_addr = iccm.base << 16;
- cpu->iccm.sz = 0x2000 << (iccm.sz - 1);
- }
-
- bcr_32bit_val = read_aux_reg(ARC_REG_DCCM_BCR);
- if (bcr_32bit_val) {
- dccm = *((struct bcr_dccm *)&bcr_32bit_val);
- cpu->dccm.sz = 0x800 << (dccm.sz);
-
- READ_BCR(ARC_REG_DCCMBASE_BCR, dccm_base);
- cpu->dccm.base_addr = dccm_base.addr << 8;
- }
- }
-
READ_BCR(ARC_REG_XY_MEM_BCR, cpu->extn_xymem);
+ /* Read CCM BCRs for boot reporting even if not enabled in Kconfig */
+ read_decode_ccm_bcr(cpu);
+
read_decode_mmu_bcr();
read_decode_cache_bcr();
@@ -208,9 +235,9 @@
(unsigned int)(arc_get_core_freq() / 10000) % 100);
n += scnprintf(buf + n, len - n, "Timers\t\t: %s%s%s%s\nISA Extn\t: ",
- IS_AVAIL1(cpu->timers.t0, "Timer0 "),
- IS_AVAIL1(cpu->timers.t1, "Timer1 "),
- IS_AVAIL2(cpu->timers.rtc, "64-bit RTC ",
+ IS_AVAIL1(cpu->extn.timer0, "Timer0 "),
+ IS_AVAIL1(cpu->extn.timer1, "Timer1 "),
+ IS_AVAIL2(cpu->extn.rtc, "Local-64-bit-Ctr ",
CONFIG_ARC_HAS_RTC));
n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s",
@@ -232,8 +259,6 @@
n += scnprintf(buf + n, len - n, "mpy[opt %d] ", opt);
}
- n += scnprintf(buf + n, len - n, "%s",
- IS_USED_CFG(CONFIG_ARC_HAS_HW_MPY));
}
n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s%s%s\n",
@@ -293,13 +318,13 @@
struct cpuinfo_arc *cpu = &cpuinfo_arc700[smp_processor_id()];
int fpu_enabled;
- if (!cpu->timers.t0)
+ if (!cpu->extn.timer0)
panic("Timer0 is not present!\n");
- if (!cpu->timers.t1)
+ if (!cpu->extn.timer1)
panic("Timer1 is not present!\n");
- if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->timers.rtc)
+ if (IS_ENABLED(CONFIG_ARC_HAS_RTC) && !cpu->extn.rtc)
panic("RTC is not present\n");
#ifdef CONFIG_ARC_HAS_DCCM
@@ -334,6 +359,7 @@
panic("FPU non-existent, disable CONFIG_ARC_FPU_SAVE_RESTORE\n");
if (is_isa_arcv2() && IS_ENABLED(CONFIG_SMP) && cpu->isa.atomic &&
+ IS_ENABLED(CONFIG_ARC_HAS_LLSC) &&
!IS_ENABLED(CONFIG_ARC_STAR_9000923308))
panic("llock/scond livelock workaround missing\n");
}
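
A minimal standalone sketch of the ARCv2 CCM size decode introduced above, assuming only the sz00/sz01 field semantics visible in the hunk (plain C, not kernel code): the base size is 256 << sz00, and a saturated sz00 of 0xF hands further scaling off to sz01.

    #include <stdio.h>

    /* Sketch: decode an ARCv2 ICCM/DCCM size from the sz00/sz01 BCR fields. */
    static unsigned long ccm_size(unsigned int sz00, unsigned int sz01)
    {
        unsigned long sz = 256UL << sz00;   /* 512B upwards for sz00 >= 1 */

        if (sz00 == 0xF && sz01 > 0)        /* extended range via sz01 */
            sz <<= sz01;
        return sz;
    }

    int main(void)
    {
        printf("%lu\n", ccm_size(1, 0));    /* 512 */
        printf("%lu\n", ccm_size(0xF, 1));  /* 8M base doubled to 16M */
        return 0;
    }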
diff --git a/arch/arc/kernel/smp.c b/arch/arc/kernel/smp.c
index ef6e9e1..424e937 100644
--- a/arch/arc/kernel/smp.c
+++ b/arch/arc/kernel/smp.c
@@ -336,11 +336,8 @@
int rc;
rc = __do_IPI(msg);
-#ifdef CONFIG_ARC_IPI_DBG
- /* IPI received but no valid @msg */
if (rc)
pr_info("IPI with bogus msg %ld in %ld\n", msg, copy);
-#endif
pending &= ~(1U << msg);
} while (pending);
diff --git a/arch/arc/kernel/time.c b/arch/arc/kernel/time.c
index dfad287..156d983 100644
--- a/arch/arc/kernel/time.c
+++ b/arch/arc/kernel/time.c
@@ -62,7 +62,7 @@
/********** Clock Source Device *********/
-#ifdef CONFIG_ARC_HAS_GRTC
+#ifdef CONFIG_ARC_HAS_GFRC
static int arc_counter_setup(void)
{
@@ -83,10 +83,10 @@
local_irq_save(flags);
- __mcip_cmd(CMD_GRTC_READ_LO, 0);
+ __mcip_cmd(CMD_GFRC_READ_LO, 0);
stamp.l = read_aux_reg(ARC_REG_MCIP_READBACK);
- __mcip_cmd(CMD_GRTC_READ_HI, 0);
+ __mcip_cmd(CMD_GFRC_READ_HI, 0);
stamp.h = read_aux_reg(ARC_REG_MCIP_READBACK);
local_irq_restore(flags);
@@ -95,7 +95,7 @@
}
static struct clocksource arc_counter = {
- .name = "ARConnect GRTC",
+ .name = "ARConnect GFRC",
.rating = 400,
.read = arc_counter_read,
.mask = CLOCKSOURCE_MASK(64),
diff --git a/arch/arm/boot/dts/am335x-bone-common.dtsi b/arch/arm/boot/dts/am335x-bone-common.dtsi
index f3db13d..0cc150b 100644
--- a/arch/arm/boot/dts/am335x-bone-common.dtsi
+++ b/arch/arm/boot/dts/am335x-bone-common.dtsi
@@ -285,8 +285,10 @@
};
};
+
+/include/ "tps65217.dtsi"
+
&tps {
- compatible = "ti,tps65217";
/*
* Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only
* mode") at poweroff. Most BeagleBone versions do not support RTC-only
@@ -307,17 +309,12 @@
ti,pmic-shutdown-controller;
regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
dcdc1_reg: regulator@0 {
- reg = <0>;
regulator-name = "vdds_dpr";
regulator-always-on;
};
dcdc2_reg: regulator@1 {
- reg = <1>;
/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <925000>;
@@ -327,7 +324,6 @@
};
dcdc3_reg: regulator@2 {
- reg = <2>;
/* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
regulator-name = "vdd_core";
regulator-min-microvolt = <925000>;
@@ -337,25 +333,21 @@
};
ldo1_reg: regulator@3 {
- reg = <3>;
regulator-name = "vio,vrtc,vdds";
regulator-always-on;
};
ldo2_reg: regulator@4 {
- reg = <4>;
regulator-name = "vdd_3v3aux";
regulator-always-on;
};
ldo3_reg: regulator@5 {
- reg = <5>;
regulator-name = "vdd_1v8";
regulator-always-on;
};
ldo4_reg: regulator@6 {
- reg = <6>;
regulator-name = "vdd_3v3a";
regulator-always-on;
};
diff --git a/arch/arm/boot/dts/am335x-chilisom.dtsi b/arch/arm/boot/dts/am335x-chilisom.dtsi
index fda457b..857d989 100644
--- a/arch/arm/boot/dts/am335x-chilisom.dtsi
+++ b/arch/arm/boot/dts/am335x-chilisom.dtsi
@@ -128,21 +128,16 @@
};
+/include/ "tps65217.dtsi"
+
&tps {
- compatible = "ti,tps65217";
-
regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
dcdc1_reg: regulator@0 {
- reg = <0>;
regulator-name = "vdds_dpr";
regulator-always-on;
};
dcdc2_reg: regulator@1 {
- reg = <1>;
/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <925000>;
@@ -152,7 +147,6 @@
};
dcdc3_reg: regulator@2 {
- reg = <2>;
/* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
regulator-name = "vdd_core";
regulator-min-microvolt = <925000>;
@@ -162,28 +156,24 @@
};
ldo1_reg: regulator@3 {
- reg = <3>;
regulator-name = "vio,vrtc,vdds";
regulator-boot-on;
regulator-always-on;
};
ldo2_reg: regulator@4 {
- reg = <4>;
regulator-name = "vdd_3v3aux";
regulator-boot-on;
regulator-always-on;
};
ldo3_reg: regulator@5 {
- reg = <5>;
regulator-name = "vdd_1v8";
regulator-boot-on;
regulator-always-on;
};
ldo4_reg: regulator@6 {
- reg = <6>;
regulator-name = "vdd_3v3d";
regulator-boot-on;
regulator-always-on;
diff --git a/arch/arm/boot/dts/am335x-nano.dts b/arch/arm/boot/dts/am335x-nano.dts
index 77559a1..f313999 100644
--- a/arch/arm/boot/dts/am335x-nano.dts
+++ b/arch/arm/boot/dts/am335x-nano.dts
@@ -375,15 +375,11 @@
wp-gpios = <&gpio3 18 0>;
};
+#include "tps65217.dtsi"
+
&tps {
- compatible = "ti,tps65217";
-
regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
dcdc1_reg: regulator@0 {
- reg = <0>;
/* +1.5V voltage with ±4% tolerance */
regulator-min-microvolt = <1450000>;
regulator-max-microvolt = <1550000>;
@@ -392,7 +388,6 @@
};
dcdc2_reg: regulator@1 {
- reg = <1>;
/* VDD_MPU voltage limits 0.95V - 1.1V with ±4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <915000>;
@@ -402,7 +397,6 @@
};
dcdc3_reg: regulator@2 {
- reg = <2>;
/* VDD_CORE voltage limits 0.95V - 1.1V with ±4% tolerance */
regulator-name = "vdd_core";
regulator-min-microvolt = <915000>;
@@ -412,7 +406,6 @@
};
ldo1_reg: regulator@3 {
- reg = <3>;
/* +1.8V voltage with ±4% tolerance */
regulator-min-microvolt = <1750000>;
regulator-max-microvolt = <1870000>;
@@ -421,7 +414,6 @@
};
ldo2_reg: regulator@4 {
- reg = <4>;
/* +3.3V voltage with ±4% tolerance */
regulator-min-microvolt = <3175000>;
regulator-max-microvolt = <3430000>;
@@ -430,7 +422,6 @@
};
ldo3_reg: regulator@5 {
- reg = <5>;
/* +1.8V voltage with ±4% tolerance */
regulator-min-microvolt = <1750000>;
regulator-max-microvolt = <1870000>;
@@ -439,7 +430,6 @@
};
ldo4_reg: regulator@6 {
- reg = <6>;
/* +3.3V voltage with ±4% tolerance */
regulator-min-microvolt = <3175000>;
regulator-max-microvolt = <3430000>;
diff --git a/arch/arm/boot/dts/am335x-pepper.dts b/arch/arm/boot/dts/am335x-pepper.dts
index 471a3a7..8867aaa 100644
--- a/arch/arm/boot/dts/am335x-pepper.dts
+++ b/arch/arm/boot/dts/am335x-pepper.dts
@@ -420,9 +420,9 @@
vin-supply = <&vbat>;
};
-&tps {
- compatible = "ti,tps65217";
+/include/ "tps65217.dtsi"
+&tps {
backlight {
isel = <1>; /* ISET1 */
fdim = <200>; /* TPS65217_BL_FDIM_200HZ */
@@ -430,17 +430,12 @@
};
regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
dcdc1_reg: regulator@0 {
- reg = <0>;
/* VDD_1V8 system supply */
regulator-always-on;
};
dcdc2_reg: regulator@1 {
- reg = <1>;
/* VDD_CORE voltage limits 0.95V - 1.26V with +/-4% tolerance */
regulator-name = "vdd_core";
regulator-min-microvolt = <925000>;
@@ -450,7 +445,6 @@
};
dcdc3_reg: regulator@2 {
- reg = <2>;
/* VDD_MPU voltage limits 0.95V - 1.1V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <925000>;
@@ -460,21 +454,18 @@
};
ldo1_reg: regulator@3 {
- reg = <3>;
/* VRTC 1.8V always-on supply */
regulator-name = "vrtc,vdds";
regulator-always-on;
};
ldo2_reg: regulator@4 {
- reg = <4>;
/* 3.3V rail */
regulator-name = "vdd_3v3aux";
regulator-always-on;
};
ldo3_reg: regulator@5 {
- reg = <5>;
/* VDD_3V3A 3.3V rail */
regulator-name = "vdd_3v3a";
regulator-min-microvolt = <3300000>;
@@ -482,7 +473,6 @@
};
ldo4_reg: regulator@6 {
- reg = <6>;
/* VDD_3V3B 3.3V rail */
regulator-name = "vdd_3v3b";
regulator-always-on;
diff --git a/arch/arm/boot/dts/am335x-shc.dts b/arch/arm/boot/dts/am335x-shc.dts
index 1b5b044..865de85 100644
--- a/arch/arm/boot/dts/am335x-shc.dts
+++ b/arch/arm/boot/dts/am335x-shc.dts
@@ -46,7 +46,7 @@
gpios = <&gpio1 29 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_BACK>;
debounce-interval = <1000>;
- gpio-key,wakeup;
+ wakeup-source;
};
front_button {
@@ -54,7 +54,7 @@
gpios = <&gpio1 25 GPIO_ACTIVE_HIGH>;
linux,code = <KEY_FRONT>;
debounce-interval = <1000>;
- gpio-key,wakeup;
+ wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/am335x-sl50.dts b/arch/arm/boot/dts/am335x-sl50.dts
index d38edfa..3303c28 100644
--- a/arch/arm/boot/dts/am335x-sl50.dts
+++ b/arch/arm/boot/dts/am335x-sl50.dts
@@ -375,19 +375,16 @@
pinctrl-0 = <&uart4_pins>;
};
+#include "tps65217.dtsi"
+
&tps {
- compatible = "ti,tps65217";
ti,pmic-shutdown-controller;
interrupt-parent = <&intc>;
interrupts = <7>; /* NNMI */
regulators {
- #address-cells = <1>;
- #size-cells = <0>;
-
dcdc1_reg: regulator@0 {
- reg = <0>;
/* VDDS_DDR */
regulator-min-microvolt = <1500000>;
regulator-max-microvolt = <1500000>;
@@ -395,7 +392,6 @@
};
dcdc2_reg: regulator@1 {
- reg = <1>;
/* VDD_MPU voltage limits 0.95V - 1.26V with +/-4% tolerance */
regulator-name = "vdd_mpu";
regulator-min-microvolt = <925000>;
@@ -405,7 +401,6 @@
};
dcdc3_reg: regulator@2 {
- reg = <2>;
/* VDD_CORE voltage limits 0.95V - 1.1V with +/-4% tolerance */
regulator-name = "vdd_core";
regulator-min-microvolt = <925000>;
@@ -415,7 +410,6 @@
};
ldo1_reg: regulator@3 {
- reg = <3>;
/* VRTC / VIO / VDDS*/
regulator-always-on;
regulator-min-microvolt = <1800000>;
@@ -423,7 +417,6 @@
};
ldo2_reg: regulator@4 {
- reg = <4>;
/* VDD_3V3AUX */
regulator-always-on;
regulator-min-microvolt = <3300000>;
@@ -431,7 +424,6 @@
};
ldo3_reg: regulator@5 {
- reg = <5>;
/* VDD_1V8 */
regulator-min-microvolt = <1800000>;
regulator-max-microvolt = <1800000>;
@@ -439,7 +431,6 @@
};
ldo4_reg: regulator@6 {
- reg = <6>;
/* VDD_3V3A */
regulator-min-microvolt = <3300000>;
regulator-max-microvolt = <3300000>;
diff --git a/arch/arm/boot/dts/am57xx-beagle-x15.dts b/arch/arm/boot/dts/am57xx-beagle-x15.dts
index 36c0fa6..a0986c6 100644
--- a/arch/arm/boot/dts/am57xx-beagle-x15.dts
+++ b/arch/arm/boot/dts/am57xx-beagle-x15.dts
@@ -173,6 +173,8 @@
sound0_master: simple-audio-card,codec {
sound-dai = <&tlv320aic3104>;
+ assigned-clocks = <&clkoutmux2_clk_mux>;
+ assigned-clock-parents = <&sys_clk2_dclk_div>;
clocks = <&clkout2_clk>;
};
};
@@ -796,6 +798,8 @@
pinctrl-names = "default", "sleep";
pinctrl-0 = <&mcasp3_pins_default>;
pinctrl-1 = <&mcasp3_pins_sleep>;
+ assigned-clocks = <&mcasp3_ahclkx_mux>;
+ assigned-clock-parents = <&sys_clkin2>;
status = "okay";
op-mode = <0>; /* MCASP_IIS_MODE */
diff --git a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
index 8d93882..1c06cb7 100644
--- a/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
+++ b/arch/arm/boot/dts/am57xx-cl-som-am57x.dts
@@ -545,7 +545,7 @@
ti,debounce-tol = /bits/ 16 <10>;
ti,debounce-rep = /bits/ 16 <1>;
- linux,wakeup;
+ wakeup-source;
};
};
diff --git a/arch/arm/boot/dts/imx6qdl.dtsi b/arch/arm/boot/dts/imx6qdl.dtsi
index 4f6ae92..f74d3db 100644
--- a/arch/arm/boot/dts/imx6qdl.dtsi
+++ b/arch/arm/boot/dts/imx6qdl.dtsi
@@ -896,7 +896,6 @@
#size-cells = <1>;
reg = <0x2100000 0x10000>;
ranges = <0 0x2100000 0x10000>;
- interrupt-parent = <&intc>;
clocks = <&clks IMX6QDL_CLK_CAAM_MEM>,
<&clks IMX6QDL_CLK_CAAM_ACLK>,
<&clks IMX6QDL_CLK_CAAM_IPG>,
diff --git a/arch/arm/boot/dts/kirkwood-ds112.dts b/arch/arm/boot/dts/kirkwood-ds112.dts
index bf4143c..b84af3d 100644
--- a/arch/arm/boot/dts/kirkwood-ds112.dts
+++ b/arch/arm/boot/dts/kirkwood-ds112.dts
@@ -14,7 +14,7 @@
#include "kirkwood-synology.dtsi"
/ {
- model = "Synology DS111";
+ model = "Synology DS112";
compatible = "synology,ds111", "marvell,kirkwood";
memory {
diff --git a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
index 4207882..aae8a7a 100644
--- a/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
+++ b/arch/arm/boot/dts/orion5x-linkstation-lswtgl.dts
@@ -228,6 +228,37 @@
};
};
+&devbus_bootcs {
+ status = "okay";
+ devbus,keep-config;
+
+ flash@0 {
+ compatible = "jedec-flash";
+ reg = <0 0x40000>;
+ bank-width = <1>;
+
+ partitions {
+ compatible = "fixed-partitions";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ header@0 {
+ reg = <0 0x30000>;
+ read-only;
+ };
+
+ uboot@30000 {
+ reg = <0x30000 0xF000>;
+ read-only;
+ };
+
+ uboot_env@3F000 {
+ reg = <0x3F000 0x1000>;
+ };
+ };
+ };
+};
+
&mdio {
status = "okay";
diff --git a/arch/arm/boot/dts/sama5d2-pinfunc.h b/arch/arm/boot/dts/sama5d2-pinfunc.h
index 1afe246..b0c912fe 100644
--- a/arch/arm/boot/dts/sama5d2-pinfunc.h
+++ b/arch/arm/boot/dts/sama5d2-pinfunc.h
@@ -90,7 +90,7 @@
#define PIN_PA14__I2SC1_MCK PINMUX_PIN(PIN_PA14, 4, 2)
#define PIN_PA14__FLEXCOM3_IO2 PINMUX_PIN(PIN_PA14, 5, 1)
#define PIN_PA14__D9 PINMUX_PIN(PIN_PA14, 6, 2)
-#define PIN_PA15 14
+#define PIN_PA15 15
#define PIN_PA15__GPIO PINMUX_PIN(PIN_PA15, 0, 0)
#define PIN_PA15__SPI0_MOSI PINMUX_PIN(PIN_PA15, 1, 1)
#define PIN_PA15__TF1 PINMUX_PIN(PIN_PA15, 2, 1)
diff --git a/arch/arm/boot/dts/tps65217.dtsi b/arch/arm/boot/dts/tps65217.dtsi
new file mode 100644
index 0000000..a632724
--- /dev/null
+++ b/arch/arm/boot/dts/tps65217.dtsi
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2012 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * Integrated Power Management Chip
+ * http://www.ti.com/lit/ds/symlink/tps65217.pdf
+ */
+
+&tps {
+ compatible = "ti,tps65217";
+
+ regulators {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ dcdc1_reg: regulator@0 {
+ reg = <0>;
+ regulator-compatible = "dcdc1";
+ };
+
+ dcdc2_reg: regulator@1 {
+ reg = <1>;
+ regulator-compatible = "dcdc2";
+ };
+
+ dcdc3_reg: regulator@2 {
+ reg = <2>;
+ regulator-compatible = "dcdc3";
+ };
+
+ ldo1_reg: regulator@3 {
+ reg = <3>;
+ regulator-compatible = "ldo1";
+ };
+
+ ldo2_reg: regulator@4 {
+ reg = <4>;
+ regulator-compatible = "ldo2";
+ };
+
+ ldo3_reg: regulator@5 {
+ reg = <5>;
+ regulator-compatible = "ldo3";
+ };
+
+ ldo4_reg: regulator@6 {
+ reg = <6>;
+ regulator-compatible = "ldo4";
+ };
+ };
+};
diff --git a/arch/arm/common/icst.c b/arch/arm/common/icst.c
index 2dc6da70..d7ed252 100644
--- a/arch/arm/common/icst.c
+++ b/arch/arm/common/icst.c
@@ -16,7 +16,7 @@
*/
#include <linux/module.h>
#include <linux/kernel.h>
-
+#include <asm/div64.h>
#include <asm/hardware/icst.h>
/*
@@ -29,7 +29,11 @@
unsigned long icst_hz(const struct icst_params *p, struct icst_vco vco)
{
- return p->ref * 2 * (vco.v + 8) / ((vco.r + 2) * p->s2div[vco.s]);
+ u64 dividend = p->ref * 2 * (u64)(vco.v + 8);
+ u32 divisor = (vco.r + 2) * p->s2div[vco.s];
+
+ do_div(dividend, divisor);
+ return (unsigned long)dividend;
}
EXPORT_SYMBOL(icst_hz);
@@ -58,6 +62,7 @@
if (f > p->vco_min && f <= p->vco_max)
break;
+ i++;
} while (i < 8);
if (i >= 8)
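
The icst_hz() change exists because ref * 2 * (vco.v + 8) can exceed 32 bits for large VCO settings, and ARM has no native 64-by-32 division, hence do_div(). The added i++ in the second hunk makes the do/while over the eight s2div settings actually terminate. A userspace sketch of the same computation, with plain division standing in for do_div():

    #include <stdint.h>

    /* Sketch of the overflow-safe icst_hz() computation. */
    static unsigned long icst_hz_sketch(uint32_t ref, uint32_t v,
                                        uint32_t r, uint32_t s2div)
    {
        uint64_t dividend = (uint64_t)ref * 2 * (v + 8);  /* widen first */
        uint32_t divisor  = (r + 2) * s2div;

        return (unsigned long)(dividend / divisor);       /* do_div() in-kernel */
    }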
diff --git a/arch/arm/configs/omap2plus_defconfig b/arch/arm/configs/omap2plus_defconfig
index a715174..d18d6b4 100644
--- a/arch/arm/configs/omap2plus_defconfig
+++ b/arch/arm/configs/omap2plus_defconfig
@@ -292,24 +292,23 @@
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
-CONFIG_OMAP2_DSS=m
-CONFIG_OMAP5_DSS_HDMI=y
-CONFIG_OMAP2_DSS_SDI=y
-CONFIG_OMAP2_DSS_DSI=y
+CONFIG_FB_OMAP5_DSS_HDMI=y
+CONFIG_FB_OMAP2_DSS_SDI=y
+CONFIG_FB_OMAP2_DSS_DSI=y
CONFIG_FB_OMAP2=m
-CONFIG_DISPLAY_ENCODER_TFP410=m
-CONFIG_DISPLAY_ENCODER_TPD12S015=m
-CONFIG_DISPLAY_CONNECTOR_DVI=m
-CONFIG_DISPLAY_CONNECTOR_HDMI=m
-CONFIG_DISPLAY_CONNECTOR_ANALOG_TV=m
-CONFIG_DISPLAY_PANEL_DPI=m
-CONFIG_DISPLAY_PANEL_DSI_CM=m
-CONFIG_DISPLAY_PANEL_SONY_ACX565AKM=m
-CONFIG_DISPLAY_PANEL_LGPHILIPS_LB035Q02=m
-CONFIG_DISPLAY_PANEL_SHARP_LS037V7DW01=m
-CONFIG_DISPLAY_PANEL_TPO_TD028TTEC1=m
-CONFIG_DISPLAY_PANEL_TPO_TD043MTEA1=m
-CONFIG_DISPLAY_PANEL_NEC_NL8048HL11=m
+CONFIG_FB_OMAP2_ENCODER_TFP410=m
+CONFIG_FB_OMAP2_ENCODER_TPD12S015=m
+CONFIG_FB_OMAP2_CONNECTOR_DVI=m
+CONFIG_FB_OMAP2_CONNECTOR_HDMI=m
+CONFIG_FB_OMAP2_CONNECTOR_ANALOG_TV=m
+CONFIG_FB_OMAP2_PANEL_DPI=m
+CONFIG_FB_OMAP2_PANEL_DSI_CM=m
+CONFIG_FB_OMAP2_PANEL_SONY_ACX565AKM=m
+CONFIG_FB_OMAP2_PANEL_LGPHILIPS_LB035Q02=m
+CONFIG_FB_OMAP2_PANEL_SHARP_LS037V7DW01=m
+CONFIG_FB_OMAP2_PANEL_TPO_TD028TTEC1=m
+CONFIG_FB_OMAP2_PANEL_TPO_TD043MTEA1=m
+CONFIG_FB_OMAP2_PANEL_NEC_NL8048HL11=m
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LCD_CLASS_DEVICE=y
CONFIG_LCD_PLATFORM=y
diff --git a/arch/arm/crypto/aes-ce-glue.c b/arch/arm/crypto/aes-ce-glue.c
index b445a5d..89a3a3e 100644
--- a/arch/arm/crypto/aes-ce-glue.c
+++ b/arch/arm/crypto/aes-ce-glue.c
@@ -364,7 +364,7 @@
.cra_blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = 0,
.setkey = ce_aes_setkey,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
@@ -441,7 +441,7 @@
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = 0,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
diff --git a/arch/arm/include/asm/arch_gicv3.h b/arch/arm/include/asm/arch_gicv3.h
index 7da5503..e08d151 100644
--- a/arch/arm/include/asm/arch_gicv3.h
+++ b/arch/arm/include/asm/arch_gicv3.h
@@ -117,6 +117,7 @@
u32 irqstat;
asm volatile("mrc " __stringify(ICC_IAR1) : "=r" (irqstat));
+ dsb(sy);
return irqstat;
}
diff --git a/arch/arm/include/asm/kvm_asm.h b/arch/arm/include/asm/kvm_asm.h
index 194c91b..15d58b4 100644
--- a/arch/arm/include/asm/kvm_asm.h
+++ b/arch/arm/include/asm/kvm_asm.h
@@ -19,38 +19,7 @@
#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__
-/* 0 is reserved as an invalid value. */
-#define c0_MPIDR 1 /* MultiProcessor ID Register */
-#define c0_CSSELR 2 /* Cache Size Selection Register */
-#define c1_SCTLR 3 /* System Control Register */
-#define c1_ACTLR 4 /* Auxiliary Control Register */
-#define c1_CPACR 5 /* Coprocessor Access Control */
-#define c2_TTBR0 6 /* Translation Table Base Register 0 */
-#define c2_TTBR0_high 7 /* TTBR0 top 32 bits */
-#define c2_TTBR1 8 /* Translation Table Base Register 1 */
-#define c2_TTBR1_high 9 /* TTBR1 top 32 bits */
-#define c2_TTBCR 10 /* Translation Table Base Control R. */
-#define c3_DACR 11 /* Domain Access Control Register */
-#define c5_DFSR 12 /* Data Fault Status Register */
-#define c5_IFSR 13 /* Instruction Fault Status Register */
-#define c5_ADFSR 14 /* Auxilary Data Fault Status R */
-#define c5_AIFSR 15 /* Auxilary Instrunction Fault Status R */
-#define c6_DFAR 16 /* Data Fault Address Register */
-#define c6_IFAR 17 /* Instruction Fault Address Register */
-#define c7_PAR 18 /* Physical Address Register */
-#define c7_PAR_high 19 /* PAR top 32 bits */
-#define c9_L2CTLR 20 /* Cortex A15/A7 L2 Control Register */
-#define c10_PRRR 21 /* Primary Region Remap Register */
-#define c10_NMRR 22 /* Normal Memory Remap Register */
-#define c12_VBAR 23 /* Vector Base Address Register */
-#define c13_CID 24 /* Context ID Register */
-#define c13_TID_URW 25 /* Thread ID, User R/W */
-#define c13_TID_URO 26 /* Thread ID, User R/O */
-#define c13_TID_PRIV 27 /* Thread ID, Privileged */
-#define c14_CNTKCTL 28 /* Timer Control Register (PL1) */
-#define c10_AMAIR0 29 /* Auxilary Memory Attribute Indirection Reg0 */
-#define c10_AMAIR1 30 /* Auxilary Memory Attribute Indirection Reg1 */
-#define NR_CP15_REGS 31 /* Number of regs (incl. invalid) */
+#include <asm/virt.h>
#define ARM_EXCEPTION_RESET 0
#define ARM_EXCEPTION_UNDEFINED 1
@@ -86,19 +55,15 @@
extern char __kvm_hyp_init[];
extern char __kvm_hyp_init_end[];
-extern char __kvm_hyp_exit[];
-extern char __kvm_hyp_exit_end[];
-
extern char __kvm_hyp_vector[];
-extern char __kvm_hyp_code_start[];
-extern char __kvm_hyp_code_end[];
-
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+extern void __init_stage2_translation(void);
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 3095df0..ee5328f 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -68,12 +68,12 @@
static inline unsigned long *vcpu_pc(struct kvm_vcpu *vcpu)
{
- return &vcpu->arch.regs.usr_regs.ARM_pc;
+ return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_pc;
}
static inline unsigned long *vcpu_cpsr(struct kvm_vcpu *vcpu)
{
- return &vcpu->arch.regs.usr_regs.ARM_cpsr;
+ return &vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr;
}
static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
@@ -83,13 +83,13 @@
static inline bool mode_has_spsr(struct kvm_vcpu *vcpu)
{
- unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+ unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
return (cpsr_mode > USR_MODE && cpsr_mode < SYSTEM_MODE);
}
static inline bool vcpu_mode_priv(struct kvm_vcpu *vcpu)
{
- unsigned long cpsr_mode = vcpu->arch.regs.usr_regs.ARM_cpsr & MODE_MASK;
+ unsigned long cpsr_mode = vcpu->arch.ctxt.gp_regs.usr_regs.ARM_cpsr & MODE_MASK;
return cpsr_mode > USR_MODE;
}
@@ -108,11 +108,6 @@
return ((phys_addr_t)vcpu->arch.fault.hpfar & HPFAR_MASK) << 8;
}
-static inline unsigned long kvm_vcpu_get_hyp_pc(struct kvm_vcpu *vcpu)
-{
- return vcpu->arch.fault.hyp_pc;
-}
-
static inline bool kvm_vcpu_dabt_isvalid(struct kvm_vcpu *vcpu)
{
return kvm_vcpu_get_hsr(vcpu) & HSR_ISV;
@@ -143,6 +138,11 @@
return kvm_vcpu_get_hsr(vcpu) & HSR_DABT_S1PTW;
}
+static inline bool kvm_vcpu_dabt_is_cm(struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & HSR_DABT_CM);
+}
+
/* Get Access Size from a data abort */
static inline int kvm_vcpu_dabt_get_as(struct kvm_vcpu *vcpu)
{
@@ -192,7 +192,7 @@
static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.cp15[c0_MPIDR] & MPIDR_HWID_BITMASK;
+ return vcpu_cp15(vcpu, c0_MPIDR) & MPIDR_HWID_BITMASK;
}
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index f9f2779..3850701 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -85,20 +85,61 @@
u32 hsr; /* Hyp Syndrome Register */
u32 hxfar; /* Hyp Data/Inst. Fault Address Register */
u32 hpfar; /* Hyp IPA Fault Address Register */
- u32 hyp_pc; /* PC when exception was taken from Hyp mode */
};
-typedef struct vfp_hard_struct kvm_cpu_context_t;
+/*
+ * 0 is reserved as an invalid value.
+ * Order should be kept in sync with the save/restore code.
+ */
+enum vcpu_sysreg {
+ __INVALID_SYSREG__,
+ c0_MPIDR, /* MultiProcessor ID Register */
+ c0_CSSELR, /* Cache Size Selection Register */
+ c1_SCTLR, /* System Control Register */
+ c1_ACTLR, /* Auxiliary Control Register */
+ c1_CPACR, /* Coprocessor Access Control */
+ c2_TTBR0, /* Translation Table Base Register 0 */
+ c2_TTBR0_high, /* TTBR0 top 32 bits */
+ c2_TTBR1, /* Translation Table Base Register 1 */
+ c2_TTBR1_high, /* TTBR1 top 32 bits */
+ c2_TTBCR, /* Translation Table Base Control R. */
+ c3_DACR, /* Domain Access Control Register */
+ c5_DFSR, /* Data Fault Status Register */
+ c5_IFSR, /* Instruction Fault Status Register */
+	c5_ADFSR,	/* Auxiliary Data Fault Status R */
+	c5_AIFSR,	/* Auxiliary Instruction Fault Status R */
+ c6_DFAR, /* Data Fault Address Register */
+ c6_IFAR, /* Instruction Fault Address Register */
+ c7_PAR, /* Physical Address Register */
+ c7_PAR_high, /* PAR top 32 bits */
+ c9_L2CTLR, /* Cortex A15/A7 L2 Control Register */
+ c10_PRRR, /* Primary Region Remap Register */
+ c10_NMRR, /* Normal Memory Remap Register */
+ c12_VBAR, /* Vector Base Address Register */
+ c13_CID, /* Context ID Register */
+ c13_TID_URW, /* Thread ID, User R/W */
+ c13_TID_URO, /* Thread ID, User R/O */
+ c13_TID_PRIV, /* Thread ID, Privileged */
+ c14_CNTKCTL, /* Timer Control Register (PL1) */
+	c10_AMAIR0,	/* Auxiliary Memory Attribute Indirection Reg0 */
+	c10_AMAIR1,	/* Auxiliary Memory Attribute Indirection Reg1 */
+ NR_CP15_REGS /* Number of regs (incl. invalid) */
+};
+
+struct kvm_cpu_context {
+ struct kvm_regs gp_regs;
+ struct vfp_hard_struct vfp;
+ u32 cp15[NR_CP15_REGS];
+};
+
+typedef struct kvm_cpu_context kvm_cpu_context_t;
struct kvm_vcpu_arch {
- struct kvm_regs regs;
+ struct kvm_cpu_context ctxt;
int target; /* Processor target */
DECLARE_BITMAP(features, KVM_VCPU_MAX_FEATURES);
- /* System control coprocessor (cp15) */
- u32 cp15[NR_CP15_REGS];
-
/* The CPU type we expose to the VM */
u32 midr;
@@ -111,9 +152,6 @@
/* Exception Information */
struct kvm_vcpu_fault_info fault;
- /* Floating point registers (VFP and Advanced SIMD/NEON) */
- struct vfp_hard_struct vfp_guest;
-
/* Host FP context */
kvm_cpu_context_t *host_cpu_context;
@@ -158,12 +196,14 @@
u64 exits;
};
+#define vcpu_cp15(v,r) (v)->arch.ctxt.cp15[r]
+
int kvm_vcpu_preferred_target(struct kvm_vcpu_init *init);
unsigned long kvm_arm_num_regs(struct kvm_vcpu *vcpu);
int kvm_arm_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices);
int kvm_arm_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
int kvm_arm_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg);
-u64 kvm_call_hyp(void *hypfn, ...);
+unsigned long kvm_call_hyp(void *hypfn, ...);
void force_vm_exit(const cpumask_t *mask);
#define KVM_ARCH_WANT_MMU_NOTIFIER
@@ -220,6 +260,11 @@
kvm_call_hyp((void*)hyp_stack_ptr, vector_ptr, pgd_ptr);
}
+static inline void __cpu_init_stage2(void)
+{
+ kvm_call_hyp(__init_stage2_translation);
+}
+
static inline int kvm_arch_dev_ioctl_check_extension(long ext)
{
return 0;
@@ -242,5 +287,20 @@
static inline void kvm_arm_setup_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_clear_debug(struct kvm_vcpu *vcpu) {}
static inline void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu) {}
+static inline int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ return -ENXIO;
+}
+static inline int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ return -ENXIO;
+}
+static inline int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ return -ENXIO;
+}
#endif /* __ARM_KVM_HOST_H__ */
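
The net effect of the kvm_host.h changes is that all guest-visible CPU state (GP regs, VFP, cp15) now lives in one kvm_cpu_context, with vcpu_cp15() keeping call sites terse. Illustrative before/after, taken directly from the surrounding hunks rather than new API:

    /* before: raw array access into the vcpu */
    u32 sctlr_old = vcpu->arch.cp15[c1_SCTLR];

    /* after: one context struct, reached through the new macro */
    u32 sctlr_new = vcpu_cp15(vcpu, c1_SCTLR);  /* -> arch.ctxt.cp15[c1_SCTLR] */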
diff --git a/arch/arm/include/asm/kvm_hyp.h b/arch/arm/include/asm/kvm_hyp.h
new file mode 100644
index 0000000..f0e8607
--- /dev/null
+++ b/arch/arm/include/asm/kvm_hyp.h
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM_KVM_HYP_H__
+#define __ARM_KVM_HYP_H__
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
+#include <asm/vfp.h>
+
+#define __hyp_text __section(.hyp.text) notrace
+
+#define kern_hyp_va(v) (v)
+#define hyp_kern_va(v) (v)
+
+#define __ACCESS_CP15(CRn, Op1, CRm, Op2) \
+ "mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32
+#define __ACCESS_CP15_64(Op1, CRm) \
+ "mrrc", "mcrr", __stringify(p15, Op1, %Q0, %R0, CRm), u64
+#define __ACCESS_VFP(CRn) \
+ "mrc", "mcr", __stringify(p10, 7, %0, CRn, cr0, 0), u32
+
+#define __write_sysreg(v, r, w, c, t) asm volatile(w " " c : : "r" ((t)(v)))
+#define write_sysreg(v, ...) __write_sysreg(v, __VA_ARGS__)
+
+#define __read_sysreg(r, w, c, t) ({ \
+ t __val; \
+ asm volatile(r " " c : "=r" (__val)); \
+ __val; \
+})
+#define read_sysreg(...) __read_sysreg(__VA_ARGS__)
+
+#define write_special(v, r) \
+ asm volatile("msr " __stringify(r) ", %0" : : "r" (v))
+#define read_special(r) ({ \
+ u32 __val; \
+ asm volatile("mrs %0, " __stringify(r) : "=r" (__val)); \
+ __val; \
+})
+
+#define TTBR0 __ACCESS_CP15_64(0, c2)
+#define TTBR1 __ACCESS_CP15_64(1, c2)
+#define VTTBR __ACCESS_CP15_64(6, c2)
+#define PAR __ACCESS_CP15_64(0, c7)
+#define CNTV_CVAL __ACCESS_CP15_64(3, c14)
+#define CNTVOFF __ACCESS_CP15_64(4, c14)
+
+#define MIDR __ACCESS_CP15(c0, 0, c0, 0)
+#define CSSELR __ACCESS_CP15(c0, 2, c0, 0)
+#define VPIDR __ACCESS_CP15(c0, 4, c0, 0)
+#define VMPIDR __ACCESS_CP15(c0, 4, c0, 5)
+#define SCTLR __ACCESS_CP15(c1, 0, c0, 0)
+#define CPACR __ACCESS_CP15(c1, 0, c0, 2)
+#define HCR __ACCESS_CP15(c1, 4, c1, 0)
+#define HDCR __ACCESS_CP15(c1, 4, c1, 1)
+#define HCPTR __ACCESS_CP15(c1, 4, c1, 2)
+#define HSTR __ACCESS_CP15(c1, 4, c1, 3)
+#define TTBCR __ACCESS_CP15(c2, 0, c0, 2)
+#define HTCR __ACCESS_CP15(c2, 4, c0, 2)
+#define VTCR __ACCESS_CP15(c2, 4, c1, 2)
+#define DACR __ACCESS_CP15(c3, 0, c0, 0)
+#define DFSR __ACCESS_CP15(c5, 0, c0, 0)
+#define IFSR __ACCESS_CP15(c5, 0, c0, 1)
+#define ADFSR __ACCESS_CP15(c5, 0, c1, 0)
+#define AIFSR __ACCESS_CP15(c5, 0, c1, 1)
+#define HSR __ACCESS_CP15(c5, 4, c2, 0)
+#define DFAR __ACCESS_CP15(c6, 0, c0, 0)
+#define IFAR __ACCESS_CP15(c6, 0, c0, 2)
+#define HDFAR __ACCESS_CP15(c6, 4, c0, 0)
+#define HIFAR __ACCESS_CP15(c6, 4, c0, 2)
+#define HPFAR __ACCESS_CP15(c6, 4, c0, 4)
+#define ICIALLUIS __ACCESS_CP15(c7, 0, c1, 0)
+#define ATS1CPR __ACCESS_CP15(c7, 0, c8, 0)
+#define TLBIALLIS __ACCESS_CP15(c8, 0, c3, 0)
+#define TLBIALLNSNHIS __ACCESS_CP15(c8, 4, c3, 4)
+#define PRRR __ACCESS_CP15(c10, 0, c2, 0)
+#define NMRR __ACCESS_CP15(c10, 0, c2, 1)
+#define AMAIR0 __ACCESS_CP15(c10, 0, c3, 0)
+#define AMAIR1 __ACCESS_CP15(c10, 0, c3, 1)
+#define VBAR __ACCESS_CP15(c12, 0, c0, 0)
+#define CID __ACCESS_CP15(c13, 0, c0, 1)
+#define TID_URW __ACCESS_CP15(c13, 0, c0, 2)
+#define TID_URO __ACCESS_CP15(c13, 0, c0, 3)
+#define TID_PRIV __ACCESS_CP15(c13, 0, c0, 4)
+#define HTPIDR __ACCESS_CP15(c13, 4, c0, 2)
+#define CNTKCTL __ACCESS_CP15(c14, 0, c1, 0)
+#define CNTV_CTL __ACCESS_CP15(c14, 0, c3, 1)
+#define CNTHCTL __ACCESS_CP15(c14, 4, c1, 0)
+
+#define VFP_FPEXC __ACCESS_VFP(FPEXC)
+
+/* AArch64 compatibility macros, only for the timer so far */
+#define read_sysreg_el0(r) read_sysreg(r##_el0)
+#define write_sysreg_el0(v, r) write_sysreg(v, r##_el0)
+
+#define cntv_ctl_el0 CNTV_CTL
+#define cntv_cval_el0 CNTV_CVAL
+#define cntvoff_el2 CNTVOFF
+#define cnthctl_el2 CNTHCTL
+
+void __timer_save_state(struct kvm_vcpu *vcpu);
+void __timer_restore_state(struct kvm_vcpu *vcpu);
+
+void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+
+void __sysreg_save_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
+
+void asmlinkage __vfp_save_state(struct vfp_hard_struct *vfp);
+void asmlinkage __vfp_restore_state(struct vfp_hard_struct *vfp);
+static inline bool __vfp_enabled(void)
+{
+ return !(read_sysreg(HCPTR) & (HCPTR_TCP(11) | HCPTR_TCP(10)));
+}
+
+void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt);
+void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt);
+
+int asmlinkage __guest_enter(struct kvm_vcpu *vcpu,
+ struct kvm_cpu_context *host);
+int asmlinkage __hyp_do_panic(const char *, int, u32);
+
+#endif /* __ARM_KVM_HYP_H__ */
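
How the new accessors expand, for one concrete register: with HCR defined as __ACCESS_CP15(c1, 4, c1, 0), read_sysreg()/write_sysreg() stringify straight into mrc/mcr instructions. A sketch, assuming hyp code that includes this header plus asm/kvm_arm.h for the existing HCR_VM bit:

    u32 hcr = read_sysreg(HCR);
    /* -> asm volatile("mrc p15, 4, %0, c1, c1, 0" : "=r" (hcr)); */

    write_sysreg(hcr | HCR_VM, HCR);
    /* -> asm volatile("mcr p15, 4, %0, c1, c1, 0" : : "r" (hcr | HCR_VM)); */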
diff --git a/arch/arm/include/asm/kvm_mmu.h b/arch/arm/include/asm/kvm_mmu.h
index a520b79..da44be9 100644
--- a/arch/arm/include/asm/kvm_mmu.h
+++ b/arch/arm/include/asm/kvm_mmu.h
@@ -179,7 +179,7 @@
static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
- return (vcpu->arch.cp15[c1_SCTLR] & 0b101) == 0b101;
+ return (vcpu_cp15(vcpu, c1_SCTLR) & 0b101) == 0b101;
}
static inline void __coherent_cache_guest_page(struct kvm_vcpu *vcpu,
diff --git a/arch/arm/include/asm/virt.h b/arch/arm/include/asm/virt.h
index 4371f45..d4ceaf5 100644
--- a/arch/arm/include/asm/virt.h
+++ b/arch/arm/include/asm/virt.h
@@ -74,6 +74,15 @@
{
return !!(__boot_cpu_mode & BOOT_CPU_MODE_MISMATCH);
}
+
+static inline bool is_kernel_in_hyp_mode(void)
+{
+ return false;
+}
+
+/* The section containing the hypervisor text */
+extern char __hyp_text_start[];
+extern char __hyp_text_end[];
#endif
#endif /* __ASSEMBLY__ */
diff --git a/arch/arm/include/asm/xen/page-coherent.h b/arch/arm/include/asm/xen/page-coherent.h
index 0375c8c..9408a99 100644
--- a/arch/arm/include/asm/xen/page-coherent.h
+++ b/arch/arm/include/asm/xen/page-coherent.h
@@ -35,14 +35,21 @@
dma_addr_t dev_addr, unsigned long offset, size_t size,
enum dma_data_direction dir, struct dma_attrs *attrs)
{
- bool local = XEN_PFN_DOWN(dev_addr) == page_to_xen_pfn(page);
+ unsigned long page_pfn = page_to_xen_pfn(page);
+ unsigned long dev_pfn = XEN_PFN_DOWN(dev_addr);
+ unsigned long compound_pages =
+ (1<<compound_order(page)) * XEN_PFN_PER_PAGE;
+ bool local = (page_pfn <= dev_pfn) &&
+ (dev_pfn - page_pfn < compound_pages);
+
/*
- * Dom0 is mapped 1:1, while the Linux page can be spanned accross
- * multiple Xen page, it's not possible to have a mix of local and
- * foreign Xen page. So if the first xen_pfn == mfn the page is local
- * otherwise it's a foreign page grant-mapped in dom0. If the page is
- * local we can safely call the native dma_ops function, otherwise we
- * call the xen specific function.
+ * Dom0 is mapped 1:1. A Linux page can span multiple Xen pages,
+ * but it cannot contain a mix of local and foreign Xen pages. So
+ * if the first xen_pfn == mfn, the page is local; otherwise it is
+ * a foreign page grant-mapped in dom0. If the page is local we
+ * can safely call the native dma_ops function, otherwise we call
+ * the Xen-specific function.
*/
if (local)
__generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
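
A sketch of the locality test introduced above: dev_addr counts as local only when its 4K Xen frame number falls inside the (possibly compound) Linux page. Standalone form, with the parameter names as placeholders for the kernel helpers:

    #include <stdbool.h>

    /* local iff dev_pfn lies within [page_pfn, page_pfn + compound_pages) */
    static bool dev_addr_is_local(unsigned long page_pfn,
                                  unsigned long dev_pfn,
                                  unsigned int compound_order,
                                  unsigned int xen_pfn_per_page)
    {
        unsigned long compound_pages =
            (1UL << compound_order) * xen_pfn_per_page;

        return page_pfn <= dev_pfn &&
               dev_pfn - page_pfn < compound_pages;
    }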
diff --git a/arch/arm/kernel/asm-offsets.c b/arch/arm/kernel/asm-offsets.c
index 871b826..27d0581 100644
--- a/arch/arm/kernel/asm-offsets.c
+++ b/arch/arm/kernel/asm-offsets.c
@@ -170,41 +170,11 @@
DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
BLANK();
#ifdef CONFIG_KVM_ARM_HOST
- DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
- DEFINE(VCPU_MIDR, offsetof(struct kvm_vcpu, arch.midr));
- DEFINE(VCPU_CP15, offsetof(struct kvm_vcpu, arch.cp15));
- DEFINE(VCPU_VFP_GUEST, offsetof(struct kvm_vcpu, arch.vfp_guest));
- DEFINE(VCPU_VFP_HOST, offsetof(struct kvm_vcpu, arch.host_cpu_context));
- DEFINE(VCPU_REGS, offsetof(struct kvm_vcpu, arch.regs));
- DEFINE(VCPU_USR_REGS, offsetof(struct kvm_vcpu, arch.regs.usr_regs));
- DEFINE(VCPU_SVC_REGS, offsetof(struct kvm_vcpu, arch.regs.svc_regs));
- DEFINE(VCPU_ABT_REGS, offsetof(struct kvm_vcpu, arch.regs.abt_regs));
- DEFINE(VCPU_UND_REGS, offsetof(struct kvm_vcpu, arch.regs.und_regs));
- DEFINE(VCPU_IRQ_REGS, offsetof(struct kvm_vcpu, arch.regs.irq_regs));
- DEFINE(VCPU_FIQ_REGS, offsetof(struct kvm_vcpu, arch.regs.fiq_regs));
- DEFINE(VCPU_PC, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_pc));
- DEFINE(VCPU_CPSR, offsetof(struct kvm_vcpu, arch.regs.usr_regs.ARM_cpsr));
- DEFINE(VCPU_HCR, offsetof(struct kvm_vcpu, arch.hcr));
- DEFINE(VCPU_IRQ_LINES, offsetof(struct kvm_vcpu, arch.irq_lines));
- DEFINE(VCPU_HSR, offsetof(struct kvm_vcpu, arch.fault.hsr));
- DEFINE(VCPU_HxFAR, offsetof(struct kvm_vcpu, arch.fault.hxfar));
- DEFINE(VCPU_HPFAR, offsetof(struct kvm_vcpu, arch.fault.hpfar));
- DEFINE(VCPU_HYP_PC, offsetof(struct kvm_vcpu, arch.fault.hyp_pc));
- DEFINE(VCPU_VGIC_CPU, offsetof(struct kvm_vcpu, arch.vgic_cpu));
- DEFINE(VGIC_V2_CPU_HCR, offsetof(struct vgic_cpu, vgic_v2.vgic_hcr));
- DEFINE(VGIC_V2_CPU_VMCR, offsetof(struct vgic_cpu, vgic_v2.vgic_vmcr));
- DEFINE(VGIC_V2_CPU_MISR, offsetof(struct vgic_cpu, vgic_v2.vgic_misr));
- DEFINE(VGIC_V2_CPU_EISR, offsetof(struct vgic_cpu, vgic_v2.vgic_eisr));
- DEFINE(VGIC_V2_CPU_ELRSR, offsetof(struct vgic_cpu, vgic_v2.vgic_elrsr));
- DEFINE(VGIC_V2_CPU_APR, offsetof(struct vgic_cpu, vgic_v2.vgic_apr));
- DEFINE(VGIC_V2_CPU_LR, offsetof(struct vgic_cpu, vgic_v2.vgic_lr));
- DEFINE(VGIC_CPU_NR_LR, offsetof(struct vgic_cpu, nr_lr));
- DEFINE(VCPU_TIMER_CNTV_CTL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_ctl));
- DEFINE(VCPU_TIMER_CNTV_CVAL, offsetof(struct kvm_vcpu, arch.timer_cpu.cntv_cval));
- DEFINE(KVM_TIMER_CNTVOFF, offsetof(struct kvm, arch.timer.cntvoff));
- DEFINE(KVM_TIMER_ENABLED, offsetof(struct kvm, arch.timer.enabled));
- DEFINE(KVM_VGIC_VCTRL, offsetof(struct kvm, arch.vgic.vctrl_base));
- DEFINE(KVM_VTTBR, offsetof(struct kvm, arch.vttbr));
+ DEFINE(VCPU_GUEST_CTXT, offsetof(struct kvm_vcpu, arch.ctxt));
+ DEFINE(VCPU_HOST_CTXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
+ DEFINE(CPU_CTXT_VFP, offsetof(struct kvm_cpu_context, vfp));
+ DEFINE(CPU_CTXT_GP_REGS, offsetof(struct kvm_cpu_context, gp_regs));
+ DEFINE(GP_REGS_USR, offsetof(struct kvm_regs, usr_regs));
#endif
BLANK();
#ifdef CONFIG_VDSO
diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
index 8b60fde..b4139cb 100644
--- a/arch/arm/kernel/vmlinux.lds.S
+++ b/arch/arm/kernel/vmlinux.lds.S
@@ -18,6 +18,11 @@
*(.proc.info.init) \
VMLINUX_SYMBOL(__proc_info_end) = .;
+#define HYPERVISOR_TEXT \
+ VMLINUX_SYMBOL(__hyp_text_start) = .; \
+ *(.hyp.text) \
+ VMLINUX_SYMBOL(__hyp_text_end) = .;
+
#define IDMAP_TEXT \
ALIGN_FUNCTION(); \
VMLINUX_SYMBOL(__idmap_text_start) = .; \
@@ -108,6 +113,7 @@
TEXT_TEXT
SCHED_TEXT
LOCK_TEXT
+ HYPERVISOR_TEXT
KPROBES_TEXT
*(.gnu.warning)
*(.glue_7)
diff --git a/arch/arm/kvm/Makefile b/arch/arm/kvm/Makefile
index c5eef02c..eb1bf43 100644
--- a/arch/arm/kvm/Makefile
+++ b/arch/arm/kvm/Makefile
@@ -17,6 +17,7 @@
KVM := ../../../virt/kvm
kvm-arm-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o $(KVM)/eventfd.o $(KVM)/vfio.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp/
obj-y += kvm-arm.o init.o interrupts.o
obj-y += arm.o handle_exit.o guest.o mmu.o emulate.o reset.o
obj-y += coproc.o coproc_a15.o coproc_a7.o mmio.o psci.o perf.o
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index dda1959..9ca653e 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -28,6 +28,7 @@
#include <linux/sched.h>
#include <linux/kvm.h>
#include <trace/events/kvm.h>
+#include <kvm/arm_pmu.h>
#define CREATE_TRACE_POINTS
#include "trace.h"
@@ -265,6 +266,7 @@
kvm_mmu_free_memory_caches(vcpu);
kvm_timer_vcpu_terminate(vcpu);
kvm_vgic_vcpu_destroy(vcpu);
+ kvm_pmu_vcpu_destroy(vcpu);
kmem_cache_free(kvm_vcpu_cache, vcpu);
}
@@ -320,6 +322,7 @@
vcpu->cpu = -1;
kvm_arm_set_running_vcpu(NULL);
+ kvm_timer_vcpu_put(vcpu);
}
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
@@ -577,6 +580,7 @@
* non-preemptible context.
*/
preempt_disable();
+ kvm_pmu_flush_hwstate(vcpu);
kvm_timer_flush_hwstate(vcpu);
kvm_vgic_flush_hwstate(vcpu);
@@ -593,6 +597,7 @@
if (ret <= 0 || need_new_vmid_gen(vcpu->kvm) ||
vcpu->arch.power_off || vcpu->arch.pause) {
local_irq_enable();
+ kvm_pmu_sync_hwstate(vcpu);
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
preempt_enable();
@@ -642,10 +647,11 @@
trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));
/*
- * We must sync the timer state before the vgic state so that
- * the vgic can properly sample the updated state of the
+ * We must sync the PMU and timer state before the vgic state so
+ * that the vgic can properly sample the updated state of the
* interrupt line.
*/
+ kvm_pmu_sync_hwstate(vcpu);
kvm_timer_sync_hwstate(vcpu);
kvm_vgic_sync_hwstate(vcpu);
@@ -823,11 +829,54 @@
return 0;
}
+static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ int ret = -ENXIO;
+
+ switch (attr->group) {
+ default:
+ ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr);
+ break;
+ }
+
+ return ret;
+}
+
+static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ int ret = -ENXIO;
+
+ switch (attr->group) {
+ default:
+ ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr);
+ break;
+ }
+
+ return ret;
+}
+
+static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ int ret = -ENXIO;
+
+ switch (attr->group) {
+ default:
+ ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr);
+ break;
+ }
+
+ return ret;
+}
+
long kvm_arch_vcpu_ioctl(struct file *filp,
unsigned int ioctl, unsigned long arg)
{
struct kvm_vcpu *vcpu = filp->private_data;
void __user *argp = (void __user *)arg;
+ struct kvm_device_attr attr;
switch (ioctl) {
case KVM_ARM_VCPU_INIT: {
@@ -870,6 +919,21 @@
return -E2BIG;
return kvm_arm_copy_reg_indices(vcpu, user_list->reg);
}
+ case KVM_SET_DEVICE_ATTR: {
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ return -EFAULT;
+ return kvm_arm_vcpu_set_attr(vcpu, &attr);
+ }
+ case KVM_GET_DEVICE_ATTR: {
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ return -EFAULT;
+ return kvm_arm_vcpu_get_attr(vcpu, &attr);
+ }
+ case KVM_HAS_DEVICE_ATTR: {
+ if (copy_from_user(&attr, argp, sizeof(attr)))
+ return -EFAULT;
+ return kvm_arm_vcpu_has_attr(vcpu, &attr);
+ }
default:
return -EINVAL;
}
@@ -967,6 +1031,11 @@
}
}
+static void cpu_init_stage2(void *dummy)
+{
+ __cpu_init_stage2();
+}
+
static void cpu_init_hyp_mode(void *dummy)
{
phys_addr_t boot_pgd_ptr;
@@ -985,6 +1054,7 @@
vector_ptr = (unsigned long)__kvm_hyp_vector;
__cpu_init_hyp_mode(boot_pgd_ptr, pgd_ptr, hyp_stack_ptr, vector_ptr);
+ __cpu_init_stage2();
kvm_arm_init_debug();
}
@@ -1035,6 +1105,82 @@
}
#endif
+static void teardown_common_resources(void)
+{
+ free_percpu(kvm_host_cpu_state);
+}
+
+static int init_common_resources(void)
+{
+ kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
+ if (!kvm_host_cpu_state) {
+ kvm_err("Cannot allocate host CPU state\n");
+ return -ENOMEM;
+ }
+
+ return 0;
+}
+
+static int init_subsystems(void)
+{
+ int err;
+
+ /*
+ * Init HYP view of VGIC
+ */
+ err = kvm_vgic_hyp_init();
+ switch (err) {
+ case 0:
+ vgic_present = true;
+ break;
+ case -ENODEV:
+ case -ENXIO:
+ vgic_present = false;
+ break;
+ default:
+ return err;
+ }
+
+ /*
+ * Init HYP architected timer support
+ */
+ err = kvm_timer_hyp_init();
+ if (err)
+ return err;
+
+ kvm_perf_init();
+ kvm_coproc_table_init();
+
+ return 0;
+}
+
+static void teardown_hyp_mode(void)
+{
+ int cpu;
+
+ if (is_kernel_in_hyp_mode())
+ return;
+
+ free_hyp_pgds();
+ for_each_possible_cpu(cpu)
+ free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+}
+
+static int init_vhe_mode(void)
+{
+ /*
+ * Execute the init code on each CPU.
+ */
+ on_each_cpu(cpu_init_stage2, NULL, 1);
+
+ /* set size of VMID supported by CPU */
+ kvm_vmid_bits = kvm_get_vmid_bits();
+ kvm_info("%d-bit VMID\n", kvm_vmid_bits);
+
+ kvm_info("VHE mode initialized successfully\n");
+ return 0;
+}
+
/**
* Inits Hyp-mode on all online CPUs
*/
@@ -1065,7 +1211,7 @@
stack_page = __get_free_page(GFP_KERNEL);
if (!stack_page) {
err = -ENOMEM;
- goto out_free_stack_pages;
+ goto out_err;
}
per_cpu(kvm_arm_hyp_stack_page, cpu) = stack_page;
@@ -1074,16 +1220,16 @@
/*
* Map the Hyp-code called directly from the host
*/
- err = create_hyp_mappings(__kvm_hyp_code_start, __kvm_hyp_code_end);
+ err = create_hyp_mappings(__hyp_text_start, __hyp_text_end);
if (err) {
kvm_err("Cannot map world-switch code\n");
- goto out_free_mappings;
+ goto out_err;
}
err = create_hyp_mappings(__start_rodata, __end_rodata);
if (err) {
kvm_err("Cannot map rodata section\n");
- goto out_free_mappings;
+ goto out_err;
}
/*
@@ -1095,20 +1241,10 @@
if (err) {
kvm_err("Cannot map hyp stack\n");
- goto out_free_mappings;
+ goto out_err;
}
}
- /*
- * Map the host CPU structures
- */
- kvm_host_cpu_state = alloc_percpu(kvm_cpu_context_t);
- if (!kvm_host_cpu_state) {
- err = -ENOMEM;
- kvm_err("Cannot allocate host CPU state\n");
- goto out_free_mappings;
- }
-
for_each_possible_cpu(cpu) {
kvm_cpu_context_t *cpu_ctxt;
@@ -1117,7 +1253,7 @@
if (err) {
kvm_err("Cannot map host CPU state: %d\n", err);
- goto out_free_context;
+ goto out_err;
}
}
@@ -1126,34 +1262,22 @@
*/
on_each_cpu(cpu_init_hyp_mode, NULL, 1);
- /*
- * Init HYP view of VGIC
- */
- err = kvm_vgic_hyp_init();
- switch (err) {
- case 0:
- vgic_present = true;
- break;
- case -ENODEV:
- case -ENXIO:
- vgic_present = false;
- break;
- default:
- goto out_free_context;
- }
-
- /*
- * Init HYP architected timer support
- */
- err = kvm_timer_hyp_init();
- if (err)
- goto out_free_context;
-
#ifndef CONFIG_HOTPLUG_CPU
free_boot_hyp_pgd();
#endif
- kvm_perf_init();
+ cpu_notifier_register_begin();
+
+ err = __register_cpu_notifier(&hyp_init_cpu_nb);
+
+ cpu_notifier_register_done();
+
+ if (err) {
+ kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
+ goto out_err;
+ }
+
+ hyp_cpu_pm_init();
/* set size of VMID supported by CPU */
kvm_vmid_bits = kvm_get_vmid_bits();
@@ -1162,14 +1286,9 @@
kvm_info("Hyp mode initialized successfully\n");
return 0;
-out_free_context:
- free_percpu(kvm_host_cpu_state);
-out_free_mappings:
- free_hyp_pgds();
-out_free_stack_pages:
- for_each_possible_cpu(cpu)
- free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
+
out_err:
+ teardown_hyp_mode();
kvm_err("error initializing Hyp mode: %d\n", err);
return err;
}
@@ -1213,26 +1332,27 @@
}
}
- cpu_notifier_register_begin();
+ err = init_common_resources();
+ if (err)
+ return err;
- err = init_hyp_mode();
+ if (is_kernel_in_hyp_mode())
+ err = init_vhe_mode();
+ else
+ err = init_hyp_mode();
if (err)
goto out_err;
- err = __register_cpu_notifier(&hyp_init_cpu_nb);
- if (err) {
- kvm_err("Cannot register HYP init CPU notifier (%d)\n", err);
- goto out_err;
- }
+ err = init_subsystems();
+ if (err)
+ goto out_hyp;
- cpu_notifier_register_done();
-
- hyp_cpu_pm_init();
-
- kvm_coproc_table_init();
return 0;
+
+out_hyp:
+ teardown_hyp_mode();
out_err:
- cpu_notifier_register_done();
+ teardown_common_resources();
return err;
}
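
The three new vcpu ioctls follow the existing KVM device-attr convention: probe with KVM_HAS_DEVICE_ATTR, then set. A rough userspace sketch (the group and attr values are placeholders; struct kvm_device_attr and the ioctls are the existing UAPI):

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Probe for a vcpu attribute, then set it. Returns 0 on success. */
    static int vcpu_set_attr(int vcpu_fd, __u32 group, __u64 attr_id,
                             void *addr)
    {
        struct kvm_device_attr attr = {
            .group = group,
            .attr  = attr_id,
            .addr  = (__u64)(unsigned long)addr,
        };

        if (ioctl(vcpu_fd, KVM_HAS_DEVICE_ATTR, &attr))
            return -1;      /* kernel answered -ENXIO: unsupported */

        return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
    }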
diff --git a/arch/arm/kvm/coproc.c b/arch/arm/kvm/coproc.c
index f3d88dc..1bb2b79 100644
--- a/arch/arm/kvm/coproc.c
+++ b/arch/arm/kvm/coproc.c
@@ -16,6 +16,8 @@
* along with this program; if not, write to the Free Software
* Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
*/
+
+#include <linux/bsearch.h>
#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
@@ -54,8 +56,8 @@
const struct coproc_reg *r,
u64 val)
{
- vcpu->arch.cp15[r->reg] = val & 0xffffffff;
- vcpu->arch.cp15[r->reg + 1] = val >> 32;
+ vcpu_cp15(vcpu, r->reg) = val & 0xffffffff;
+ vcpu_cp15(vcpu, r->reg + 1) = val >> 32;
}
static inline u64 vcpu_cp15_reg64_get(struct kvm_vcpu *vcpu,
@@ -63,9 +65,9 @@
{
u64 val;
- val = vcpu->arch.cp15[r->reg + 1];
+ val = vcpu_cp15(vcpu, r->reg + 1);
val = val << 32;
- val = val | vcpu->arch.cp15[r->reg];
+ val = val | vcpu_cp15(vcpu, r->reg);
return val;
}
@@ -104,7 +106,7 @@
* vcpu_id, but we read the 'U' bit from the underlying
* hardware directly.
*/
- vcpu->arch.cp15[c0_MPIDR] = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
+ vcpu_cp15(vcpu, c0_MPIDR) = ((read_cpuid_mpidr() & MPIDR_SMP_BITMASK) |
((vcpu->vcpu_id >> 2) << MPIDR_LEVEL_BITS) |
(vcpu->vcpu_id & 3));
}
@@ -117,7 +119,7 @@
if (p->is_write)
return ignore_write(vcpu, p);
- *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c1_ACTLR];
+ *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c1_ACTLR);
return true;
}
@@ -139,7 +141,7 @@
if (p->is_write)
return ignore_write(vcpu, p);
- *vcpu_reg(vcpu, p->Rt1) = vcpu->arch.cp15[c9_L2CTLR];
+ *vcpu_reg(vcpu, p->Rt1) = vcpu_cp15(vcpu, c9_L2CTLR);
return true;
}
@@ -156,7 +158,7 @@
ncores = min(ncores, 3U);
l2ctlr |= (ncores & 3) << 24;
- vcpu->arch.cp15[c9_L2CTLR] = l2ctlr;
+ vcpu_cp15(vcpu, c9_L2CTLR) = l2ctlr;
}
static void reset_actlr(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
@@ -171,7 +173,7 @@
else
actlr &= ~(1U << 6);
- vcpu->arch.cp15[c1_ACTLR] = actlr;
+ vcpu_cp15(vcpu, c1_ACTLR) = actlr;
}
/*
@@ -218,9 +220,9 @@
BUG_ON(!p->is_write);
- vcpu->arch.cp15[r->reg] = *vcpu_reg(vcpu, p->Rt1);
+ vcpu_cp15(vcpu, r->reg) = *vcpu_reg(vcpu, p->Rt1);
if (p->is_64bit)
- vcpu->arch.cp15[r->reg + 1] = *vcpu_reg(vcpu, p->Rt2);
+ vcpu_cp15(vcpu, r->reg + 1) = *vcpu_reg(vcpu, p->Rt2);
kvm_toggle_cache(vcpu, was_enabled);
return true;
@@ -381,17 +383,26 @@
{ CRn(15), CRm( 0), Op1( 4), Op2( 0), is32, access_cbar},
};
+static int check_reg_table(const struct coproc_reg *table, unsigned int n)
+{
+ unsigned int i;
+
+ for (i = 1; i < n; i++) {
+ if (cmp_reg(&table[i-1], &table[i]) >= 0) {
+ kvm_err("reg table %p out of order (%d)\n", table, i - 1);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
/* Target specific emulation tables */
static struct kvm_coproc_target_table *target_tables[KVM_ARM_NUM_TARGETS];
void kvm_register_target_coproc_table(struct kvm_coproc_target_table *table)
{
- unsigned int i;
-
- for (i = 1; i < table->num; i++)
- BUG_ON(cmp_reg(&table->table[i-1],
- &table->table[i]) >= 0);
-
+ BUG_ON(check_reg_table(table->table, table->num));
target_tables[table->target] = table;
}
@@ -405,29 +416,32 @@
return table->table;
}
+#define reg_to_match_value(x) \
+ ({ \
+ unsigned long val; \
+ val = (x)->CRn << 11; \
+ val |= (x)->CRm << 7; \
+ val |= (x)->Op1 << 4; \
+ val |= (x)->Op2 << 1; \
+ val |= !(x)->is_64bit; \
+ val; \
+ })
+
+static int match_reg(const void *key, const void *elt)
+{
+ const unsigned long pval = (unsigned long)key;
+ const struct coproc_reg *r = elt;
+
+ return pval - reg_to_match_value(r);
+}
+
static const struct coproc_reg *find_reg(const struct coproc_params *params,
const struct coproc_reg table[],
unsigned int num)
{
- unsigned int i;
+ unsigned long pval = reg_to_match_value(params);
- for (i = 0; i < num; i++) {
- const struct coproc_reg *r = &table[i];
-
- if (params->is_64bit != r->is_64)
- continue;
- if (params->CRn != r->CRn)
- continue;
- if (params->CRm != r->CRm)
- continue;
- if (params->Op1 != r->Op1)
- continue;
- if (params->Op2 != r->Op2)
- continue;
-
- return r;
- }
- return NULL;
+ return bsearch((void *)pval, table, num, sizeof(table[0]), match_reg);
}
static int emulate_cp15(struct kvm_vcpu *vcpu,
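
For bsearch() to work, reg_to_match_value() must impose the same total order that check_reg_table() now verifies at init time. Packing one example key by hand (field values chosen for illustration; sketch only):

    /* CRn=0, CRm=1, Op1=0, Op2=2, 32-bit access: */
    unsigned long key = (0UL << 11) | (1UL << 7) | (0UL << 4)
                      | (2UL << 1) | 1UL;   /* trailing 1 == !is_64bit */
    /* key == 0x85; the single subtraction in match_reg() then orders
     * entries exactly like cmp_reg(), with 64-bit entries sorting first. */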
@@ -645,6 +659,9 @@
{ CRn( 0), CRm( 0), Op1( 0), Op2( 3), is32, NULL, get_TLBTR },
{ CRn( 0), CRm( 0), Op1( 0), Op2( 6), is32, NULL, get_REVIDR },
+ { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
+ { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
+
{ CRn( 0), CRm( 1), Op1( 0), Op2( 0), is32, NULL, get_ID_PFR0 },
{ CRn( 0), CRm( 1), Op1( 0), Op2( 1), is32, NULL, get_ID_PFR1 },
{ CRn( 0), CRm( 1), Op1( 0), Op2( 2), is32, NULL, get_ID_DFR0 },
@@ -660,9 +677,6 @@
{ CRn( 0), CRm( 2), Op1( 0), Op2( 3), is32, NULL, get_ID_ISAR3 },
{ CRn( 0), CRm( 2), Op1( 0), Op2( 4), is32, NULL, get_ID_ISAR4 },
{ CRn( 0), CRm( 2), Op1( 0), Op2( 5), is32, NULL, get_ID_ISAR5 },
-
- { CRn( 0), CRm( 0), Op1( 1), Op2( 1), is32, NULL, get_CLIDR },
- { CRn( 0), CRm( 0), Op1( 1), Op2( 7), is32, NULL, get_AIDR },
};
/*
@@ -901,7 +915,7 @@
if (vfpid < num_fp_regs()) {
if (KVM_REG_SIZE(id) != 8)
return -ENOENT;
- return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpregs[vfpid],
+ return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpregs[vfpid],
id);
}
@@ -911,13 +925,13 @@
switch (vfpid) {
case KVM_REG_ARM_VFP_FPEXC:
- return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpexc, id);
+ return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpexc, id);
case KVM_REG_ARM_VFP_FPSCR:
- return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpscr, id);
+ return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpscr, id);
case KVM_REG_ARM_VFP_FPINST:
- return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst, id);
+ return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst, id);
case KVM_REG_ARM_VFP_FPINST2:
- return reg_to_user(uaddr, &vcpu->arch.vfp_guest.fpinst2, id);
+ return reg_to_user(uaddr, &vcpu->arch.ctxt.vfp.fpinst2, id);
case KVM_REG_ARM_VFP_MVFR0:
val = fmrx(MVFR0);
return reg_to_user(uaddr, &val, id);
@@ -945,7 +959,7 @@
if (vfpid < num_fp_regs()) {
if (KVM_REG_SIZE(id) != 8)
return -ENOENT;
- return reg_from_user(&vcpu->arch.vfp_guest.fpregs[vfpid],
+ return reg_from_user(&vcpu->arch.ctxt.vfp.fpregs[vfpid],
uaddr, id);
}
@@ -955,13 +969,13 @@
switch (vfpid) {
case KVM_REG_ARM_VFP_FPEXC:
- return reg_from_user(&vcpu->arch.vfp_guest.fpexc, uaddr, id);
+ return reg_from_user(&vcpu->arch.ctxt.vfp.fpexc, uaddr, id);
case KVM_REG_ARM_VFP_FPSCR:
- return reg_from_user(&vcpu->arch.vfp_guest.fpscr, uaddr, id);
+ return reg_from_user(&vcpu->arch.ctxt.vfp.fpscr, uaddr, id);
case KVM_REG_ARM_VFP_FPINST:
- return reg_from_user(&vcpu->arch.vfp_guest.fpinst, uaddr, id);
+ return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst, uaddr, id);
case KVM_REG_ARM_VFP_FPINST2:
- return reg_from_user(&vcpu->arch.vfp_guest.fpinst2, uaddr, id);
+ return reg_from_user(&vcpu->arch.ctxt.vfp.fpinst2, uaddr, id);
/* These are invariant. */
case KVM_REG_ARM_VFP_MVFR0:
if (reg_from_user(&val, uaddr, id))
@@ -1030,7 +1044,7 @@
val = vcpu_cp15_reg64_get(vcpu, r);
ret = reg_to_user(uaddr, &val, reg->id);
} else if (KVM_REG_SIZE(reg->id) == 4) {
- ret = reg_to_user(uaddr, &vcpu->arch.cp15[r->reg], reg->id);
+ ret = reg_to_user(uaddr, &vcpu_cp15(vcpu, r->reg), reg->id);
}
return ret;
@@ -1060,7 +1074,7 @@
if (!ret)
vcpu_cp15_reg64_set(vcpu, r, val);
} else if (KVM_REG_SIZE(reg->id) == 4) {
- ret = reg_from_user(&vcpu->arch.cp15[r->reg], uaddr, reg->id);
+ ret = reg_from_user(&vcpu_cp15(vcpu, r->reg), uaddr, reg->id);
}
return ret;
@@ -1096,7 +1110,7 @@
static u64 cp15_to_index(const struct coproc_reg *reg)
{
u64 val = KVM_REG_ARM | (15 << KVM_REG_ARM_COPROC_SHIFT);
- if (reg->is_64) {
+ if (reg->is_64bit) {
val |= KVM_REG_SIZE_U64;
val |= (reg->Op1 << KVM_REG_ARM_OPC1_SHIFT);
/*
@@ -1210,8 +1224,8 @@
unsigned int i;
/* Make sure tables are unique and in order. */
- for (i = 1; i < ARRAY_SIZE(cp15_regs); i++)
- BUG_ON(cmp_reg(&cp15_regs[i-1], &cp15_regs[i]) >= 0);
+ BUG_ON(check_reg_table(cp15_regs, ARRAY_SIZE(cp15_regs)));
+ BUG_ON(check_reg_table(invariant_cp15, ARRAY_SIZE(invariant_cp15)));
/* We abuse the reset function to overwrite the table itself. */
for (i = 0; i < ARRAY_SIZE(invariant_cp15); i++)
@@ -1248,7 +1262,7 @@
const struct coproc_reg *table;
/* Catch someone adding a register without putting in reset entry. */
- memset(vcpu->arch.cp15, 0x42, sizeof(vcpu->arch.cp15));
+ memset(vcpu->arch.ctxt.cp15, 0x42, sizeof(vcpu->arch.ctxt.cp15));
/* Generic chip reset first (so target could override). */
reset_coproc_regs(vcpu, cp15_regs, ARRAY_SIZE(cp15_regs));
@@ -1257,6 +1271,6 @@
reset_coproc_regs(vcpu, table, num);
for (num = 1; num < NR_CP15_REGS; num++)
- if (vcpu->arch.cp15[num] == 0x42424242)
- panic("Didn't reset vcpu->arch.cp15[%zi]", num);
+ if (vcpu_cp15(vcpu, num) == 0x42424242)
+ panic("Didn't reset vcpu_cp15(vcpu, %zi)", num);
}
diff --git a/arch/arm/kvm/coproc.h b/arch/arm/kvm/coproc.h
index 88d24a3..eef1759 100644
--- a/arch/arm/kvm/coproc.h
+++ b/arch/arm/kvm/coproc.h
@@ -37,7 +37,7 @@
unsigned long Op1;
unsigned long Op2;
- bool is_64;
+ bool is_64bit;
/* Trapped access from guest, if non-NULL. */
bool (*access)(struct kvm_vcpu *,
@@ -47,7 +47,7 @@
/* Initialization for vcpu. */
void (*reset)(struct kvm_vcpu *, const struct coproc_reg *);
- /* Index into vcpu->arch.cp15[], or 0 if we don't need to save it. */
+ /* Index into vcpu_cp15(vcpu, ...), or 0 if we don't need to save it. */
unsigned long reg;
/* Value (usually reset value) */
@@ -104,25 +104,25 @@
const struct coproc_reg *r)
{
BUG_ON(!r->reg);
- BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
- vcpu->arch.cp15[r->reg] = 0xdecafbad;
+ BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
+ vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
}
static inline void reset_val(struct kvm_vcpu *vcpu, const struct coproc_reg *r)
{
BUG_ON(!r->reg);
- BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.cp15));
- vcpu->arch.cp15[r->reg] = r->val;
+ BUG_ON(r->reg >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
+ vcpu_cp15(vcpu, r->reg) = r->val;
}
static inline void reset_unknown64(struct kvm_vcpu *vcpu,
const struct coproc_reg *r)
{
BUG_ON(!r->reg);
- BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.cp15));
+ BUG_ON(r->reg + 1 >= ARRAY_SIZE(vcpu->arch.ctxt.cp15));
- vcpu->arch.cp15[r->reg] = 0xdecafbad;
- vcpu->arch.cp15[r->reg+1] = 0xd0c0ffee;
+ vcpu_cp15(vcpu, r->reg) = 0xdecafbad;
+ vcpu_cp15(vcpu, r->reg+1) = 0xd0c0ffee;
}
static inline int cmp_reg(const struct coproc_reg *i1,
@@ -141,7 +141,7 @@
return i1->Op1 - i2->Op1;
if (i1->Op2 != i2->Op2)
return i1->Op2 - i2->Op2;
- return i2->is_64 - i1->is_64;
+ return i2->is_64bit - i1->is_64bit;
}
@@ -150,8 +150,8 @@
#define CRm64(_x) .CRn = _x, .CRm = 0
#define Op1(_x) .Op1 = _x
#define Op2(_x) .Op2 = _x
-#define is64 .is_64 = true
-#define is32 .is_64 = false
+#define is64 .is_64bit = true
+#define is32 .is_64bit = false
bool access_vm_reg(struct kvm_vcpu *vcpu,
const struct coproc_params *p,
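
The is_64 to is_64bit rename also reaches the is64/is32 shorthand used when declaring coproc table entries. For illustration, an entry built from these designated-initializer macros might look like the following sketch (the register chosen is only an example; the real tables live in coproc.c):

	/*
	 * Illustrative 64-bit entry: CRm64() fills CRn/CRm for an
	 * mcrr/mrrc access, is64 sets .is_64bit, and the trailing
	 * fields fill .access, .reset and .reg positionally.
	 */
	static const struct coproc_reg demo_regs[] = {
		{ CRm64(2), Op1(0), is64, access_vm_reg, reset_unknown64, c2_TTBR0 },
	};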
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index dc99159..a494def 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -112,7 +112,7 @@
*/
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
- unsigned long *reg_array = (unsigned long *)&vcpu->arch.regs;
+ unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
switch (mode) {
@@ -147,15 +147,15 @@
unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;
switch (mode) {
case SVC_MODE:
- return &vcpu->arch.regs.KVM_ARM_SVC_spsr;
+ return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
case ABT_MODE:
- return &vcpu->arch.regs.KVM_ARM_ABT_spsr;
+ return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
case UND_MODE:
- return &vcpu->arch.regs.KVM_ARM_UND_spsr;
+ return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
case IRQ_MODE:
- return &vcpu->arch.regs.KVM_ARM_IRQ_spsr;
+ return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
case FIQ_MODE:
- return &vcpu->arch.regs.KVM_ARM_FIQ_spsr;
+ return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
default:
BUG();
}
@@ -266,8 +266,8 @@
static u32 exc_vector_base(struct kvm_vcpu *vcpu)
{
- u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
- u32 vbar = vcpu->arch.cp15[c12_VBAR];
+ u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+ u32 vbar = vcpu_cp15(vcpu, c12_VBAR);
if (sctlr & SCTLR_V)
return 0xffff0000;
@@ -282,7 +282,7 @@
static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
{
unsigned long cpsr = *vcpu_cpsr(vcpu);
- u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+ u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
@@ -357,22 +357,22 @@
if (is_pabt) {
/* Set IFAR and IFSR */
- vcpu->arch.cp15[c6_IFAR] = addr;
- is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+ vcpu_cp15(vcpu, c6_IFAR) = addr;
+ is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
/* Always give debug fault for now - should give guest a clue */
if (is_lpae)
- vcpu->arch.cp15[c5_IFSR] = 1 << 9 | 0x22;
+ vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22;
else
- vcpu->arch.cp15[c5_IFSR] = 2;
+ vcpu_cp15(vcpu, c5_IFSR) = 2;
} else { /* !iabt */
/* Set DFAR and DFSR */
- vcpu->arch.cp15[c6_DFAR] = addr;
- is_lpae = (vcpu->arch.cp15[c2_TTBCR] >> 31);
+ vcpu_cp15(vcpu, c6_DFAR) = addr;
+ is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
/* Always give debug fault for now - should give guest a clue */
if (is_lpae)
- vcpu->arch.cp15[c5_DFSR] = 1 << 9 | 0x22;
+ vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22;
else
- vcpu->arch.cp15[c5_DFSR] = 2;
+ vcpu_cp15(vcpu, c5_DFSR) = 2;
}
}
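
In the fault-injection path above, the guest translation regime is probed through TTBCR.EAE (bit 31), and the fault status written to IFSR/DFSR differs between the long- and short-descriptor formats. As a reading aid, the two raw values restated with invented names (the code writes the constants directly):

	/*
	 * Names invented for clarity. In the LPAE (long-descriptor)
	 * format, bit 9 flags the format and 0x22 is the "debug event"
	 * status code; the short-descriptor equivalent is plain 2.
	 */
	#define FSR_FMT_LPAE		(1 << 9)
	#define FSR_DEBUG_LPAE		(FSR_FMT_LPAE | 0x22)
	#define FSR_DEBUG_SHORT		2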
diff --git a/arch/arm/kvm/guest.c b/arch/arm/kvm/guest.c
index 5fa69d7..12cbb68 100644
--- a/arch/arm/kvm/guest.c
+++ b/arch/arm/kvm/guest.c
@@ -25,7 +25,6 @@
#include <asm/cputype.h>
#include <asm/uaccess.h>
#include <asm/kvm.h>
-#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
@@ -55,7 +54,7 @@
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
u32 __user *uaddr = (u32 __user *)(long)reg->addr;
- struct kvm_regs *regs = &vcpu->arch.regs;
+ struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
u64 off;
if (KVM_REG_SIZE(reg->id) != 4)
@@ -72,7 +71,7 @@
static int set_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
u32 __user *uaddr = (u32 __user *)(long)reg->addr;
- struct kvm_regs *regs = &vcpu->arch.regs;
+ struct kvm_regs *regs = &vcpu->arch.ctxt.gp_regs;
u64 off, val;
if (KVM_REG_SIZE(reg->id) != 4)
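
Both accessors sit behind the generic ONE_REG interface, so nothing changes for userspace: core registers are still read and written through the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls. A minimal sketch of the userspace side (error handling elided; vcpu_fd and reg_id are placeholders):

	#include <stdint.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Read one 32-bit core register via KVM_GET_ONE_REG. */
	static int get_reg_u32(int vcpu_fd, uint64_t reg_id, uint32_t *val)
	{
		struct kvm_one_reg reg = {
			.id   = reg_id,		/* KVM_REG_ARM | KVM_REG_SIZE_U32 | ... */
			.addr = (uintptr_t)val,	/* kernel copies the value here */
		};

		return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
	}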
diff --git a/arch/arm/kvm/handle_exit.c b/arch/arm/kvm/handle_exit.c
index 3ede90d..3f1ef0d 100644
--- a/arch/arm/kvm/handle_exit.c
+++ b/arch/arm/kvm/handle_exit.c
@@ -147,13 +147,6 @@
switch (exception_index) {
case ARM_EXCEPTION_IRQ:
return 1;
- case ARM_EXCEPTION_UNDEFINED:
- kvm_err("Undefined exception in Hyp mode at: %#08lx\n",
- kvm_vcpu_get_hyp_pc(vcpu));
- BUG();
- panic("KVM: Hypervisor undefined exception!\n");
- case ARM_EXCEPTION_DATA_ABORT:
- case ARM_EXCEPTION_PREF_ABORT:
case ARM_EXCEPTION_HVC:
/*
* See ARM ARM B1.14.1: "Hyp traps on instructions
diff --git a/arch/arm/kvm/hyp/Makefile b/arch/arm/kvm/hyp/Makefile
new file mode 100644
index 0000000..8dfa5f7
--- /dev/null
+++ b/arch/arm/kvm/hyp/Makefile
@@ -0,0 +1,17 @@
+#
+# Makefile for Kernel-based Virtual Machine module, HYP part
+#
+
+KVM=../../../../virt/kvm
+
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+
+obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
+obj-$(CONFIG_KVM_ARM_HOST) += cp15-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += vfp.o
+obj-$(CONFIG_KVM_ARM_HOST) += banked-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += switch.o
+obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
diff --git a/arch/arm/kvm/hyp/banked-sr.c b/arch/arm/kvm/hyp/banked-sr.c
new file mode 100644
index 0000000..111bda8
--- /dev/null
+++ b/arch/arm/kvm/hyp/banked-sr.c
@@ -0,0 +1,77 @@
+/*
+ * Original code:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/kvm_hyp.h>
+
+__asm__(".arch_extension virt");
+
+void __hyp_text __banked_save_state(struct kvm_cpu_context *ctxt)
+{
+ ctxt->gp_regs.usr_regs.ARM_sp = read_special(SP_usr);
+ ctxt->gp_regs.usr_regs.ARM_pc = read_special(ELR_hyp);
+ ctxt->gp_regs.usr_regs.ARM_cpsr = read_special(SPSR);
+ ctxt->gp_regs.KVM_ARM_SVC_sp = read_special(SP_svc);
+ ctxt->gp_regs.KVM_ARM_SVC_lr = read_special(LR_svc);
+ ctxt->gp_regs.KVM_ARM_SVC_spsr = read_special(SPSR_svc);
+ ctxt->gp_regs.KVM_ARM_ABT_sp = read_special(SP_abt);
+ ctxt->gp_regs.KVM_ARM_ABT_lr = read_special(LR_abt);
+ ctxt->gp_regs.KVM_ARM_ABT_spsr = read_special(SPSR_abt);
+ ctxt->gp_regs.KVM_ARM_UND_sp = read_special(SP_und);
+ ctxt->gp_regs.KVM_ARM_UND_lr = read_special(LR_und);
+ ctxt->gp_regs.KVM_ARM_UND_spsr = read_special(SPSR_und);
+ ctxt->gp_regs.KVM_ARM_IRQ_sp = read_special(SP_irq);
+ ctxt->gp_regs.KVM_ARM_IRQ_lr = read_special(LR_irq);
+ ctxt->gp_regs.KVM_ARM_IRQ_spsr = read_special(SPSR_irq);
+ ctxt->gp_regs.KVM_ARM_FIQ_r8 = read_special(R8_fiq);
+ ctxt->gp_regs.KVM_ARM_FIQ_r9 = read_special(R9_fiq);
+ ctxt->gp_regs.KVM_ARM_FIQ_r10 = read_special(R10_fiq);
+ ctxt->gp_regs.KVM_ARM_FIQ_fp = read_special(R11_fiq);
+ ctxt->gp_regs.KVM_ARM_FIQ_ip = read_special(R12_fiq);
+ ctxt->gp_regs.KVM_ARM_FIQ_sp = read_special(SP_fiq);
+ ctxt->gp_regs.KVM_ARM_FIQ_lr = read_special(LR_fiq);
+ ctxt->gp_regs.KVM_ARM_FIQ_spsr = read_special(SPSR_fiq);
+}
+
+void __hyp_text __banked_restore_state(struct kvm_cpu_context *ctxt)
+{
+ write_special(ctxt->gp_regs.usr_regs.ARM_sp, SP_usr);
+ write_special(ctxt->gp_regs.usr_regs.ARM_pc, ELR_hyp);
+ write_special(ctxt->gp_regs.usr_regs.ARM_cpsr, SPSR_cxsf);
+ write_special(ctxt->gp_regs.KVM_ARM_SVC_sp, SP_svc);
+ write_special(ctxt->gp_regs.KVM_ARM_SVC_lr, LR_svc);
+ write_special(ctxt->gp_regs.KVM_ARM_SVC_spsr, SPSR_svc);
+ write_special(ctxt->gp_regs.KVM_ARM_ABT_sp, SP_abt);
+ write_special(ctxt->gp_regs.KVM_ARM_ABT_lr, LR_abt);
+ write_special(ctxt->gp_regs.KVM_ARM_ABT_spsr, SPSR_abt);
+ write_special(ctxt->gp_regs.KVM_ARM_UND_sp, SP_und);
+ write_special(ctxt->gp_regs.KVM_ARM_UND_lr, LR_und);
+ write_special(ctxt->gp_regs.KVM_ARM_UND_spsr, SPSR_und);
+ write_special(ctxt->gp_regs.KVM_ARM_IRQ_sp, SP_irq);
+ write_special(ctxt->gp_regs.KVM_ARM_IRQ_lr, LR_irq);
+ write_special(ctxt->gp_regs.KVM_ARM_IRQ_spsr, SPSR_irq);
+ write_special(ctxt->gp_regs.KVM_ARM_FIQ_r8, R8_fiq);
+ write_special(ctxt->gp_regs.KVM_ARM_FIQ_r9, R9_fiq);
+ write_special(ctxt->gp_regs.KVM_ARM_FIQ_r10, R10_fiq);
+ write_special(ctxt->gp_regs.KVM_ARM_FIQ_fp, R11_fiq);
+ write_special(ctxt->gp_regs.KVM_ARM_FIQ_ip, R12_fiq);
+ write_special(ctxt->gp_regs.KVM_ARM_FIQ_sp, SP_fiq);
+ write_special(ctxt->gp_regs.KVM_ARM_FIQ_lr, LR_fiq);
+ write_special(ctxt->gp_regs.KVM_ARM_FIQ_spsr, SPSR_fiq);
+}
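
banked-sr.c leans on read_special()/write_special() helpers to access the banked registers by name through the virtualization extensions' mrs/msr forms. Their definition is not part of this hunk; a plausible sketch, assuming thin inline-asm wrappers along the lines of the helpers in asm/kvm_hyp.h:

	#include <linux/stringify.h>

	/* Sketch: one mrs/msr per access, register name spliced in textually. */
	#define read_special(r) ({					\
		u32 __val;						\
		asm volatile("mrs %0, " __stringify(r) : "=r" (__val));	\
		__val;							\
	})

	#define write_special(v, r)					\
		asm volatile("msr " __stringify(r) ", %0" : : "r" (v))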
diff --git a/arch/arm/kvm/hyp/cp15-sr.c b/arch/arm/kvm/hyp/cp15-sr.c
new file mode 100644
index 0000000..c478281
--- /dev/null
+++ b/arch/arm/kvm/hyp/cp15-sr.c
@@ -0,0 +1,84 @@
+/*
+ * Original code:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/kvm_hyp.h>
+
+static u64 *cp15_64(struct kvm_cpu_context *ctxt, int idx)
+{
+ return (u64 *)(ctxt->cp15 + idx);
+}
+
+void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+{
+ ctxt->cp15[c0_MPIDR] = read_sysreg(VMPIDR);
+ ctxt->cp15[c0_CSSELR] = read_sysreg(CSSELR);
+ ctxt->cp15[c1_SCTLR] = read_sysreg(SCTLR);
+ ctxt->cp15[c1_CPACR] = read_sysreg(CPACR);
+ *cp15_64(ctxt, c2_TTBR0) = read_sysreg(TTBR0);
+ *cp15_64(ctxt, c2_TTBR1) = read_sysreg(TTBR1);
+ ctxt->cp15[c2_TTBCR] = read_sysreg(TTBCR);
+ ctxt->cp15[c3_DACR] = read_sysreg(DACR);
+ ctxt->cp15[c5_DFSR] = read_sysreg(DFSR);
+ ctxt->cp15[c5_IFSR] = read_sysreg(IFSR);
+ ctxt->cp15[c5_ADFSR] = read_sysreg(ADFSR);
+ ctxt->cp15[c5_AIFSR] = read_sysreg(AIFSR);
+ ctxt->cp15[c6_DFAR] = read_sysreg(DFAR);
+ ctxt->cp15[c6_IFAR] = read_sysreg(IFAR);
+ *cp15_64(ctxt, c7_PAR) = read_sysreg(PAR);
+ ctxt->cp15[c10_PRRR] = read_sysreg(PRRR);
+ ctxt->cp15[c10_NMRR] = read_sysreg(NMRR);
+ ctxt->cp15[c10_AMAIR0] = read_sysreg(AMAIR0);
+ ctxt->cp15[c10_AMAIR1] = read_sysreg(AMAIR1);
+ ctxt->cp15[c12_VBAR] = read_sysreg(VBAR);
+ ctxt->cp15[c13_CID] = read_sysreg(CID);
+ ctxt->cp15[c13_TID_URW] = read_sysreg(TID_URW);
+ ctxt->cp15[c13_TID_URO] = read_sysreg(TID_URO);
+ ctxt->cp15[c13_TID_PRIV] = read_sysreg(TID_PRIV);
+ ctxt->cp15[c14_CNTKCTL] = read_sysreg(CNTKCTL);
+}
+
+void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+{
+ write_sysreg(ctxt->cp15[c0_MPIDR], VMPIDR);
+ write_sysreg(ctxt->cp15[c0_CSSELR], CSSELR);
+ write_sysreg(ctxt->cp15[c1_SCTLR], SCTLR);
+ write_sysreg(ctxt->cp15[c1_CPACR], CPACR);
+ write_sysreg(*cp15_64(ctxt, c2_TTBR0), TTBR0);
+ write_sysreg(*cp15_64(ctxt, c2_TTBR1), TTBR1);
+ write_sysreg(ctxt->cp15[c2_TTBCR], TTBCR);
+ write_sysreg(ctxt->cp15[c3_DACR], DACR);
+ write_sysreg(ctxt->cp15[c5_DFSR], DFSR);
+ write_sysreg(ctxt->cp15[c5_IFSR], IFSR);
+ write_sysreg(ctxt->cp15[c5_ADFSR], ADFSR);
+ write_sysreg(ctxt->cp15[c5_AIFSR], AIFSR);
+ write_sysreg(ctxt->cp15[c6_DFAR], DFAR);
+ write_sysreg(ctxt->cp15[c6_IFAR], IFAR);
+ write_sysreg(*cp15_64(ctxt, c7_PAR), PAR);
+ write_sysreg(ctxt->cp15[c10_PRRR], PRRR);
+ write_sysreg(ctxt->cp15[c10_NMRR], NMRR);
+ write_sysreg(ctxt->cp15[c10_AMAIR0], AMAIR0);
+ write_sysreg(ctxt->cp15[c10_AMAIR1], AMAIR1);
+ write_sysreg(ctxt->cp15[c12_VBAR], VBAR);
+ write_sysreg(ctxt->cp15[c13_CID], CID);
+ write_sysreg(ctxt->cp15[c13_TID_URW], TID_URW);
+ write_sysreg(ctxt->cp15[c13_TID_URO], TID_URO);
+ write_sysreg(ctxt->cp15[c13_TID_PRIV], TID_PRIV);
+ write_sysreg(ctxt->cp15[c14_CNTKCTL], CNTKCTL);
+}
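
The read_sysreg()/write_sysreg() calls take symbolic names (VMPIDR, SCTLR, PAR, ...) that must carry the whole mrc/mcr operand tuple. One way to build that machinery, sketched under the assumption that each register name expands to an (insn, insn, operand-string, type) tuple in the style of asm/kvm_hyp.h:

	#include <linux/stringify.h>

	/* Each name carries its read insn, write insn, operands and type. */
	#define __ACCESS_CP15(CRn, Op1, CRm, Op2)				\
		"mrc", "mcr", __stringify(p15, Op1, %0, CRn, CRm, Op2), u32

	#define SCTLR	__ACCESS_CP15(c1, 0, c0, 0)	/* example register */

	#define __read_sysreg(r, w, c, t) ({				\
		t __val;						\
		asm volatile(r " " c : "=r" (__val));			\
		__val;							\
	})
	#define read_sysreg(...)	__read_sysreg(__VA_ARGS__)

	#define __write_sysreg(v, r, w, c, t)	asm volatile(w " " c : : "r" ((t)(v)))
	#define write_sysreg(v, ...)	__write_sysreg(v, __VA_ARGS__)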
diff --git a/arch/arm/kvm/hyp/entry.S b/arch/arm/kvm/hyp/entry.S
new file mode 100644
index 0000000..21c2388
--- /dev/null
+++ b/arch/arm/kvm/hyp/entry.S
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+*/
+
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/kvm_arm.h>
+
+ .arch_extension virt
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+#define USR_REGS_OFFSET (CPU_CTXT_GP_REGS + GP_REGS_USR)
+
+/* int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host) */
+ENTRY(__guest_enter)
+ @ Save host registers
+ add r1, r1, #(USR_REGS_OFFSET + S_R4)
+ stm r1!, {r4-r12}
+ str lr, [r1, #4] @ Skip SP_usr (already saved)
+
+ @ Restore guest registers
+ add r0, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
+ ldr lr, [r0, #S_LR]
+ ldm r0, {r0-r12}
+
+ clrex
+ eret
+ENDPROC(__guest_enter)
+
+ENTRY(__guest_exit)
+ /*
+ * return convention:
+ * guest r0, r1, r2 saved on the stack
+ * r0: vcpu pointer
+ * r1: exception code
+ */
+
+ add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R3)
+ stm r2!, {r3-r12}
+ str lr, [r2, #4]
+ add r2, r0, #(VCPU_GUEST_CTXT + USR_REGS_OFFSET + S_R0)
+ pop {r3, r4, r5} @ r0, r1, r2
+ stm r2, {r3-r5}
+
+ ldr r0, [r0, #VCPU_HOST_CTXT]
+ add r0, r0, #(USR_REGS_OFFSET + S_R4)
+ ldm r0!, {r4-r12}
+ ldr lr, [r0, #4]
+
+ mov r0, r1
+ bx lr
+ENDPROC(__guest_exit)
+
+/*
+ * If VFPv3 support is not available, then we will not switch the VFP
+ * registers; however cp10 and cp11 accesses will still trap and fallback
+ * to the regular coprocessor emulation code, which currently will
+ * inject an undefined exception to the guest.
+ */
+#ifdef CONFIG_VFPv3
+ENTRY(__vfp_guest_restore)
+ push {r3, r4, lr}
+
+ @ NEON/VFP used. Turn on VFP access.
+ mrc p15, 4, r1, c1, c1, 2 @ HCPTR
+ bic r1, r1, #(HCPTR_TCP(10) | HCPTR_TCP(11))
+ mcr p15, 4, r1, c1, c1, 2 @ HCPTR
+ isb
+
+ @ Switch VFP/NEON hardware state to the guest's
+ mov r4, r0
+ ldr r0, [r0, #VCPU_HOST_CTXT]
+ add r0, r0, #CPU_CTXT_VFP
+ bl __vfp_save_state
+ add r0, r4, #(VCPU_GUEST_CTXT + CPU_CTXT_VFP)
+ bl __vfp_restore_state
+
+ pop {r3, r4, lr}
+ pop {r0, r1, r2}
+ clrex
+ eret
+ENDPROC(__vfp_guest_restore)
+#endif
+
+ .popsection
+
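
For reference, the C-visible contract of the enter path, written as an assumed header excerpt (matching the comment above __guest_enter; __guest_exit and __vfp_guest_restore are only reached from the HYP vectors, never called from C):

	/* Returns the exception code once the guest traps back to HYP. */
	int __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host);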
diff --git a/arch/arm/kvm/hyp/hyp-entry.S b/arch/arm/kvm/hyp/hyp-entry.S
new file mode 100644
index 0000000..7809138
--- /dev/null
+++ b/arch/arm/kvm/hyp/hyp-entry.S
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+ */
+
+#include <linux/linkage.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+
+ .arch_extension virt
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+.macro load_vcpu reg
+ mrc p15, 4, \reg, c13, c0, 2 @ HTPIDR
+.endm
+
+/********************************************************************
+ * Hypervisor exception vector and handlers
+ *
+ *
+ * The KVM/ARM Hypervisor ABI is defined as follows:
+ *
+ * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
+ * instruction is issued since all traps are disabled when running the host
+ * kernel as per the Hyp-mode initialization at boot time.
+ *
+ * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
+ * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
+ * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
+ * instructions are called from within Hyp-mode.
+ *
+ * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
+ * Switching to Hyp mode is done through a simple HVC #0 instruction. The
+ * exception vector code will check that the HVC comes from VMID==0.
+ * - r0 contains a pointer to a HYP function
+ * - r1, r2, and r3 contain arguments to the above function.
+ * - The HYP function will be called with its arguments in r0, r1 and r2.
+ * On HYP function return, we return directly to SVC.
+ *
+ * Note that the above is used to execute code in Hyp-mode from a host-kernel
+ * point of view, and is a different concept from performing a world-switch and
+ * executing guest code SVC mode (with a VMID != 0).
+ */
+
+ .align 5
+__kvm_hyp_vector:
+ .global __kvm_hyp_vector
+
+ @ Hyp-mode exception vector
+ W(b) hyp_reset
+ W(b) hyp_undef
+ W(b) hyp_svc
+ W(b) hyp_pabt
+ W(b) hyp_dabt
+ W(b) hyp_hvc
+ W(b) hyp_irq
+ W(b) hyp_fiq
+
+.macro invalid_vector label, cause
+ .align
+\label: mov r0, #\cause
+ b __hyp_panic
+.endm
+
+ invalid_vector hyp_reset ARM_EXCEPTION_RESET
+ invalid_vector hyp_undef ARM_EXCEPTION_UNDEFINED
+ invalid_vector hyp_svc ARM_EXCEPTION_SOFTWARE
+ invalid_vector hyp_pabt ARM_EXCEPTION_PREF_ABORT
+ invalid_vector hyp_dabt ARM_EXCEPTION_DATA_ABORT
+ invalid_vector hyp_fiq ARM_EXCEPTION_FIQ
+
+ENTRY(__hyp_do_panic)
+ mrs lr, cpsr
+ bic lr, lr, #MODE_MASK
+ orr lr, lr, #SVC_MODE
+THUMB( orr lr, lr, #PSR_T_BIT )
+ msr spsr_cxsf, lr
+ ldr lr, =panic
+ msr ELR_hyp, lr
+ ldr lr, =kvm_call_hyp
+ clrex
+ eret
+ENDPROC(__hyp_do_panic)
+
+hyp_hvc:
+ /*
+ * Getting here is either because of a trap from a guest,
+ * or from executing HVC from the host kernel, which means
+ * "do something in Hyp mode".
+ */
+ push {r0, r1, r2}
+
+ @ Check syndrome register
+ mrc p15, 4, r1, c5, c2, 0 @ HSR
+ lsr r0, r1, #HSR_EC_SHIFT
+ cmp r0, #HSR_EC_HVC
+ bne guest_trap @ Not HVC instr.
+
+ /*
+ * Let's check if the HVC came from VMID 0 and allow simple
+ * switch to Hyp mode
+ */
+ mrrc p15, 6, r0, r2, c2
+ lsr r2, r2, #16
+ and r2, r2, #0xff
+ cmp r2, #0
+ bne guest_trap @ Guest called HVC
+
+ /*
+ * Getting here means host called HVC, we shift parameters and branch
+ * to Hyp function.
+ */
+ pop {r0, r1, r2}
+
+ /* Check for __hyp_get_vectors */
+ cmp r0, #-1
+ mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
+ beq 1f
+
+ push {lr}
+
+ mov lr, r0
+ mov r0, r1
+ mov r1, r2
+ mov r2, r3
+
+THUMB( orr lr, #1)
+ blx lr @ Call the HYP function
+
+ pop {lr}
+1: eret
+
+guest_trap:
+ load_vcpu r0 @ Load VCPU pointer to r0
+
+#ifdef CONFIG_VFPv3
+ @ Check for a VFP access
+ lsr r1, r1, #HSR_EC_SHIFT
+ cmp r1, #HSR_EC_CP_0_13
+ beq __vfp_guest_restore
+#endif
+
+ mov r1, #ARM_EXCEPTION_HVC
+ b __guest_exit
+
+hyp_irq:
+ push {r0, r1, r2}
+ mov r1, #ARM_EXCEPTION_IRQ
+ load_vcpu r0 @ Load VCPU pointer to r0
+ b __guest_exit
+
+ .ltorg
+
+ .popsection
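
The host half of the Hyp-ABI documented above is wrapped by kvm_call_hyp(): the HYP function pointer travels in r0 and up to three arguments in r1-r3. A sketch of a host-mode caller (the TLB-flush callee appears in hyp/tlb.c below):

	unsigned long vectors;

	/* SVC-mode caller: expands to hvc #0 with the target in r0. */
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);

	/* The special pointer value -1 returns HVBAR instead of calling. */
	vectors = kvm_call_hyp((void *)-1UL);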
diff --git a/arch/arm/kvm/hyp/s2-setup.c b/arch/arm/kvm/hyp/s2-setup.c
new file mode 100644
index 0000000..7be39af
--- /dev/null
+++ b/arch/arm/kvm/hyp/s2-setup.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/types.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+
+void __hyp_text __init_stage2_translation(void)
+{
+ u64 val;
+
+ val = read_sysreg(VTCR) & ~VTCR_MASK;
+
+ val |= read_sysreg(HTCR) & VTCR_HTCR_SH;
+ val |= KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S;
+
+ write_sysreg(val, VTCR);
+}
diff --git a/arch/arm/kvm/hyp/switch.c b/arch/arm/kvm/hyp/switch.c
new file mode 100644
index 0000000..b13caa9
--- /dev/null
+++ b/arch/arm/kvm/hyp/switch.c
@@ -0,0 +1,232 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+
+__asm__(".arch_extension virt");
+
+/*
+ * Activate the traps, saving the host's fpexc register before
+ * overwriting it. We'll restore it on VM exit.
+ */
+static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu, u32 *fpexc_host)
+{
+ u32 val;
+
+ /*
+ * We are about to set HCPTR.TCP10/11 to trap all floating point
+ * register accesses to HYP, however, the ARM ARM clearly states that
+ * traps are only taken to HYP if the operation would not otherwise
+ * trap to SVC. Therefore, always make sure that for 32-bit guests,
+ * we set FPEXC.EN to prevent traps to SVC, when setting the TCP bits.
+ */
+ val = read_sysreg(VFP_FPEXC);
+ *fpexc_host = val;
+ if (!(val & FPEXC_EN)) {
+ write_sysreg(val | FPEXC_EN, VFP_FPEXC);
+ isb();
+ }
+
+ write_sysreg(vcpu->arch.hcr | vcpu->arch.irq_lines, HCR);
+ /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
+ write_sysreg(HSTR_T(15), HSTR);
+ write_sysreg(HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11), HCPTR);
+ val = read_sysreg(HDCR);
+ write_sysreg(val | HDCR_TPM | HDCR_TPMCR, HDCR);
+}
+
+static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
+{
+ u32 val;
+
+ write_sysreg(0, HCR);
+ write_sysreg(0, HSTR);
+ val = read_sysreg(HDCR);
+ write_sysreg(val & ~(HDCR_TPM | HDCR_TPMCR), HDCR);
+ write_sysreg(0, HCPTR);
+}
+
+static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ write_sysreg(kvm->arch.vttbr, VTTBR);
+ write_sysreg(vcpu->arch.midr, VPIDR);
+}
+
+static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
+{
+ write_sysreg(0, VTTBR);
+ write_sysreg(read_sysreg(MIDR), VPIDR);
+}
+
+static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
+{
+ __vgic_v2_save_state(vcpu);
+}
+
+static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
+{
+ __vgic_v2_restore_state(vcpu);
+}
+
+static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
+{
+ u32 hsr = read_sysreg(HSR);
+ u8 ec = hsr >> HSR_EC_SHIFT;
+ u32 hpfar, far;
+
+ vcpu->arch.fault.hsr = hsr;
+
+ if (ec == HSR_EC_IABT)
+ far = read_sysreg(HIFAR);
+ else if (ec == HSR_EC_DABT)
+ far = read_sysreg(HDFAR);
+ else
+ return true;
+
+ /*
+ * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
+ *
+ * Abort on the stage 2 translation for a memory access from a
+ * Non-secure PL1 or PL0 mode:
+ *
+ * For any Access flag fault or Translation fault, and also for any
+ * Permission fault on the stage 2 translation of a memory access
+ * made as part of a translation table walk for a stage 1 translation,
+ * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
+ * is UNKNOWN.
+ */
+ if (!(hsr & HSR_DABT_S1PTW) && (hsr & HSR_FSC_TYPE) == FSC_PERM) {
+ u64 par, tmp;
+
+ par = read_sysreg(PAR);
+ write_sysreg(far, ATS1CPR);
+ isb();
+
+ tmp = read_sysreg(PAR);
+ write_sysreg(par, PAR);
+
+ if (unlikely(tmp & 1))
+ return false; /* Translation failed, back to guest */
+
+ hpfar = ((tmp >> 12) & ((1UL << 28) - 1)) << 4;
+ } else {
+ hpfar = read_sysreg(HPFAR);
+ }
+
+ vcpu->arch.fault.hxfar = far;
+ vcpu->arch.fault.hpfar = hpfar;
+ return true;
+}
+
+static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
+{
+ struct kvm_cpu_context *host_ctxt;
+ struct kvm_cpu_context *guest_ctxt;
+ bool fp_enabled;
+ u64 exit_code;
+ u32 fpexc;
+
+ vcpu = kern_hyp_va(vcpu);
+ write_sysreg(vcpu, HTPIDR);
+
+ host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ guest_ctxt = &vcpu->arch.ctxt;
+
+ __sysreg_save_state(host_ctxt);
+ __banked_save_state(host_ctxt);
+
+ __activate_traps(vcpu, &fpexc);
+ __activate_vm(vcpu);
+
+ __vgic_restore_state(vcpu);
+ __timer_restore_state(vcpu);
+
+ __sysreg_restore_state(guest_ctxt);
+ __banked_restore_state(guest_ctxt);
+
+ /* Jump in the fire! */
+again:
+ exit_code = __guest_enter(vcpu, host_ctxt);
+ /* And we're baaack! */
+
+ if (exit_code == ARM_EXCEPTION_HVC && !__populate_fault_info(vcpu))
+ goto again;
+
+ fp_enabled = __vfp_enabled();
+
+ __banked_save_state(guest_ctxt);
+ __sysreg_save_state(guest_ctxt);
+ __timer_save_state(vcpu);
+ __vgic_save_state(vcpu);
+
+ __deactivate_traps(vcpu);
+ __deactivate_vm(vcpu);
+
+ __banked_restore_state(host_ctxt);
+ __sysreg_restore_state(host_ctxt);
+
+ if (fp_enabled) {
+ __vfp_save_state(&guest_ctxt->vfp);
+ __vfp_restore_state(&host_ctxt->vfp);
+ }
+
+ write_sysreg(fpexc, VFP_FPEXC);
+
+ return exit_code;
+}
+
+__alias(__guest_run) int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
+
+static const char * const __hyp_panic_string[] = {
+ [ARM_EXCEPTION_RESET] = "\nHYP panic: RST PC:%08x CPSR:%08x",
+ [ARM_EXCEPTION_UNDEFINED] = "\nHYP panic: UNDEF PC:%08x CPSR:%08x",
+ [ARM_EXCEPTION_SOFTWARE] = "\nHYP panic: SVC PC:%08x CPSR:%08x",
+ [ARM_EXCEPTION_PREF_ABORT] = "\nHYP panic: PABRT PC:%08x CPSR:%08x",
+ [ARM_EXCEPTION_DATA_ABORT] = "\nHYP panic: DABRT PC:%08x ADDR:%08x",
+ [ARM_EXCEPTION_IRQ] = "\nHYP panic: IRQ PC:%08x CPSR:%08x",
+ [ARM_EXCEPTION_FIQ] = "\nHYP panic: FIQ PC:%08x CPSR:%08x",
+ [ARM_EXCEPTION_HVC] = "\nHYP panic: HVC PC:%08x CPSR:%08x",
+};
+
+void __hyp_text __noreturn __hyp_panic(int cause)
+{
+ u32 elr = read_special(ELR_hyp);
+ u32 val;
+
+ if (cause == ARM_EXCEPTION_DATA_ABORT)
+ val = read_sysreg(HDFAR);
+ else
+ val = read_special(SPSR);
+
+ if (read_sysreg(VTTBR)) {
+ struct kvm_vcpu *vcpu;
+ struct kvm_cpu_context *host_ctxt;
+
+ vcpu = (struct kvm_vcpu *)read_sysreg(HTPIDR);
+ host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
+ __deactivate_traps(vcpu);
+ __deactivate_vm(vcpu);
+ __sysreg_restore_state(host_ctxt);
+ }
+
+ /* Call panic for real */
+ __hyp_do_panic(__hyp_panic_string[cause], elr, val);
+
+ unreachable();
+}
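
__kvm_vcpu_run, the __alias of __guest_run above, is what the host invokes for every guest entry; under the Hyp-ABI it goes through kvm_call_hyp() from the vcpu run loop, roughly as follows (sketch; the surrounding loop in arch/arm/kvm/arm.c is elided):

	/*
	 * One world switch: save host state, enter the guest, come back
	 * in SVC mode with the exit code (ARM_EXCEPTION_IRQ, _HVC, ...).
	 */
	ret = kvm_call_hyp(__kvm_vcpu_run, vcpu);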
diff --git a/arch/arm/kvm/hyp/tlb.c b/arch/arm/kvm/hyp/tlb.c
new file mode 100644
index 0000000..a263600
--- /dev/null
+++ b/arch/arm/kvm/hyp/tlb.c
@@ -0,0 +1,70 @@
+/*
+ * Original code:
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * Mostly rewritten in C by Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <asm/kvm_hyp.h>
+
+/**
+ * Flush per-VMID TLBs
+ *
+ * __kvm_tlb_flush_vmid(struct kvm *kvm);
+ *
+ * We rely on the hardware to broadcast the TLB invalidation to all CPUs
+ * inside the inner-shareable domain (which is the case for all v7
+ * implementations). If we come across a non-IS SMP implementation, we'll
+ * have to use an IPI based mechanism. Until then, we stick to the simple
+ * hardware assisted version.
+ *
+ * As v7 does not support flushing per IPA, just nuke the whole TLB
+ * instead, ignoring the ipa value.
+ */
+static void __hyp_text __tlb_flush_vmid(struct kvm *kvm)
+{
+ dsb(ishst);
+
+ /* Switch to requested VMID */
+ kvm = kern_hyp_va(kvm);
+ write_sysreg(kvm->arch.vttbr, VTTBR);
+ isb();
+
+ write_sysreg(0, TLBIALLIS);
+ dsb(ish);
+ isb();
+
+ write_sysreg(0, VTTBR);
+}
+
+__alias(__tlb_flush_vmid) void __kvm_tlb_flush_vmid(struct kvm *kvm);
+
+static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
+{
+ __tlb_flush_vmid(kvm);
+}
+
+__alias(__tlb_flush_vmid_ipa) void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm,
+ phys_addr_t ipa);
+
+static void __hyp_text __tlb_flush_vm_context(void)
+{
+ write_sysreg(0, TLBIALLNSNHIS);
+ write_sysreg(0, ICIALLUIS);
+ dsb(ish);
+}
+
+__alias(__tlb_flush_vm_context) void __kvm_flush_vm_context(void);
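
tlb.c keeps its working functions static __hyp_text and then publishes them under the ABI-visible __kvm_* names with __alias(). Assuming the usual GCC attribute wrapper from compiler-gcc.h:

	/*
	 * Sketch: the declared symbol becomes an alias of f, so e.g.
	 * __kvm_tlb_flush_vmid and __tlb_flush_vmid are one function
	 * whose code lives entirely in the .hyp.text section.
	 */
	#define __alias(f)	__attribute__((alias(#f)))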
diff --git a/arch/arm/kvm/hyp/vfp.S b/arch/arm/kvm/hyp/vfp.S
new file mode 100644
index 0000000..7c297e8
--- /dev/null
+++ b/arch/arm/kvm/hyp/vfp.S
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2012 - Virtual Open Systems and Columbia University
+ * Author: Christoffer Dall <c.dall@virtualopensystems.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/linkage.h>
+#include <asm/vfpmacros.h>
+
+ .text
+ .pushsection .hyp.text, "ax"
+
+/* void __vfp_save_state(struct vfp_hard_struct *vfp); */
+ENTRY(__vfp_save_state)
+ push {r4, r5}
+ VFPFMRX r1, FPEXC
+
+ @ Make sure VFP is *really* enabled so we can touch the registers.
+ orr r5, r1, #FPEXC_EN
+ tst r5, #FPEXC_EX @ Check for VFP Subarchitecture
+ bic r5, r5, #FPEXC_EX @ FPEXC_EX disable
+ VFPFMXR FPEXC, r5
+ isb
+
+ VFPFMRX r2, FPSCR
+ beq 1f
+
+ @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
+ @ we only need to save them if FPEXC_EX is set.
+ VFPFMRX r3, FPINST
+ tst r5, #FPEXC_FP2V
+ VFPFMRX r4, FPINST2, ne @ vmrsne
+1:
+ VFPFSTMIA r0, r5 @ Save VFP registers
+ stm r0, {r1-r4} @ Save FPEXC, FPSCR, FPINST, FPINST2
+ pop {r4, r5}
+ bx lr
+ENDPROC(__vfp_save_state)
+
+/* void __vfp_restore_state(struct vfp_hard_struct *vfp);
+ * Assume FPEXC_EN is on and FPEXC_EX is off */
+ENTRY(__vfp_restore_state)
+ VFPFLDMIA r0, r1 @ Load VFP registers
+ ldm r0, {r0-r3} @ Load FPEXC, FPSCR, FPINST, FPINST2
+
+ VFPFMXR FPSCR, r1
+ tst r0, #FPEXC_EX @ Check for VFP Subarchitecture
+ beq 1f
+ VFPFMXR FPINST, r2
+ tst r0, #FPEXC_FP2V
+ VFPFMXR FPINST2, r3, ne
+1:
+ VFPFMXR FPEXC, r0 @ FPEXC (last, in case !EN)
+ bx lr
+ENDPROC(__vfp_restore_state)
+
+ .popsection
diff --git a/arch/arm/kvm/init.S b/arch/arm/kvm/init.S
index 3988e72..1f9ae17 100644
--- a/arch/arm/kvm/init.S
+++ b/arch/arm/kvm/init.S
@@ -84,14 +84,6 @@
orr r0, r0, r1
mcr p15, 4, r0, c2, c0, 2 @ HTCR
- mrc p15, 4, r1, c2, c1, 2 @ VTCR
- ldr r2, =VTCR_MASK
- bic r1, r1, r2
- bic r0, r0, #(~VTCR_HTCR_SH) @ clear non-reusable HTCR bits
- orr r1, r0, r1
- orr r1, r1, #(KVM_VTCR_SL0 | KVM_VTCR_T0SZ | KVM_VTCR_S)
- mcr p15, 4, r1, c2, c1, 2 @ VTCR
-
@ Use the same memory attributes for hyp. accesses as the kernel
@ (copy MAIRx to HMAIRx).
mrc p15, 0, r0, c10, c2, 0
diff --git a/arch/arm/kvm/interrupts.S b/arch/arm/kvm/interrupts.S
index 900ef6d..b1bd316 100644
--- a/arch/arm/kvm/interrupts.S
+++ b/arch/arm/kvm/interrupts.S
@@ -17,211 +17,14 @@
*/
#include <linux/linkage.h>
-#include <linux/const.h>
-#include <asm/unified.h>
-#include <asm/page.h>
-#include <asm/ptrace.h>
-#include <asm/asm-offsets.h>
-#include <asm/kvm_asm.h>
-#include <asm/kvm_arm.h>
-#include <asm/vfpmacros.h>
-#include "interrupts_head.S"
.text
-__kvm_hyp_code_start:
- .globl __kvm_hyp_code_start
-
-/********************************************************************
- * Flush per-VMID TLBs
- *
- * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
- *
- * We rely on the hardware to broadcast the TLB invalidation to all CPUs
- * inside the inner-shareable domain (which is the case for all v7
- * implementations). If we come across a non-IS SMP implementation, we'll
- * have to use an IPI based mechanism. Until then, we stick to the simple
- * hardware assisted version.
- *
- * As v7 does not support flushing per IPA, just nuke the whole TLB
- * instead, ignoring the ipa value.
- */
-ENTRY(__kvm_tlb_flush_vmid_ipa)
- push {r2, r3}
-
- dsb ishst
- add r0, r0, #KVM_VTTBR
- ldrd r2, r3, [r0]
- mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR
- isb
- mcr p15, 0, r0, c8, c3, 0 @ TLBIALLIS (rt ignored)
- dsb ish
- isb
- mov r2, #0
- mov r3, #0
- mcrr p15, 6, r2, r3, c2 @ Back to VMID #0
- isb @ Not necessary if followed by eret
-
- pop {r2, r3}
- bx lr
-ENDPROC(__kvm_tlb_flush_vmid_ipa)
-
-/**
- * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
- *
- * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing address
- * parameter
- */
-
-ENTRY(__kvm_tlb_flush_vmid)
- b __kvm_tlb_flush_vmid_ipa
-ENDPROC(__kvm_tlb_flush_vmid)
-
-/********************************************************************
- * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
- * domain, for all VMIDs
- *
- * void __kvm_flush_vm_context(void);
- */
-ENTRY(__kvm_flush_vm_context)
- mov r0, #0 @ rn parameter for c15 flushes is SBZ
-
- /* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
- mcr p15, 4, r0, c8, c3, 4
- /* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
- mcr p15, 0, r0, c7, c1, 0
- dsb ish
- isb @ Not necessary if followed by eret
-
- bx lr
-ENDPROC(__kvm_flush_vm_context)
-
-
-/********************************************************************
- * Hypervisor world-switch code
- *
- *
- * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
- */
-ENTRY(__kvm_vcpu_run)
- @ Save the vcpu pointer
- mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR
-
- save_host_regs
-
- restore_vgic_state
- restore_timer_state
-
- @ Store hardware CP15 state and load guest state
- read_cp15_state store_to_vcpu = 0
- write_cp15_state read_from_vcpu = 1
-
- @ If the host kernel has not been configured with VFPv3 support,
- @ then it is safer if we deny guests from using it as well.
-#ifdef CONFIG_VFPv3
- @ Set FPEXC_EN so the guest doesn't trap floating point instructions
- VFPFMRX r2, FPEXC @ VMRS
- push {r2}
- orr r2, r2, #FPEXC_EN
- VFPFMXR FPEXC, r2 @ VMSR
-#endif
-
- @ Configure Hyp-role
- configure_hyp_role vmentry
-
- @ Trap coprocessor CRx accesses
- set_hstr vmentry
- set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
- set_hdcr vmentry
-
- @ Write configured ID register into MIDR alias
- ldr r1, [vcpu, #VCPU_MIDR]
- mcr p15, 4, r1, c0, c0, 0
-
- @ Write guest view of MPIDR into VMPIDR
- ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
- mcr p15, 4, r1, c0, c0, 5
-
- @ Set up guest memory translation
- ldr r1, [vcpu, #VCPU_KVM]
- add r1, r1, #KVM_VTTBR
- ldrd r2, r3, [r1]
- mcrr p15, 6, rr_lo_hi(r2, r3), c2 @ Write VTTBR
-
- @ We're all done, just restore the GPRs and go to the guest
- restore_guest_regs
- clrex @ Clear exclusive monitor
- eret
-
-__kvm_vcpu_return:
- /*
- * return convention:
- * guest r0, r1, r2 saved on the stack
- * r0: vcpu pointer
- * r1: exception code
- */
- save_guest_regs
-
- @ Set VMID == 0
- mov r2, #0
- mov r3, #0
- mcrr p15, 6, r2, r3, c2 @ Write VTTBR
-
- @ Don't trap coprocessor accesses for host kernel
- set_hstr vmexit
- set_hdcr vmexit
- set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore
-
-#ifdef CONFIG_VFPv3
- @ Switch VFP/NEON hardware state to the host's
- add r7, vcpu, #VCPU_VFP_GUEST
- store_vfp_state r7
- add r7, vcpu, #VCPU_VFP_HOST
- ldr r7, [r7]
- restore_vfp_state r7
-
-after_vfp_restore:
- @ Restore FPEXC_EN which we clobbered on entry
- pop {r2}
- VFPFMXR FPEXC, r2
-#else
-after_vfp_restore:
-#endif
-
- @ Reset Hyp-role
- configure_hyp_role vmexit
-
- @ Let host read hardware MIDR
- mrc p15, 0, r2, c0, c0, 0
- mcr p15, 4, r2, c0, c0, 0
-
- @ Back to hardware MPIDR
- mrc p15, 0, r2, c0, c0, 5
- mcr p15, 4, r2, c0, c0, 5
-
- @ Store guest CP15 state and restore host state
- read_cp15_state store_to_vcpu = 1
- write_cp15_state read_from_vcpu = 0
-
- save_timer_state
- save_vgic_state
-
- restore_host_regs
- clrex @ Clear exclusive monitor
-#ifndef CONFIG_CPU_ENDIAN_BE8
- mov r0, r1 @ Return the return code
- mov r1, #0 @ Clear upper bits in return value
-#else
- @ r1 already has return code
- mov r0, #0 @ Clear upper bits in return value
-#endif /* CONFIG_CPU_ENDIAN_BE8 */
- bx lr @ return to IOCTL
-
/********************************************************************
* Call function in Hyp mode
*
*
- * u64 kvm_call_hyp(void *hypfn, ...);
+ * unsigned long kvm_call_hyp(void *hypfn, ...);
*
* This is not really a variadic function in the classic C-way and care must
* be taken when calling this to ensure parameters are passed in registers
@@ -232,7 +35,7 @@
* passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
* function pointer can be passed). The function being called must be mapped
* in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are
- * passed in r0 and r1.
+ * passed in r0 (strictly 32-bit).
*
* A function pointer with a value of 0xffffffff has a special meaning,
* and is used to implement __hyp_get_vectors in the same way as in
@@ -246,281 +49,4 @@
ENTRY(kvm_call_hyp)
hvc #0
bx lr
-
-/********************************************************************
- * Hypervisor exception vector and handlers
- *
- *
- * The KVM/ARM Hypervisor ABI is defined as follows:
- *
- * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
- * instruction is issued since all traps are disabled when running the host
- * kernel as per the Hyp-mode initialization at boot time.
- *
- * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
- * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
- * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
- * instructions are called from within Hyp-mode.
- *
- * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
- * Switching to Hyp mode is done through a simple HVC #0 instruction. The
- * exception vector code will check that the HVC comes from VMID==0 and if
- * so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
- * - r0 contains a pointer to a HYP function
- * - r1, r2, and r3 contain arguments to the above function.
- * - The HYP function will be called with its arguments in r0, r1 and r2.
- * On HYP function return, we return directly to SVC.
- *
- * Note that the above is used to execute code in Hyp-mode from a host-kernel
- * point of view, and is a different concept from performing a world-switch and
- * executing guest code SVC mode (with a VMID != 0).
- */
-
-/* Handle undef, svc, pabt, or dabt by crashing with a user notice */
-.macro bad_exception exception_code, panic_str
- push {r0-r2}
- mrrc p15, 6, r0, r1, c2 @ Read VTTBR
- lsr r1, r1, #16
- ands r1, r1, #0xff
- beq 99f
-
- load_vcpu @ Load VCPU pointer
- .if \exception_code == ARM_EXCEPTION_DATA_ABORT
- mrc p15, 4, r2, c5, c2, 0 @ HSR
- mrc p15, 4, r1, c6, c0, 0 @ HDFAR
- str r2, [vcpu, #VCPU_HSR]
- str r1, [vcpu, #VCPU_HxFAR]
- .endif
- .if \exception_code == ARM_EXCEPTION_PREF_ABORT
- mrc p15, 4, r2, c5, c2, 0 @ HSR
- mrc p15, 4, r1, c6, c0, 2 @ HIFAR
- str r2, [vcpu, #VCPU_HSR]
- str r1, [vcpu, #VCPU_HxFAR]
- .endif
- mov r1, #\exception_code
- b __kvm_vcpu_return
-
- @ We were in the host already. Let's craft a panic-ing return to SVC.
-99: mrs r2, cpsr
- bic r2, r2, #MODE_MASK
- orr r2, r2, #SVC_MODE
-THUMB( orr r2, r2, #PSR_T_BIT )
- msr spsr_cxsf, r2
- mrs r1, ELR_hyp
- ldr r2, =panic
- msr ELR_hyp, r2
- ldr r0, =\panic_str
- clrex @ Clear exclusive monitor
- eret
-.endm
-
- .text
-
- .align 5
-__kvm_hyp_vector:
- .globl __kvm_hyp_vector
-
- @ Hyp-mode exception vector
- W(b) hyp_reset
- W(b) hyp_undef
- W(b) hyp_svc
- W(b) hyp_pabt
- W(b) hyp_dabt
- W(b) hyp_hvc
- W(b) hyp_irq
- W(b) hyp_fiq
-
- .align
-hyp_reset:
- b hyp_reset
-
- .align
-hyp_undef:
- bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str
-
- .align
-hyp_svc:
- bad_exception ARM_EXCEPTION_HVC, svc_die_str
-
- .align
-hyp_pabt:
- bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str
-
- .align
-hyp_dabt:
- bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str
-
- .align
-hyp_hvc:
- /*
- * Getting here is either because of a trap from a guest or from calling
- * HVC from the host kernel, which means "switch to Hyp mode".
- */
- push {r0, r1, r2}
-
- @ Check syndrome register
- mrc p15, 4, r1, c5, c2, 0 @ HSR
- lsr r0, r1, #HSR_EC_SHIFT
- cmp r0, #HSR_EC_HVC
- bne guest_trap @ Not HVC instr.
-
- /*
- * Let's check if the HVC came from VMID 0 and allow simple
- * switch to Hyp mode
- */
- mrrc p15, 6, r0, r2, c2
- lsr r2, r2, #16
- and r2, r2, #0xff
- cmp r2, #0
- bne guest_trap @ Guest called HVC
-
- /*
- * Getting here means host called HVC, we shift parameters and branch
- * to Hyp function.
- */
- pop {r0, r1, r2}
-
- /* Check for __hyp_get_vectors */
- cmp r0, #-1
- mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR
- beq 1f
-
- push {lr}
- mrs lr, SPSR
- push {lr}
-
- mov lr, r0
- mov r0, r1
- mov r1, r2
- mov r2, r3
-
-THUMB( orr lr, #1)
- blx lr @ Call the HYP function
-
- pop {lr}
- msr SPSR_csxf, lr
- pop {lr}
-1: eret
-
-guest_trap:
- load_vcpu @ Load VCPU pointer to r0
- str r1, [vcpu, #VCPU_HSR]
-
- @ Check if we need the fault information
- lsr r1, r1, #HSR_EC_SHIFT
-#ifdef CONFIG_VFPv3
- cmp r1, #HSR_EC_CP_0_13
- beq switch_to_guest_vfp
-#endif
- cmp r1, #HSR_EC_IABT
- mrceq p15, 4, r2, c6, c0, 2 @ HIFAR
- beq 2f
- cmp r1, #HSR_EC_DABT
- bne 1f
- mrc p15, 4, r2, c6, c0, 0 @ HDFAR
-
-2: str r2, [vcpu, #VCPU_HxFAR]
-
- /*
- * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
- *
- * Abort on the stage 2 translation for a memory access from a
- * Non-secure PL1 or PL0 mode:
- *
- * For any Access flag fault or Translation fault, and also for any
- * Permission fault on the stage 2 translation of a memory access
- * made as part of a translation table walk for a stage 1 translation,
- * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
- * is UNKNOWN.
- */
-
- /* Check for permission fault, and S1PTW */
- mrc p15, 4, r1, c5, c2, 0 @ HSR
- and r0, r1, #HSR_FSC_TYPE
- cmp r0, #FSC_PERM
- tsteq r1, #(1 << 7) @ S1PTW
- mrcne p15, 4, r2, c6, c0, 4 @ HPFAR
- bne 3f
-
- /* Preserve PAR */
- mrrc p15, 0, r0, r1, c7 @ PAR
- push {r0, r1}
-
- /* Resolve IPA using the xFAR */
- mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR
- isb
- mrrc p15, 0, r0, r1, c7 @ PAR
- tst r0, #1
- bne 4f @ Failed translation
- ubfx r2, r0, #12, #20
- lsl r2, r2, #4
- orr r2, r2, r1, lsl #24
-
- /* Restore PAR */
- pop {r0, r1}
- mcrr p15, 0, r0, r1, c7 @ PAR
-
-3: load_vcpu @ Load VCPU pointer to r0
- str r2, [r0, #VCPU_HPFAR]
-
-1: mov r1, #ARM_EXCEPTION_HVC
- b __kvm_vcpu_return
-
-4: pop {r0, r1} @ Failed translation, return to guest
- mcrr p15, 0, r0, r1, c7 @ PAR
- clrex
- pop {r0, r1, r2}
- eret
-
-/*
- * If VFPv3 support is not available, then we will not switch the VFP
- * registers; however cp10 and cp11 accesses will still trap and fallback
- * to the regular coprocessor emulation code, which currently will
- * inject an undefined exception to the guest.
- */
-#ifdef CONFIG_VFPv3
-switch_to_guest_vfp:
- push {r3-r7}
-
- @ NEON/VFP used. Turn on VFP access.
- set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))
-
- @ Switch VFP/NEON hardware state to the guest's
- add r7, r0, #VCPU_VFP_HOST
- ldr r7, [r7]
- store_vfp_state r7
- add r7, r0, #VCPU_VFP_GUEST
- restore_vfp_state r7
-
- pop {r3-r7}
- pop {r0-r2}
- clrex
- eret
-#endif
-
- .align
-hyp_irq:
- push {r0, r1, r2}
- mov r1, #ARM_EXCEPTION_IRQ
- load_vcpu @ Load VCPU pointer to r0
- b __kvm_vcpu_return
-
- .align
-hyp_fiq:
- b hyp_fiq
-
- .ltorg
-
-__kvm_hyp_code_end:
- .globl __kvm_hyp_code_end
-
- .section ".rodata"
-
-und_die_str:
- .ascii "unexpected undefined exception in Hyp mode at: %#08x\n"
-pabt_die_str:
- .ascii "unexpected prefetch abort in Hyp mode at: %#08x\n"
-dabt_die_str:
- .ascii "unexpected data abort in Hyp mode at: %#08x\n"
-svc_die_str:
- .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n"
+ENDPROC(kvm_call_hyp)
diff --git a/arch/arm/kvm/interrupts_head.S b/arch/arm/kvm/interrupts_head.S
deleted file mode 100644
index 51a5950..0000000
--- a/arch/arm/kvm/interrupts_head.S
+++ /dev/null
@@ -1,648 +0,0 @@
-#include <linux/irqchip/arm-gic.h>
-#include <asm/assembler.h>
-
-#define VCPU_USR_REG(_reg_nr) (VCPU_USR_REGS + (_reg_nr * 4))
-#define VCPU_USR_SP (VCPU_USR_REG(13))
-#define VCPU_USR_LR (VCPU_USR_REG(14))
-#define CP15_OFFSET(_cp15_reg_idx) (VCPU_CP15 + (_cp15_reg_idx * 4))
-
-/*
- * Many of these macros need to access the VCPU structure, which is always
- * held in r0. These macros should never clobber r1, as it is used to hold the
- * exception code on the return path (except of course the macro that switches
- * all the registers before the final jump to the VM).
- */
-vcpu .req r0 @ vcpu pointer always in r0
-
-/* Clobbers {r2-r6} */
-.macro store_vfp_state vfp_base
- @ The VFPFMRX and VFPFMXR macros are the VMRS and VMSR instructions
- VFPFMRX r2, FPEXC
- @ Make sure VFP is enabled so we can touch the registers.
- orr r6, r2, #FPEXC_EN
- VFPFMXR FPEXC, r6
-
- VFPFMRX r3, FPSCR
- tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
- beq 1f
- @ If FPEXC_EX is 0, then FPINST/FPINST2 reads are unpredictable, so
- @ we only need to save them if FPEXC_EX is set.
- VFPFMRX r4, FPINST
- tst r2, #FPEXC_FP2V
- VFPFMRX r5, FPINST2, ne @ vmrsne
- bic r6, r2, #FPEXC_EX @ FPEXC_EX disable
- VFPFMXR FPEXC, r6
-1:
- VFPFSTMIA \vfp_base, r6 @ Save VFP registers
- stm \vfp_base, {r2-r5} @ Save FPEXC, FPSCR, FPINST, FPINST2
-.endm
-
-/* Assume FPEXC_EN is on and FPEXC_EX is off, clobbers {r2-r6} */
-.macro restore_vfp_state vfp_base
- VFPFLDMIA \vfp_base, r6 @ Load VFP registers
- ldm \vfp_base, {r2-r5} @ Load FPEXC, FPSCR, FPINST, FPINST2
-
- VFPFMXR FPSCR, r3
- tst r2, #FPEXC_EX @ Check for VFP Subarchitecture
- beq 1f
- VFPFMXR FPINST, r4
- tst r2, #FPEXC_FP2V
- VFPFMXR FPINST2, r5, ne
-1:
- VFPFMXR FPEXC, r2 @ FPEXC (last, in case !EN)
-.endm
-
-/* These are simply for the macros to work - values don't have meaning */
-.equ usr, 0
-.equ svc, 1
-.equ abt, 2
-.equ und, 3
-.equ irq, 4
-.equ fiq, 5
-
-.macro push_host_regs_mode mode
- mrs r2, SP_\mode
- mrs r3, LR_\mode
- mrs r4, SPSR_\mode
- push {r2, r3, r4}
-.endm
-
-/*
- * Store all host persistent registers on the stack.
- * Clobbers all registers, in all modes, except r0 and r1.
- */
-.macro save_host_regs
- /* Hyp regs. Only ELR_hyp (SPSR_hyp already saved) */
- mrs r2, ELR_hyp
- push {r2}
-
- /* usr regs */
- push {r4-r12} @ r0-r3 are always clobbered
- mrs r2, SP_usr
- mov r3, lr
- push {r2, r3}
-
- push_host_regs_mode svc
- push_host_regs_mode abt
- push_host_regs_mode und
- push_host_regs_mode irq
-
- /* fiq regs */
- mrs r2, r8_fiq
- mrs r3, r9_fiq
- mrs r4, r10_fiq
- mrs r5, r11_fiq
- mrs r6, r12_fiq
- mrs r7, SP_fiq
- mrs r8, LR_fiq
- mrs r9, SPSR_fiq
- push {r2-r9}
-.endm
-
-.macro pop_host_regs_mode mode
- pop {r2, r3, r4}
- msr SP_\mode, r2
- msr LR_\mode, r3
- msr SPSR_\mode, r4
-.endm
-
-/*
- * Restore all host registers from the stack.
- * Clobbers all registers, in all modes, except r0 and r1.
- */
-.macro restore_host_regs
- pop {r2-r9}
- msr r8_fiq, r2
- msr r9_fiq, r3
- msr r10_fiq, r4
- msr r11_fiq, r5
- msr r12_fiq, r6
- msr SP_fiq, r7
- msr LR_fiq, r8
- msr SPSR_fiq, r9
-
- pop_host_regs_mode irq
- pop_host_regs_mode und
- pop_host_regs_mode abt
- pop_host_regs_mode svc
-
- pop {r2, r3}
- msr SP_usr, r2
- mov lr, r3
- pop {r4-r12}
-
- pop {r2}
- msr ELR_hyp, r2
-.endm
-
-/*
- * Restore SP, LR and SPSR for a given mode. offset is the offset of
- * this mode's registers from the VCPU base.
- *
- * Assumes vcpu pointer in vcpu reg
- *
- * Clobbers r1, r2, r3, r4.
- */
-.macro restore_guest_regs_mode mode, offset
- add r1, vcpu, \offset
- ldm r1, {r2, r3, r4}
- msr SP_\mode, r2
- msr LR_\mode, r3
- msr SPSR_\mode, r4
-.endm
-
-/*
- * Restore all guest registers from the vcpu struct.
- *
- * Assumes vcpu pointer in vcpu reg
- *
- * Clobbers *all* registers.
- */
-.macro restore_guest_regs
- restore_guest_regs_mode svc, #VCPU_SVC_REGS
- restore_guest_regs_mode abt, #VCPU_ABT_REGS
- restore_guest_regs_mode und, #VCPU_UND_REGS
- restore_guest_regs_mode irq, #VCPU_IRQ_REGS
-
- add r1, vcpu, #VCPU_FIQ_REGS
- ldm r1, {r2-r9}
- msr r8_fiq, r2
- msr r9_fiq, r3
- msr r10_fiq, r4
- msr r11_fiq, r5
- msr r12_fiq, r6
- msr SP_fiq, r7
- msr LR_fiq, r8
- msr SPSR_fiq, r9
-
- @ Load return state
- ldr r2, [vcpu, #VCPU_PC]
- ldr r3, [vcpu, #VCPU_CPSR]
- msr ELR_hyp, r2
- msr SPSR_cxsf, r3
-
- @ Load user registers
- ldr r2, [vcpu, #VCPU_USR_SP]
- ldr r3, [vcpu, #VCPU_USR_LR]
- msr SP_usr, r2
- mov lr, r3
- add vcpu, vcpu, #(VCPU_USR_REGS)
- ldm vcpu, {r0-r12}
-.endm
-
-/*
- * Save SP, LR and SPSR for a given mode. offset is the offset of
- * this mode's registers from the VCPU base.
- *
- * Assumes vcpu pointer in vcpu reg
- *
- * Clobbers r2, r3, r4, r5.
- */
-.macro save_guest_regs_mode mode, offset
- add r2, vcpu, \offset
- mrs r3, SP_\mode
- mrs r4, LR_\mode
- mrs r5, SPSR_\mode
- stm r2, {r3, r4, r5}
-.endm
-
-/*
- * Save all guest registers to the vcpu struct
- * Expects guest's r0, r1, r2 on the stack.
- *
- * Assumes vcpu pointer in vcpu reg
- *
- * Clobbers r2, r3, r4, r5.
- */
-.macro save_guest_regs
- @ Store usr registers
- add r2, vcpu, #VCPU_USR_REG(3)
- stm r2, {r3-r12}
- add r2, vcpu, #VCPU_USR_REG(0)
- pop {r3, r4, r5} @ r0, r1, r2
- stm r2, {r3, r4, r5}
- mrs r2, SP_usr
- mov r3, lr
- str r2, [vcpu, #VCPU_USR_SP]
- str r3, [vcpu, #VCPU_USR_LR]
-
- @ Store return state
- mrs r2, ELR_hyp
- mrs r3, spsr
- str r2, [vcpu, #VCPU_PC]
- str r3, [vcpu, #VCPU_CPSR]
-
- @ Store other guest registers
- save_guest_regs_mode svc, #VCPU_SVC_REGS
- save_guest_regs_mode abt, #VCPU_ABT_REGS
- save_guest_regs_mode und, #VCPU_UND_REGS
- save_guest_regs_mode irq, #VCPU_IRQ_REGS
-.endm
-
-/* Reads cp15 registers from hardware and stores them in memory
- * @store_to_vcpu: If 0, registers are written in-order to the stack,
- * otherwise to the VCPU struct pointed to by vcpup
- *
- * Assumes vcpu pointer in vcpu reg
- *
- * Clobbers r2 - r12
- */
-.macro read_cp15_state store_to_vcpu
- mrc p15, 0, r2, c1, c0, 0 @ SCTLR
- mrc p15, 0, r3, c1, c0, 2 @ CPACR
- mrc p15, 0, r4, c2, c0, 2 @ TTBCR
- mrc p15, 0, r5, c3, c0, 0 @ DACR
- mrrc p15, 0, r6, r7, c2 @ TTBR 0
- mrrc p15, 1, r8, r9, c2 @ TTBR 1
- mrc p15, 0, r10, c10, c2, 0 @ PRRR
- mrc p15, 0, r11, c10, c2, 1 @ NMRR
- mrc p15, 2, r12, c0, c0, 0 @ CSSELR
-
- .if \store_to_vcpu == 0
- push {r2-r12} @ Push CP15 registers
- .else
- str r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
- str r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
- str r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
- str r5, [vcpu, #CP15_OFFSET(c3_DACR)]
- add r2, vcpu, #CP15_OFFSET(c2_TTBR0)
- strd r6, r7, [r2]
- add r2, vcpu, #CP15_OFFSET(c2_TTBR1)
- strd r8, r9, [r2]
- str r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
- str r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
- str r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
- .endif
-
- mrc p15, 0, r2, c13, c0, 1 @ CID
- mrc p15, 0, r3, c13, c0, 2 @ TID_URW
- mrc p15, 0, r4, c13, c0, 3 @ TID_URO
- mrc p15, 0, r5, c13, c0, 4 @ TID_PRIV
- mrc p15, 0, r6, c5, c0, 0 @ DFSR
- mrc p15, 0, r7, c5, c0, 1 @ IFSR
- mrc p15, 0, r8, c5, c1, 0 @ ADFSR
- mrc p15, 0, r9, c5, c1, 1 @ AIFSR
- mrc p15, 0, r10, c6, c0, 0 @ DFAR
- mrc p15, 0, r11, c6, c0, 2 @ IFAR
- mrc p15, 0, r12, c12, c0, 0 @ VBAR
-
- .if \store_to_vcpu == 0
- push {r2-r12} @ Push CP15 registers
- .else
- str r2, [vcpu, #CP15_OFFSET(c13_CID)]
- str r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
- str r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
- str r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
- str r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
- str r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
- str r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
- str r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
- str r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
- str r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
- str r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
- .endif
-
- mrc p15, 0, r2, c14, c1, 0 @ CNTKCTL
- mrrc p15, 0, r4, r5, c7 @ PAR
- mrc p15, 0, r6, c10, c3, 0 @ AMAIR0
- mrc p15, 0, r7, c10, c3, 1 @ AMAIR1
-
- .if \store_to_vcpu == 0
- push {r2,r4-r7}
- .else
- str r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
- add r12, vcpu, #CP15_OFFSET(c7_PAR)
- strd r4, r5, [r12]
- str r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
- str r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
- .endif
-.endm
-
-/*
- * Reads cp15 registers from memory and writes them to hardware
- * @read_from_vcpu: If 0, registers are read in-order from the stack,
- * otherwise from the VCPU struct pointed to by vcpup
- *
- * Assumes vcpu pointer in vcpu reg
- */
-.macro write_cp15_state read_from_vcpu
- .if \read_from_vcpu == 0
- pop {r2,r4-r7}
- .else
- ldr r2, [vcpu, #CP15_OFFSET(c14_CNTKCTL)]
- add r12, vcpu, #CP15_OFFSET(c7_PAR)
- ldrd r4, r5, [r12]
- ldr r6, [vcpu, #CP15_OFFSET(c10_AMAIR0)]
- ldr r7, [vcpu, #CP15_OFFSET(c10_AMAIR1)]
- .endif
-
- mcr p15, 0, r2, c14, c1, 0 @ CNTKCTL
- mcrr p15, 0, r4, r5, c7 @ PAR
- mcr p15, 0, r6, c10, c3, 0 @ AMAIR0
- mcr p15, 0, r7, c10, c3, 1 @ AMAIR1
-
- .if \read_from_vcpu == 0
- pop {r2-r12}
- .else
- ldr r2, [vcpu, #CP15_OFFSET(c13_CID)]
- ldr r3, [vcpu, #CP15_OFFSET(c13_TID_URW)]
- ldr r4, [vcpu, #CP15_OFFSET(c13_TID_URO)]
- ldr r5, [vcpu, #CP15_OFFSET(c13_TID_PRIV)]
- ldr r6, [vcpu, #CP15_OFFSET(c5_DFSR)]
- ldr r7, [vcpu, #CP15_OFFSET(c5_IFSR)]
- ldr r8, [vcpu, #CP15_OFFSET(c5_ADFSR)]
- ldr r9, [vcpu, #CP15_OFFSET(c5_AIFSR)]
- ldr r10, [vcpu, #CP15_OFFSET(c6_DFAR)]
- ldr r11, [vcpu, #CP15_OFFSET(c6_IFAR)]
- ldr r12, [vcpu, #CP15_OFFSET(c12_VBAR)]
- .endif
-
- mcr p15, 0, r2, c13, c0, 1 @ CID
- mcr p15, 0, r3, c13, c0, 2 @ TID_URW
- mcr p15, 0, r4, c13, c0, 3 @ TID_URO
- mcr p15, 0, r5, c13, c0, 4 @ TID_PRIV
- mcr p15, 0, r6, c5, c0, 0 @ DFSR
- mcr p15, 0, r7, c5, c0, 1 @ IFSR
- mcr p15, 0, r8, c5, c1, 0 @ ADFSR
- mcr p15, 0, r9, c5, c1, 1 @ AIFSR
- mcr p15, 0, r10, c6, c0, 0 @ DFAR
- mcr p15, 0, r11, c6, c0, 2 @ IFAR
- mcr p15, 0, r12, c12, c0, 0 @ VBAR
-
- .if \read_from_vcpu == 0
- pop {r2-r12}
- .else
- ldr r2, [vcpu, #CP15_OFFSET(c1_SCTLR)]
- ldr r3, [vcpu, #CP15_OFFSET(c1_CPACR)]
- ldr r4, [vcpu, #CP15_OFFSET(c2_TTBCR)]
- ldr r5, [vcpu, #CP15_OFFSET(c3_DACR)]
- add r12, vcpu, #CP15_OFFSET(c2_TTBR0)
- ldrd r6, r7, [r12]
- add r12, vcpu, #CP15_OFFSET(c2_TTBR1)
- ldrd r8, r9, [r12]
- ldr r10, [vcpu, #CP15_OFFSET(c10_PRRR)]
- ldr r11, [vcpu, #CP15_OFFSET(c10_NMRR)]
- ldr r12, [vcpu, #CP15_OFFSET(c0_CSSELR)]
- .endif
-
- mcr p15, 0, r2, c1, c0, 0 @ SCTLR
- mcr p15, 0, r3, c1, c0, 2 @ CPACR
- mcr p15, 0, r4, c2, c0, 2 @ TTBCR
- mcr p15, 0, r5, c3, c0, 0 @ DACR
- mcrr p15, 0, r6, r7, c2 @ TTBR 0
- mcrr p15, 1, r8, r9, c2 @ TTBR 1
- mcr p15, 0, r10, c10, c2, 0 @ PRRR
- mcr p15, 0, r11, c10, c2, 1 @ NMRR
- mcr p15, 2, r12, c0, c0, 0 @ CSSELR
-.endm
-
-/*
- * Save the VGIC CPU state into memory
- *
- * Assumes vcpu pointer in vcpu reg
- */
-.macro save_vgic_state
- /* Get VGIC VCTRL base into r2 */
- ldr r2, [vcpu, #VCPU_KVM]
- ldr r2, [r2, #KVM_VGIC_VCTRL]
- cmp r2, #0
- beq 2f
-
- /* Compute the address of struct vgic_cpu */
- add r11, vcpu, #VCPU_VGIC_CPU
-
- /* Save all interesting registers */
- ldr r4, [r2, #GICH_VMCR]
- ldr r5, [r2, #GICH_MISR]
- ldr r6, [r2, #GICH_EISR0]
- ldr r7, [r2, #GICH_EISR1]
- ldr r8, [r2, #GICH_ELRSR0]
- ldr r9, [r2, #GICH_ELRSR1]
- ldr r10, [r2, #GICH_APR]
-ARM_BE8(rev r4, r4 )
-ARM_BE8(rev r5, r5 )
-ARM_BE8(rev r6, r6 )
-ARM_BE8(rev r7, r7 )
-ARM_BE8(rev r8, r8 )
-ARM_BE8(rev r9, r9 )
-ARM_BE8(rev r10, r10 )
-
- str r4, [r11, #VGIC_V2_CPU_VMCR]
- str r5, [r11, #VGIC_V2_CPU_MISR]
-#ifdef CONFIG_CPU_ENDIAN_BE8
- str r6, [r11, #(VGIC_V2_CPU_EISR + 4)]
- str r7, [r11, #VGIC_V2_CPU_EISR]
- str r8, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
- str r9, [r11, #VGIC_V2_CPU_ELRSR]
-#else
- str r6, [r11, #VGIC_V2_CPU_EISR]
- str r7, [r11, #(VGIC_V2_CPU_EISR + 4)]
- str r8, [r11, #VGIC_V2_CPU_ELRSR]
- str r9, [r11, #(VGIC_V2_CPU_ELRSR + 4)]
-#endif
- str r10, [r11, #VGIC_V2_CPU_APR]
-
- /* Clear GICH_HCR */
- mov r5, #0
- str r5, [r2, #GICH_HCR]
-
- /* Save list registers */
- add r2, r2, #GICH_LR0
- add r3, r11, #VGIC_V2_CPU_LR
- ldr r4, [r11, #VGIC_CPU_NR_LR]
-1: ldr r6, [r2], #4
-ARM_BE8(rev r6, r6 )
- str r6, [r3], #4
- subs r4, r4, #1
- bne 1b
-2:
-.endm
-
-/*
- * Restore the VGIC CPU state from memory
- *
- * Assumes vcpu pointer in vcpu reg
- */
-.macro restore_vgic_state
- /* Get VGIC VCTRL base into r2 */
- ldr r2, [vcpu, #VCPU_KVM]
- ldr r2, [r2, #KVM_VGIC_VCTRL]
- cmp r2, #0
- beq 2f
-
- /* Compute the address of struct vgic_cpu */
- add r11, vcpu, #VCPU_VGIC_CPU
-
- /* We only restore a minimal set of registers */
- ldr r3, [r11, #VGIC_V2_CPU_HCR]
- ldr r4, [r11, #VGIC_V2_CPU_VMCR]
- ldr r8, [r11, #VGIC_V2_CPU_APR]
-ARM_BE8(rev r3, r3 )
-ARM_BE8(rev r4, r4 )
-ARM_BE8(rev r8, r8 )
-
- str r3, [r2, #GICH_HCR]
- str r4, [r2, #GICH_VMCR]
- str r8, [r2, #GICH_APR]
-
- /* Restore list registers */
- add r2, r2, #GICH_LR0
- add r3, r11, #VGIC_V2_CPU_LR
- ldr r4, [r11, #VGIC_CPU_NR_LR]
-1: ldr r6, [r3], #4
-ARM_BE8(rev r6, r6 )
- str r6, [r2], #4
- subs r4, r4, #1
- bne 1b
-2:
-.endm
-
-#define CNTHCTL_PL1PCTEN (1 << 0)
-#define CNTHCTL_PL1PCEN (1 << 1)
-
-/*
- * Save the timer state onto the VCPU and allow physical timer/counter access
- * for the host.
- *
- * Assumes vcpu pointer in vcpu reg
- * Clobbers r2-r5
- */
-.macro save_timer_state
- ldr r4, [vcpu, #VCPU_KVM]
- ldr r2, [r4, #KVM_TIMER_ENABLED]
- cmp r2, #0
- beq 1f
-
- mrc p15, 0, r2, c14, c3, 1 @ CNTV_CTL
- str r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
-
- isb
-
- mrrc p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
- ldr r4, =VCPU_TIMER_CNTV_CVAL
- add r5, vcpu, r4
- strd r2, r3, [r5]
-
- @ Ensure host CNTVCT == CNTPCT
- mov r2, #0
- mcrr p15, 4, r2, r2, c14 @ CNTVOFF
-
-1:
- mov r2, #0 @ Clear ENABLE
- mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
-
- @ Allow physical timer/counter access for the host
- mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
- orr r2, r2, #(CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN)
- mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
-.endm
-
-/*
- * Load the timer state from the VCPU and deny physical timer/counter access
- * for the host.
- *
- * Assumes vcpu pointer in vcpu reg
- * Clobbers r2-r5
- */
-.macro restore_timer_state
- @ Disallow physical timer access for the guest
- @ Physical counter access is allowed
- mrc p15, 4, r2, c14, c1, 0 @ CNTHCTL
- orr r2, r2, #CNTHCTL_PL1PCTEN
- bic r2, r2, #CNTHCTL_PL1PCEN
- mcr p15, 4, r2, c14, c1, 0 @ CNTHCTL
-
- ldr r4, [vcpu, #VCPU_KVM]
- ldr r2, [r4, #KVM_TIMER_ENABLED]
- cmp r2, #0
- beq 1f
-
- ldr r2, [r4, #KVM_TIMER_CNTVOFF]
- ldr r3, [r4, #(KVM_TIMER_CNTVOFF + 4)]
- mcrr p15, 4, rr_lo_hi(r2, r3), c14 @ CNTVOFF
-
- ldr r4, =VCPU_TIMER_CNTV_CVAL
- add r5, vcpu, r4
- ldrd r2, r3, [r5]
- mcrr p15, 3, rr_lo_hi(r2, r3), c14 @ CNTV_CVAL
- isb
-
- ldr r2, [vcpu, #VCPU_TIMER_CNTV_CTL]
- and r2, r2, #3
- mcr p15, 0, r2, c14, c3, 1 @ CNTV_CTL
-1:
-.endm
-
-.equ vmentry, 0
-.equ vmexit, 1
-
-/* Configures the HSTR (Hyp System Trap Register) on entry/return
- * (hardware reset value is 0) */
-.macro set_hstr operation
- mrc p15, 4, r2, c1, c1, 3
- ldr r3, =HSTR_T(15)
- .if \operation == vmentry
- orr r2, r2, r3 @ Trap CR{15}
- .else
- bic r2, r2, r3 @ Don't trap any CRx accesses
- .endif
- mcr p15, 4, r2, c1, c1, 3
-.endm
-
-/* Configures the HCPTR (Hyp Coprocessor Trap Register) on entry/return
- * (hardware reset value is 0). Keep previous value in r2.
- * An ISB is emitted on vmexit/vmtrap, but executed on vmexit only if
- * VFP wasn't already enabled (always executed on vmtrap).
- * If a label is specified with vmexit, it is branched to if VFP wasn't
- * enabled.
- */
-.macro set_hcptr operation, mask, label = none
- mrc p15, 4, r2, c1, c1, 2
- ldr r3, =\mask
- .if \operation == vmentry
- orr r3, r2, r3 @ Trap coproc-accesses defined in mask
- .else
- bic r3, r2, r3 @ Don't trap defined coproc-accesses
- .endif
- mcr p15, 4, r3, c1, c1, 2
- .if \operation != vmentry
- .if \operation == vmexit
- tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11))
- beq 1f
- .endif
- isb
- .if \label != none
- b \label
- .endif
-1:
- .endif
-.endm
-
-/* Configures the HDCR (Hyp Debug Configuration Register) on entry/return
- * (hardware reset value is 0) */
-.macro set_hdcr operation
- mrc p15, 4, r2, c1, c1, 1
- ldr r3, =(HDCR_TPM|HDCR_TPMCR)
- .if \operation == vmentry
- orr r2, r2, r3 @ Trap some perfmon accesses
- .else
- bic r2, r2, r3 @ Don't trap any perfmon accesses
- .endif
- mcr p15, 4, r2, c1, c1, 1
-.endm
-
-/* Enable/Disable: stage-2 trans., trap interrupts, trap wfi, trap smc */
-.macro configure_hyp_role operation
- .if \operation == vmentry
- ldr r2, [vcpu, #VCPU_HCR]
- ldr r3, [vcpu, #VCPU_IRQ_LINES]
- orr r2, r2, r3
- .else
- mov r2, #0
- .endif
- mcr p15, 4, r2, c1, c1, 0 @ HCR
-.endm
-
-.macro load_vcpu
- mrc p15, 4, vcpu, c13, c0, 2 @ HTPIDR
-.endm
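The block deleted above is the heart of the 32-bit world switch: cp15, VGIC and
timer state is spilled to the vcpu struct (or the stack) on exit and reloaded on
entry, and save_timer_state/restore_timer_state flip the CNTHCTL gates between
host and guest. A minimal C rendering of that CNTHCTL policy follows; the bit
names match the macros, but the helper functions are assumed, not from the tree:

    #define CNTHCTL_PL1PCTEN (1UL << 0)  /* EL1 physical counter access */
    #define CNTHCTL_PL1PCEN  (1UL << 1)  /* EL1 physical timer access */

    static void timer_exit_to_host(unsigned long *cnthctl)
    {
            /* host may use both the physical timer and the counter */
            *cnthctl |= CNTHCTL_PL1PCEN | CNTHCTL_PL1PCTEN;
    }

    static void timer_enter_guest(unsigned long *cnthctl)
    {
            /* guest reads the counter directly, timer accesses trap */
            *cnthctl |= CNTHCTL_PL1PCTEN;
            *cnthctl &= ~CNTHCTL_PL1PCEN;
    }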
diff --git a/arch/arm/kvm/mmio.c b/arch/arm/kvm/mmio.c
index 7f33b20..0f6600f 100644
--- a/arch/arm/kvm/mmio.c
+++ b/arch/arm/kvm/mmio.c
@@ -206,7 +206,8 @@
run->mmio.is_write = is_write;
run->mmio.phys_addr = fault_ipa;
run->mmio.len = len;
- memcpy(run->mmio.data, data_buf, len);
+ if (is_write)
+ memcpy(run->mmio.data, data_buf, len);
if (!ret) {
/* We handled the access successfully in the kernel. */
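The one-line mmio.c change above is a data-leak fix: run->mmio.data only carries
a payload from kernel to userspace for guest stores; for loads it is userspace
that fills the buffer in, so an unconditional memcpy would hand stale kernel
stack bytes to userspace. A self-contained sketch of the fixed pattern, with the
struct abridged from the kvm_run MMIO layout:

    #include <string.h>

    struct mmio_exit {                      /* abridged from struct kvm_run */
            unsigned long long phys_addr;
            unsigned char data[8];
            unsigned int len;
            unsigned char is_write;
    };

    static void fill_mmio_exit(struct mmio_exit *run, unsigned long long ipa,
                               const void *buf, unsigned int len, int is_write)
    {
            run->phys_addr = ipa;
            run->len = len;
            run->is_write = is_write;
            if (is_write)                   /* loads are completed by userspace */
                    memcpy(run->data, buf, len);
    }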
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index aba61fd..58dbd5c 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -28,6 +28,7 @@
#include <asm/kvm_mmio.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
+#include <asm/virt.h>
#include "trace.h"
@@ -598,6 +599,9 @@
unsigned long start = KERN_TO_HYP((unsigned long)from);
unsigned long end = KERN_TO_HYP((unsigned long)to);
+ if (is_kernel_in_hyp_mode())
+ return 0;
+
start = start & PAGE_MASK;
end = PAGE_ALIGN(end);
@@ -630,6 +634,9 @@
unsigned long start = KERN_TO_HYP((unsigned long)from);
unsigned long end = KERN_TO_HYP((unsigned long)to);
+ if (is_kernel_in_hyp_mode())
+ return 0;
+
/* Check for a valid kernel IO mapping */
if (!is_vmalloc_addr(from) || !is_vmalloc_addr(to - 1))
return -EINVAL;
@@ -1431,6 +1438,22 @@
}
/*
+ * Check for a cache maintenance operation. Since we
+ * ended-up here, we know it is outside of any memory
+ * slot. But we can't find out if that is for a device,
+ * or if the guest is just being stupid. The only thing
+ * we know for sure is that this range cannot be cached.
+ *
+ * So let's assume that the guest is just being
+ * cautious, and skip the instruction.
+ */
+ if (kvm_vcpu_dabt_is_cm(vcpu)) {
+ kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
+ ret = 1;
+ goto out_unlock;
+ }
+
+ /*
* The IPA is reported as [MAX:12], so we need to
* complement it with the bottom 12 bits from the
* faulting VA. This is always 12 bits, irrespective
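The new mmu.c code above keys off the CM syndrome bit: a cache maintenance
operation that faults outside any memslot cannot possibly target cacheable
memory, so the instruction is simply skipped. A minimal sketch of the bit test
behind kvm_vcpu_dabt_is_cm() (ESR_ELx.CM is bit 8 of the syndrome):

    #define ESR_ELx_CM (1UL << 8)

    static inline int dabt_is_cache_maintenance(unsigned long esr)
    {
            return !!(esr & ESR_ELx_CM);
    }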
diff --git a/arch/arm/kvm/reset.c b/arch/arm/kvm/reset.c
index eeb8585..0048b5a 100644
--- a/arch/arm/kvm/reset.c
+++ b/arch/arm/kvm/reset.c
@@ -71,7 +71,7 @@
}
/* Reset core registers */
- memcpy(&vcpu->arch.regs, reset_regs, sizeof(vcpu->arch.regs));
+ memcpy(&vcpu->arch.ctxt.gp_regs, reset_regs, sizeof(vcpu->arch.ctxt.gp_regs));
/* Reset CP15 registers */
kvm_reset_coprocs(vcpu);
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 8098272..bab814d 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -18,6 +18,7 @@
#include <asm/setup.h>
#include <asm/mach/arch.h>
+#include <asm/system_info.h>
#include "common.h"
@@ -77,12 +78,31 @@
NULL,
};
+/* Set system_rev from atags */
+static void __init rx51_set_system_rev(const struct tag *tags)
+{
+ const struct tag *tag;
+
+ if (tags->hdr.tag != ATAG_CORE)
+ return;
+
+ for_each_tag(tag, tags) {
+ if (tag->hdr.tag == ATAG_REVISION) {
+ system_rev = tag->u.revision.rev;
+ break;
+ }
+ }
+}
+
/* Legacy userspace on Nokia N900 needs ATAGS exported in /proc/atags,
* save them while the data is still not overwritten
*/
static void __init rx51_reserve(void)
{
- save_atags((const struct tag *)(PAGE_OFFSET + 0x100));
+ const struct tag *tags = (const struct tag *)(PAGE_OFFSET + 0x100);
+
+ save_atags(tags);
+ rx51_set_system_rev(tags);
omap_reserve();
}
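rx51_set_system_rev() above walks the ATAG list the bootloader leaves at
PAGE_OFFSET + 0x100 and lifts ATAG_REVISION into system_rev before the area can
be overwritten. Each tag carries its own length in 32-bit words, which is what
for_each_tag iterates on; a simplified model, with the struct abridged from
asm/setup.h:

    struct tag_header {
            unsigned int size;      /* tag length in 32-bit words */
            unsigned int tag;       /* ATAG_* identifier */
    };

    static const struct tag_header *next_tag(const struct tag_header *t)
    {
            return (const struct tag_header *)((const unsigned int *)t + t->size);
    }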
diff --git a/arch/arm/mach-omap2/gpmc-onenand.c b/arch/arm/mach-omap2/gpmc-onenand.c
index 7b76ce0..8633c70 100644
--- a/arch/arm/mach-omap2/gpmc-onenand.c
+++ b/arch/arm/mach-omap2/gpmc-onenand.c
@@ -101,10 +101,8 @@
static void set_onenand_cfg(void __iomem *onenand_base)
{
- u32 reg;
+ u32 reg = ONENAND_SYS_CFG1_RDY | ONENAND_SYS_CFG1_INT;
- reg = readw(onenand_base + ONENAND_REG_SYS_CFG1);
- reg &= ~((0x7 << ONENAND_SYS_CFG1_BRL_SHIFT) | (0x7 << 9));
reg |= (latency << ONENAND_SYS_CFG1_BRL_SHIFT) |
ONENAND_SYS_CFG1_BL_16;
if (onenand_flags & ONENAND_FLAG_SYNCREAD)
@@ -123,6 +121,7 @@
reg |= ONENAND_SYS_CFG1_VHF;
else
reg &= ~ONENAND_SYS_CFG1_VHF;
+
writew(reg, onenand_base + ONENAND_REG_SYS_CFG1);
}
@@ -289,6 +288,7 @@
}
}
+ onenand_async.sync_write = true;
omap2_onenand_calc_async_timings(&t);
ret = gpmc_cs_program_settings(gpmc_onenand_data->cs, &onenand_async);
diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
index 0437537..f7ff3b9 100644
--- a/arch/arm/mach-omap2/omap_device.c
+++ b/arch/arm/mach-omap2/omap_device.c
@@ -191,12 +191,22 @@
{
struct platform_device *pdev = to_platform_device(dev);
struct omap_device *od;
+ int err;
switch (event) {
case BUS_NOTIFY_DEL_DEVICE:
if (pdev->archdata.od)
omap_device_delete(pdev->archdata.od);
break;
+ case BUS_NOTIFY_UNBOUND_DRIVER:
+ od = to_omap_device(pdev);
+ if (od && (od->_state == OMAP_DEVICE_STATE_ENABLED)) {
+ dev_info(dev, "enabled after unload, idling\n");
+ err = omap_device_idle(pdev);
+ if (err)
+ dev_err(dev, "failed to idle\n");
+ }
+ break;
case BUS_NOTIFY_ADD_DEVICE:
if (pdev->dev.of_node)
omap_device_build_from_dt(pdev);
@@ -602,8 +612,10 @@
int ret;
ret = omap_device_enable(pdev);
- if (ret)
+ if (ret) {
+ dev_err(dev, "use pm_runtime_put_sync_suspend() in driver?\n");
return ret;
+ }
return pm_generic_runtime_resume(dev);
}
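The BUS_NOTIFY_UNBOUND_DRIVER case added above catches a driver that was unbound
while its device was still enabled, and idles the device so runtime PM stays
balanced. For reference, this is roughly how such a notifier is attached to the
platform bus; the callback name here is illustrative:

    #include <linux/device.h>
    #include <linux/notifier.h>
    #include <linux/platform_device.h>

    static int unbound_notifier_call(struct notifier_block *nb,
                                     unsigned long event, void *data)
    {
            struct device *dev = data;

            if (event == BUS_NOTIFY_UNBOUND_DRIVER)
                    dev_info(dev, "driver unbound\n");
            return NOTIFY_DONE;
    }

    static struct notifier_block unbound_nb = {
            .notifier_call = unbound_notifier_call,
    };

    static int __init unbound_watch_init(void)
    {
            return bus_register_notifier(&platform_bus_type, &unbound_nb);
    }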
diff --git a/arch/arm/mach-shmobile/common.h b/arch/arm/mach-shmobile/common.h
index 9cb1121..b3a4ed528 100644
--- a/arch/arm/mach-shmobile/common.h
+++ b/arch/arm/mach-shmobile/common.h
@@ -4,7 +4,6 @@
extern void shmobile_init_delay(void);
extern void shmobile_boot_vector(void);
extern unsigned long shmobile_boot_fn;
-extern unsigned long shmobile_boot_arg;
extern unsigned long shmobile_boot_size;
extern void shmobile_smp_boot(void);
extern void shmobile_smp_sleep(void);
diff --git a/arch/arm/mach-shmobile/headsmp-scu.S b/arch/arm/mach-shmobile/headsmp-scu.S
index fa5248c..5e503d9 100644
--- a/arch/arm/mach-shmobile/headsmp-scu.S
+++ b/arch/arm/mach-shmobile/headsmp-scu.S
@@ -38,9 +38,3 @@
b secondary_startup
ENDPROC(shmobile_boot_scu)
-
- .text
- .align 2
- .globl shmobile_scu_base
-shmobile_scu_base:
- .space 4
diff --git a/arch/arm/mach-shmobile/headsmp.S b/arch/arm/mach-shmobile/headsmp.S
index 330c1fc..32e0bf6 100644
--- a/arch/arm/mach-shmobile/headsmp.S
+++ b/arch/arm/mach-shmobile/headsmp.S
@@ -24,7 +24,6 @@
.arm
.align 12
ENTRY(shmobile_boot_vector)
- ldr r0, 2f
ldr r1, 1f
bx r1
@@ -34,9 +33,6 @@
.globl shmobile_boot_fn
shmobile_boot_fn:
1: .space 4
- .globl shmobile_boot_arg
-shmobile_boot_arg:
-2: .space 4
.globl shmobile_boot_size
shmobile_boot_size:
.long . - shmobile_boot_vector
@@ -46,13 +42,15 @@
*/
ENTRY(shmobile_smp_boot)
- @ r0 = MPIDR_HWID_BITMASK
mrc p15, 0, r1, c0, c0, 5 @ r1 = MPIDR
- and r0, r1, r0 @ r0 = cpu_logical_map() value
+ and r0, r1, #0xffffff @ MPIDR_HWID_BITMASK
+ @ r0 = cpu_logical_map() value
mov r1, #0 @ r1 = CPU index
- adr r5, 1f @ array of per-cpu mpidr values
- adr r6, 2f @ array of per-cpu functions
- adr r7, 3f @ array of per-cpu arguments
+ adr r2, 1f
+ ldmia r2, {r5, r6, r7}
+ add r5, r5, r2 @ array of per-cpu mpidr values
+ add r6, r6, r2 @ array of per-cpu functions
+ add r7, r7, r2 @ array of per-cpu arguments
shmobile_smp_boot_find_mpidr:
ldr r8, [r5, r1, lsl #2]
@@ -80,12 +78,18 @@
b shmobile_smp_boot
ENDPROC(shmobile_smp_sleep)
+ .align 2
+1: .long shmobile_smp_mpidr - .
+ .long shmobile_smp_fn - 1b
+ .long shmobile_smp_arg - 1b
+
+ .bss
.globl shmobile_smp_mpidr
shmobile_smp_mpidr:
-1: .space NR_CPUS * 4
+ .space NR_CPUS * 4
.globl shmobile_smp_fn
shmobile_smp_fn:
-2: .space NR_CPUS * 4
+ .space NR_CPUS * 4
.globl shmobile_smp_arg
shmobile_smp_arg:
-3: .space NR_CPUS * 4
+ .space NR_CPUS * 4
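Because shmobile_smp_mpidr/fn/arg now live in .bss, the boot code can no longer
reach them with a PC-relative adr; instead the text stores link-time deltas
(.long shmobile_smp_fn - 1b) and rebuilds the absolute addresses at run time by
adding the literal pool's own address. The same idea expressed in C, purely as
an analogy:

    #include <stdint.h>

    /* "slot" holds (target - &slot), filled in at link time; adding the
     * slot's runtime address recovers the target's runtime address. */
    static uintptr_t resolve_pcrel(const int32_t *slot)
    {
            return (uintptr_t)slot + *slot;
    }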
diff --git a/arch/arm/mach-shmobile/platsmp-apmu.c b/arch/arm/mach-shmobile/platsmp-apmu.c
index 911884f..aba75c8 100644
--- a/arch/arm/mach-shmobile/platsmp-apmu.c
+++ b/arch/arm/mach-shmobile/platsmp-apmu.c
@@ -123,7 +123,6 @@
{
/* install boot code shared by all CPUs */
shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
- shmobile_boot_arg = MPIDR_HWID_BITMASK;
/* perform per-cpu setup */
apmu_parse_cfg(apmu_init_cpu, apmu_config, num);
diff --git a/arch/arm/mach-shmobile/platsmp-scu.c b/arch/arm/mach-shmobile/platsmp-scu.c
index 6466311..081a097 100644
--- a/arch/arm/mach-shmobile/platsmp-scu.c
+++ b/arch/arm/mach-shmobile/platsmp-scu.c
@@ -17,6 +17,9 @@
#include <asm/smp_scu.h>
#include "common.h"
+
+void __iomem *shmobile_scu_base;
+
static int shmobile_smp_scu_notifier_call(struct notifier_block *nfb,
unsigned long action, void *hcpu)
{
@@ -41,7 +44,6 @@
{
/* install boot code shared by all CPUs */
shmobile_boot_fn = virt_to_phys(shmobile_smp_boot);
- shmobile_boot_arg = MPIDR_HWID_BITMASK;
/* enable SCU and cache coherency on booting CPU */
scu_enable(shmobile_scu_base);
diff --git a/arch/arm/mach-shmobile/smp-r8a7779.c b/arch/arm/mach-shmobile/smp-r8a7779.c
index b854fe2..0b024a9 100644
--- a/arch/arm/mach-shmobile/smp-r8a7779.c
+++ b/arch/arm/mach-shmobile/smp-r8a7779.c
@@ -92,8 +92,6 @@
{
/* Map the reset vector (in headsmp-scu.S, headsmp.S) */
__raw_writel(__pa(shmobile_boot_vector), AVECR);
- shmobile_boot_fn = virt_to_phys(shmobile_boot_scu);
- shmobile_boot_arg = (unsigned long)shmobile_scu_base;
/* setup r8a7779 specific SCU bits */
shmobile_scu_base = IOMEM(R8A7779_SCU_BASE);
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 4b4058d..66353ca 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -173,7 +173,7 @@
{
unsigned long rnd;
- rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1);
+ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
return rnd << PAGE_SHIFT;
}
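arch_mmap_rnd(), patched above, draws mmap_rnd_bits of entropy and shifts it by
PAGE_SHIFT; get_random_long() and the 1UL literal keep the mask arithmetic in
the native word size. A worked example with illustrative values:

    /* With mmap_rnd_bits = 8 and PAGE_SHIFT = 12 (4 KiB pages), the mmap
     * base jitters over 256 pages, i.e. up to 1 MiB - 4 KiB. */
    static unsigned long mmap_offset(unsigned long random_value)
    {
            unsigned long rnd = random_value & ((1UL << 8) - 1); /* 0..255 */

            return rnd << 12;       /* page-aligned offset */
    }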
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 8cc6228..cf118d9 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -750,6 +750,19 @@
not support these instructions and requires the kernel to be
built with binutils >= 2.25.
+config ARM64_VHE
+ bool "Enable support for Virtualization Host Extensions (VHE)"
+ default y
+ help
+ Virtualization Host Extensions (VHE) allow the kernel to run
+ directly at EL2 (instead of EL1) on processors that support
+	  it. This leads to better performance for KVM, as it reduces
+ the cost of the world switch.
+
+ Selecting this option allows the VHE feature to be detected
+ at runtime, and does not affect processors that do not
+ implement this feature.
+
endmenu
endmenu
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 307237c..b5e3f6d 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -88,7 +88,7 @@
Image.%: vmlinux
$(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
-zinstall install: vmlinux
+zinstall install:
$(Q)$(MAKE) $(build)=$(boot) $@
%.dtb: scripts
diff --git a/arch/arm64/boot/Makefile b/arch/arm64/boot/Makefile
index abcbba2..305c552 100644
--- a/arch/arm64/boot/Makefile
+++ b/arch/arm64/boot/Makefile
@@ -34,10 +34,10 @@
$(obj)/Image.lzo: $(obj)/Image FORCE
$(call if_changed,lzo)
-install: $(obj)/Image
+install:
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/Image System.map "$(INSTALL_PATH)"
-zinstall: $(obj)/Image.gz
+zinstall:
$(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
$(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/arm64/boot/install.sh b/arch/arm64/boot/install.sh
index 12ed78a..d91e1f0 100644
--- a/arch/arm64/boot/install.sh
+++ b/arch/arm64/boot/install.sh
@@ -20,6 +20,20 @@
# $4 - default install path (blank if root directory)
#
+verify () {
+ if [ ! -f "$1" ]; then
+ echo "" 1>&2
+ echo " *** Missing file: $1" 1>&2
+ echo ' *** You need to run "make" before "make install".' 1>&2
+ echo "" 1>&2
+ exit 1
+ fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
# User may have a custom install script
if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
diff --git a/arch/arm64/crypto/aes-glue.c b/arch/arm64/crypto/aes-glue.c
index 05d9e16..7a3d22a 100644
--- a/arch/arm64/crypto/aes-glue.c
+++ b/arch/arm64/crypto/aes-glue.c
@@ -294,7 +294,7 @@
.cra_blkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = 0,
.setkey = aes_setkey,
.encrypt = ecb_encrypt,
.decrypt = ecb_decrypt,
@@ -371,7 +371,7 @@
.cra_ablkcipher = {
.min_keysize = AES_MIN_KEY_SIZE,
.max_keysize = AES_MAX_KEY_SIZE,
- .ivsize = AES_BLOCK_SIZE,
+ .ivsize = 0,
.setkey = ablk_set_key,
.encrypt = ablk_encrypt,
.decrypt = ablk_decrypt,
diff --git a/arch/arm64/include/asm/arch_gicv3.h b/arch/arm64/include/asm/arch_gicv3.h
index 2731d3b..8ec88e5 100644
--- a/arch/arm64/include/asm/arch_gicv3.h
+++ b/arch/arm64/include/asm/arch_gicv3.h
@@ -103,6 +103,7 @@
u64 irqstat;
asm volatile("mrs_s %0, " __stringify(ICC_IAR1_EL1) : "=r" (irqstat));
+ dsb(sy);
return irqstat;
}
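The dsb(sy) added above orders the ICC_IAR1_EL1 read, which acknowledges the
interrupt, against everything the handler does next. On the consumer side only
bits [23:0] of the returned value are the INTID, and IDs 1020-1023 are
architecturally special; a hedged usage sketch, where dispatch_irq() is a
hypothetical handler:

    static int handle_one_irq(void)
    {
            unsigned long irqnr = gic_read_iar() & 0xffffff; /* INTID: bits [23:0] */

            if (irqnr >= 1020 && irqnr <= 1023)
                    return 0;               /* special/spurious INTID */
            return dispatch_irq(irqnr);     /* dispatch_irq is hypothetical */
    }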
diff --git a/arch/arm64/include/asm/cpufeature.h b/arch/arm64/include/asm/cpufeature.h
index 8f271b8..a5c769b 100644
--- a/arch/arm64/include/asm/cpufeature.h
+++ b/arch/arm64/include/asm/cpufeature.h
@@ -30,8 +30,12 @@
#define ARM64_HAS_LSE_ATOMICS 5
#define ARM64_WORKAROUND_CAVIUM_23154 6
#define ARM64_WORKAROUND_834220 7
+/* #define ARM64_HAS_NO_HW_PREFETCH 8 */
+/* #define ARM64_HAS_UAO 9 */
+/* #define ARM64_ALT_PAN_NOT_UAO 10 */
+#define ARM64_HAS_VIRT_HOST_EXTN 11
-#define ARM64_NCAPS 8
+#define ARM64_NCAPS 12
#ifndef __ASSEMBLY__
diff --git a/arch/arm64/include/asm/hw_breakpoint.h b/arch/arm64/include/asm/hw_breakpoint.h
index 9732908..115ea2a 100644
--- a/arch/arm64/include/asm/hw_breakpoint.h
+++ b/arch/arm64/include/asm/hw_breakpoint.h
@@ -18,6 +18,7 @@
#include <asm/cputype.h>
#include <asm/cpufeature.h>
+#include <asm/virt.h>
#ifdef __KERNEL__
@@ -35,10 +36,21 @@
struct arch_hw_breakpoint_ctrl ctrl;
};
+/* Privilege Levels */
+#define AARCH64_BREAKPOINT_EL1 1
+#define AARCH64_BREAKPOINT_EL0 2
+
+#define DBG_HMC_HYP (1 << 13)
+
static inline u32 encode_ctrl_reg(struct arch_hw_breakpoint_ctrl ctrl)
{
- return (ctrl.len << 5) | (ctrl.type << 3) | (ctrl.privilege << 1) |
+ u32 val = (ctrl.len << 5) | (ctrl.type << 3) | (ctrl.privilege << 1) |
ctrl.enabled;
+
+ if (is_kernel_in_hyp_mode() && ctrl.privilege == AARCH64_BREAKPOINT_EL1)
+ val |= DBG_HMC_HYP;
+
+ return val;
}
static inline void decode_ctrl_reg(u32 reg,
@@ -61,10 +73,6 @@
#define ARM_BREAKPOINT_STORE 2
#define AARCH64_ESR_ACCESS_MASK (1 << 6)
-/* Privilege Levels */
-#define AARCH64_BREAKPOINT_EL1 1
-#define AARCH64_BREAKPOINT_EL0 2
-
/* Lengths */
#define ARM_BREAKPOINT_LEN_1 0x1
#define ARM_BREAKPOINT_LEN_2 0x3
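With VHE the kernel's "EL1-privileged" breakpoints would never fire, since the
kernel actually runs at EL2; encode_ctrl_reg() therefore ORs in the HMC bit
(bit 13) to retarget the debug exception. A worked example of the packing for an
enabled 4-byte execute breakpoint (ARM_BREAKPOINT_LEN_4 is 0xf):

    u32 ctrl = (0xf << 5)                    /* length: all four BAS bits */
             | (0 << 3)                      /* type: execute */
             | (AARCH64_BREAKPOINT_EL1 << 1) /* privilege */
             | 1;                            /* enabled */

    if (is_kernel_in_hyp_mode())
            ctrl |= DBG_HMC_HYP;             /* fire at EL2 under VHE */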
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index bef6e92..b56a0a8 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -23,6 +23,7 @@
#include <asm/types.h>
/* Hyp Configuration Register (HCR) bits */
+#define HCR_E2H (UL(1) << 34)
#define HCR_ID (UL(1) << 33)
#define HCR_CD (UL(1) << 32)
#define HCR_RW_SHIFT 31
@@ -81,7 +82,7 @@
HCR_AMO | HCR_SWIO | HCR_TIDCP | HCR_RW)
#define HCR_VIRT_EXCP_MASK (HCR_VA | HCR_VI | HCR_VF)
#define HCR_INT_OVERRIDE (HCR_FMO | HCR_IMO)
-
+#define HCR_HOST_VHE_FLAGS (HCR_RW | HCR_TGE | HCR_E2H)
/* Hyp System Control Register (SCTLR_EL2) bits */
#define SCTLR_EL2_EE (1 << 25)
@@ -107,8 +108,6 @@
#define TCR_EL2_MASK (TCR_EL2_TG0 | TCR_EL2_SH0 | \
TCR_EL2_ORGN0 | TCR_EL2_IRGN0 | TCR_EL2_T0SZ)
-#define TCR_EL2_FLAGS (TCR_EL2_RES1 | TCR_EL2_PS_40B)
-
/* VTCR_EL2 Registers bits */
#define VTCR_EL2_RES1 (1 << 31)
#define VTCR_EL2_PS_MASK (7 << 16)
@@ -218,4 +217,7 @@
ECN(SOFTSTP_CUR), ECN(WATCHPT_LOW), ECN(WATCHPT_CUR), \
ECN(BKPT32), ECN(VECTOR32), ECN(BRK64)
+#define CPACR_EL1_FPEN (3 << 20)
+#define CPACR_EL1_TTA (1 << 28)
+
#endif /* __ARM64_KVM_ARM_H__ */
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 52b777b..2d02ba6 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -35,9 +35,6 @@
extern char __kvm_hyp_vector[];
-#define __kvm_hyp_code_start __hyp_text_start
-#define __kvm_hyp_code_end __hyp_text_end
-
extern void __kvm_flush_vm_context(void);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
extern void __kvm_tlb_flush_vmid(struct kvm *kvm);
@@ -45,9 +42,12 @@
extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);
extern u64 __vgic_v3_get_ich_vtr_el2(void);
+extern void __vgic_v3_init_lrs(void);
extern u32 __kvm_get_mdcr_el2(void);
+extern void __init_stage2_translation(void);
+
#endif
#endif /* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 779a587..40bc168 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -29,6 +29,7 @@
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
+#include <asm/virt.h>
unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);
@@ -43,6 +44,8 @@
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
+ if (is_kernel_in_hyp_mode())
+ vcpu->arch.hcr_el2 |= HCR_E2H;
if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
vcpu->arch.hcr_el2 &= ~HCR_RW;
}
@@ -189,6 +192,11 @@
return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}
+static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
+{
+ return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
+}
+
static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 689d4c9..71fa6fe 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -25,7 +25,9 @@
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <asm/kvm.h>
+#include <asm/kvm_asm.h>
#include <asm/kvm_mmio.h>
+#include <asm/kvm_perf_event.h>
#define __KVM_HAVE_ARCH_INTC_INITIALIZED
@@ -36,10 +38,11 @@
#include <kvm/arm_vgic.h>
#include <kvm/arm_arch_timer.h>
+#include <kvm/arm_pmu.h>
#define KVM_MAX_VCPUS VGIC_V3_MAX_CPUS
-#define KVM_VCPU_MAX_FEATURES 3
+#define KVM_VCPU_MAX_FEATURES 4
int __attribute_const__ kvm_target_cpu(void);
int kvm_reset_vcpu(struct kvm_vcpu *vcpu);
@@ -114,6 +117,21 @@
MDSCR_EL1, /* Monitor Debug System Control Register */
MDCCINT_EL1, /* Monitor Debug Comms Channel Interrupt Enable Reg */
+ /* Performance Monitors Registers */
+ PMCR_EL0, /* Control Register */
+ PMSELR_EL0, /* Event Counter Selection Register */
+ PMEVCNTR0_EL0, /* Event Counter Register (0-30) */
+ PMEVCNTR30_EL0 = PMEVCNTR0_EL0 + 30,
+ PMCCNTR_EL0, /* Cycle Counter Register */
+ PMEVTYPER0_EL0, /* Event Type Register (0-30) */
+ PMEVTYPER30_EL0 = PMEVTYPER0_EL0 + 30,
+ PMCCFILTR_EL0, /* Cycle Count Filter Register */
+ PMCNTENSET_EL0, /* Count Enable Set Register */
+ PMINTENSET_EL1, /* Interrupt Enable Set Register */
+ PMOVSSET_EL0, /* Overflow Flag Status Set Register */
+ PMSWINC_EL0, /* Software Increment Register */
+ PMUSERENR_EL0, /* User Enable Register */
+
/* 32bit specific registers. Keep them at the end of the range */
DACR32_EL2, /* Domain Access Control Register */
IFSR32_EL2, /* Instruction Fault Status Register */
@@ -211,6 +229,7 @@
/* VGIC state */
struct vgic_cpu vgic_cpu;
struct arch_timer_cpu timer_cpu;
+ struct kvm_pmu pmu;
/*
* Anything that is not used directly from assembly code goes
@@ -342,5 +361,18 @@
void kvm_arm_setup_debug(struct kvm_vcpu *vcpu);
void kvm_arm_clear_debug(struct kvm_vcpu *vcpu);
void kvm_arm_reset_debug_ptr(struct kvm_vcpu *vcpu);
+int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+
+/* #define kvm_call_hyp(f, ...) __kvm_call_hyp(kvm_ksym_ref(f), ##__VA_ARGS__) */
+
+static inline void __cpu_init_stage2(void)
+{
+ kvm_call_hyp(__init_stage2_translation);
+}
#endif /* __ARM64_KVM_HOST_H__ */
diff --git a/arch/arm64/include/asm/kvm_hyp.h b/arch/arm64/include/asm/kvm_hyp.h
new file mode 100644
index 0000000..a46b019
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_hyp.h
@@ -0,0 +1,181 @@
+/*
+ * Copyright (C) 2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ARM64_KVM_HYP_H__
+#define __ARM64_KVM_HYP_H__
+
+#include <linux/compiler.h>
+#include <linux/kvm_host.h>
+#include <asm/kvm_mmu.h>
+#include <asm/kvm_perf_event.h>
+#include <asm/sysreg.h>
+
+#define __hyp_text __section(.hyp.text) notrace
+
+static inline unsigned long __kern_hyp_va(unsigned long v)
+{
+ asm volatile(ALTERNATIVE("and %0, %0, %1",
+ "nop",
+ ARM64_HAS_VIRT_HOST_EXTN)
+ : "+r" (v) : "i" (HYP_PAGE_OFFSET_MASK));
+ return v;
+}
+
+#define kern_hyp_va(v) (typeof(v))(__kern_hyp_va((unsigned long)(v)))
+
+static inline unsigned long __hyp_kern_va(unsigned long v)
+{
+ u64 offset = PAGE_OFFSET - HYP_PAGE_OFFSET;
+ asm volatile(ALTERNATIVE("add %0, %0, %1",
+ "nop",
+ ARM64_HAS_VIRT_HOST_EXTN)
+ : "+r" (v) : "r" (offset));
+ return v;
+}
+
+#define hyp_kern_va(v) (typeof(v))(__hyp_kern_va((unsigned long)(v)))
+
+#define read_sysreg_elx(r,nvh,vh) \
+ ({ \
+ u64 reg; \
+ asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##nvh),\
+ "mrs_s %0, " __stringify(r##vh),\
+ ARM64_HAS_VIRT_HOST_EXTN) \
+ : "=r" (reg)); \
+ reg; \
+ })
+
+#define write_sysreg_elx(v,r,nvh,vh) \
+ do { \
+ u64 __val = (u64)(v); \
+ asm volatile(ALTERNATIVE("msr " __stringify(r##nvh) ", %x0",\
+ "msr_s " __stringify(r##vh) ", %x0",\
+ ARM64_HAS_VIRT_HOST_EXTN) \
+ : : "rZ" (__val)); \
+ } while (0)
+
+/*
+ * Unified accessors for registers that have a different encoding
+ * between VHE and non-VHE. They must be specified without their "ELx"
+ * encoding.
+ */
+#define read_sysreg_el2(r) \
+ ({ \
+ u64 reg; \
+ asm volatile(ALTERNATIVE("mrs %0, " __stringify(r##_EL2),\
+ "mrs %0, " __stringify(r##_EL1),\
+ ARM64_HAS_VIRT_HOST_EXTN) \
+ : "=r" (reg)); \
+ reg; \
+ })
+
+#define write_sysreg_el2(v,r) \
+ do { \
+ u64 __val = (u64)(v); \
+ asm volatile(ALTERNATIVE("msr " __stringify(r##_EL2) ", %x0",\
+ "msr " __stringify(r##_EL1) ", %x0",\
+ ARM64_HAS_VIRT_HOST_EXTN) \
+ : : "rZ" (__val)); \
+ } while (0)
+
+#define read_sysreg_el0(r) read_sysreg_elx(r, _EL0, _EL02)
+#define write_sysreg_el0(v,r) write_sysreg_elx(v, r, _EL0, _EL02)
+#define read_sysreg_el1(r) read_sysreg_elx(r, _EL1, _EL12)
+#define write_sysreg_el1(v,r) write_sysreg_elx(v, r, _EL1, _EL12)
+
+/* The VHE specific system registers and their encoding */
+#define sctlr_EL12 sys_reg(3, 5, 1, 0, 0)
+#define cpacr_EL12 sys_reg(3, 5, 1, 0, 2)
+#define ttbr0_EL12 sys_reg(3, 5, 2, 0, 0)
+#define ttbr1_EL12 sys_reg(3, 5, 2, 0, 1)
+#define tcr_EL12 sys_reg(3, 5, 2, 0, 2)
+#define afsr0_EL12 sys_reg(3, 5, 5, 1, 0)
+#define afsr1_EL12 sys_reg(3, 5, 5, 1, 1)
+#define esr_EL12 sys_reg(3, 5, 5, 2, 0)
+#define far_EL12 sys_reg(3, 5, 6, 0, 0)
+#define mair_EL12 sys_reg(3, 5, 10, 2, 0)
+#define amair_EL12 sys_reg(3, 5, 10, 3, 0)
+#define vbar_EL12 sys_reg(3, 5, 12, 0, 0)
+#define contextidr_EL12 sys_reg(3, 5, 13, 0, 1)
+#define cntkctl_EL12 sys_reg(3, 5, 14, 1, 0)
+#define cntp_tval_EL02 sys_reg(3, 5, 14, 2, 0)
+#define cntp_ctl_EL02 sys_reg(3, 5, 14, 2, 1)
+#define cntp_cval_EL02 sys_reg(3, 5, 14, 2, 2)
+#define cntv_tval_EL02 sys_reg(3, 5, 14, 3, 0)
+#define cntv_ctl_EL02 sys_reg(3, 5, 14, 3, 1)
+#define cntv_cval_EL02 sys_reg(3, 5, 14, 3, 2)
+#define spsr_EL12 sys_reg(3, 5, 4, 0, 0)
+#define elr_EL12 sys_reg(3, 5, 4, 0, 1)
+
+/**
+ * hyp_alternate_select - Generates patchable code sequences that are
+ * used to switch between two implementations of a function, depending
+ * on the availability of a feature.
+ *
+ * @fname: a symbol name that will be defined as a function returning a
+ * function pointer whose type will match @orig and @alt
+ * @orig: A pointer to the default function, as returned by @fname when
+ * @cond doesn't hold
+ * @alt: A pointer to the alternate function, as returned by @fname
+ * when @cond holds
+ * @cond: a CPU feature (as described in asm/cpufeature.h)
+ */
+#define hyp_alternate_select(fname, orig, alt, cond) \
+typeof(orig) * __hyp_text fname(void) \
+{ \
+ typeof(alt) *val = orig; \
+ asm volatile(ALTERNATIVE("nop \n", \
+ "mov %0, %1 \n", \
+ cond) \
+ : "+r" (val) : "r" (alt)); \
+ return val; \
+}
+
+void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
+
+void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
+void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
+
+void __timer_save_state(struct kvm_vcpu *vcpu);
+void __timer_restore_state(struct kvm_vcpu *vcpu);
+
+void __sysreg_save_host_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_host_state(struct kvm_cpu_context *ctxt);
+void __sysreg_save_guest_state(struct kvm_cpu_context *ctxt);
+void __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt);
+void __sysreg32_save_state(struct kvm_vcpu *vcpu);
+void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
+
+void __debug_save_state(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug_arch *dbg,
+ struct kvm_cpu_context *ctxt);
+void __debug_restore_state(struct kvm_vcpu *vcpu,
+ struct kvm_guest_debug_arch *dbg,
+ struct kvm_cpu_context *ctxt);
+void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
+void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);
+
+void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
+void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
+bool __fpsimd_enabled(void);
+
+u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
+void __noreturn __hyp_do_panic(unsigned long, ...);
+
+#endif /* __ARM64_KVM_HYP_H__ */
+
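hyp_alternate_select() above generates a selector function whose body is
boot-patched: the ALTERNATIVE leaves a nop when the capability is absent and a
mov when it is present, so choosing between the two implementations costs no
runtime branch. The usage pattern, mirroring what hyp/switch.c does further
down (names here are illustrative):

    static bool __hyp_text impl_nvhe(void) { return false; }
    static bool __hyp_text impl_vhe(void)  { return true; }

    static hyp_alternate_select(__pick_impl, impl_nvhe, impl_vhe,
                                ARM64_HAS_VIRT_HOST_EXTN);

    static bool __hyp_text running_vhe(void)
    {
            return __pick_impl()();  /* first call selects, second invokes */
    }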
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 7364339..9a9318a 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -23,13 +23,16 @@
#include <asm/cpufeature.h>
/*
- * As we only have the TTBR0_EL2 register, we cannot express
+ * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
* "negative" addresses. This makes it impossible to directly share
* mappings with the kernel.
*
* Instead, give the HYP mode its own VA region at a fixed offset from
* the kernel by just masking the top bits (which are all ones for a
* kernel address).
+ *
+ * ARMv8.1 (using VHE) does have a TTBR1_EL2, and doesn't use these
+ * macros (the entire kernel runs at EL2).
*/
#define HYP_PAGE_OFFSET_SHIFT VA_BITS
#define HYP_PAGE_OFFSET_MASK ((UL(1) << HYP_PAGE_OFFSET_SHIFT) - 1)
@@ -56,12 +59,19 @@
#ifdef __ASSEMBLY__
+#include <asm/alternative.h>
+#include <asm/cpufeature.h>
+
/*
* Convert a kernel VA into a HYP VA.
* reg: VA to be converted.
*/
.macro kern_hyp_va reg
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
and \reg, \reg, #HYP_PAGE_OFFSET_MASK
+alternative_else
+ nop
+alternative_endif
.endm
#else
diff --git a/arch/arm64/include/asm/kvm_perf_event.h b/arch/arm64/include/asm/kvm_perf_event.h
new file mode 100644
index 0000000..c18fdeb
--- /dev/null
+++ b/arch/arm64/include/asm/kvm_perf_event.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_KVM_PERF_EVENT_H
+#define __ASM_KVM_PERF_EVENT_H
+
+#define ARMV8_PMU_MAX_COUNTERS 32
+#define ARMV8_PMU_COUNTER_MASK (ARMV8_PMU_MAX_COUNTERS - 1)
+
+/*
+ * Per-CPU PMCR: config reg
+ */
+#define ARMV8_PMU_PMCR_E (1 << 0) /* Enable all counters */
+#define ARMV8_PMU_PMCR_P (1 << 1) /* Reset all counters */
+#define ARMV8_PMU_PMCR_C (1 << 2) /* Cycle counter reset */
+#define ARMV8_PMU_PMCR_D (1 << 3) /* CCNT counts every 64th cpu cycle */
+#define ARMV8_PMU_PMCR_X (1 << 4) /* Export to ETM */
+#define ARMV8_PMU_PMCR_DP (1 << 5) /* Disable CCNT if non-invasive debug*/
+/* Determines which bit of PMCCNTR_EL0 generates an overflow */
+#define ARMV8_PMU_PMCR_LC (1 << 6)
+#define ARMV8_PMU_PMCR_N_SHIFT 11 /* Number of counters supported */
+#define ARMV8_PMU_PMCR_N_MASK 0x1f
+#define ARMV8_PMU_PMCR_MASK 0x7f /* Mask for writable bits */
+
+/*
+ * PMOVSR: counters overflow flag status reg
+ */
+#define ARMV8_PMU_OVSR_MASK 0xffffffff /* Mask for writable bits */
+#define ARMV8_PMU_OVERFLOWED_MASK ARMV8_PMU_OVSR_MASK
+
+/*
+ * PMXEVTYPER: Event selection reg
+ */
+#define ARMV8_PMU_EVTYPE_MASK 0xc80003ff /* Mask for writable bits */
+#define ARMV8_PMU_EVTYPE_EVENT 0x3ff /* Mask for EVENT bits */
+
+#define ARMV8_PMU_EVTYPE_EVENT_SW_INCR 0 /* Software increment event */
+
+/*
+ * Event filters for PMUv3
+ */
+#define ARMV8_PMU_EXCLUDE_EL1 (1 << 31)
+#define ARMV8_PMU_EXCLUDE_EL0 (1 << 30)
+#define ARMV8_PMU_INCLUDE_EL2 (1 << 27)
+
+/*
+ * PMUSERENR: user enable reg
+ */
+#define ARMV8_PMU_USERENR_MASK 0xf /* Mask for writable bits */
+#define ARMV8_PMU_USERENR_EN (1 << 0) /* PMU regs can be accessed at EL0 */
+#define ARMV8_PMU_USERENR_SW (1 << 1) /* PMSWINC can be written at EL0 */
+#define ARMV8_PMU_USERENR_CR (1 << 2) /* Cycle counter can be read at EL0 */
+#define ARMV8_PMU_USERENR_ER (1 << 3) /* Event counter can be read at EL0 */
+
+#endif
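A typical consumer of these masks reads PMCR_EL0 and derives the number of
implemented event counters from the N field; a minimal sketch:

    static inline unsigned int pmu_num_counters(u64 pmcr)
    {
            return (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
    }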
diff --git a/arch/arm64/include/asm/virt.h b/arch/arm64/include/asm/virt.h
index 7a5df52..9f22dd6 100644
--- a/arch/arm64/include/asm/virt.h
+++ b/arch/arm64/include/asm/virt.h
@@ -23,6 +23,8 @@
#ifndef __ASSEMBLY__
+#include <asm/ptrace.h>
+
/*
* __boot_cpu_mode records what mode CPUs were booted in.
* A correctly-implemented bootloader must start all CPUs in the same mode:
@@ -50,6 +52,14 @@
return __boot_cpu_mode[0] != __boot_cpu_mode[1];
}
+static inline bool is_kernel_in_hyp_mode(void)
+{
+ u64 el;
+
+ asm("mrs %0, CurrentEL" : "=r" (el));
+ return el == CurrentEL_EL2;
+}
+
/* The section containing the hypervisor text */
extern char __hyp_text_start[];
extern char __hyp_text_end[];
diff --git a/arch/arm64/include/uapi/asm/kvm.h b/arch/arm64/include/uapi/asm/kvm.h
index 2d4ca4b..f209ea1 100644
--- a/arch/arm64/include/uapi/asm/kvm.h
+++ b/arch/arm64/include/uapi/asm/kvm.h
@@ -94,6 +94,7 @@
#define KVM_ARM_VCPU_POWER_OFF 0 /* CPU is started in OFF state */
#define KVM_ARM_VCPU_EL1_32BIT 1 /* CPU running a 32bit VM */
#define KVM_ARM_VCPU_PSCI_0_2 2 /* CPU uses PSCI v0.2 */
+#define KVM_ARM_VCPU_PMU_V3 3 /* Support guest PMUv3 */
struct kvm_vcpu_init {
__u32 target;
@@ -204,6 +205,11 @@
#define KVM_DEV_ARM_VGIC_GRP_CTRL 4
#define KVM_DEV_ARM_VGIC_CTRL_INIT 0
+/* Device Control API on vcpu fd */
+#define KVM_ARM_VCPU_PMU_V3_CTRL 0
+#define KVM_ARM_VCPU_PMU_V3_IRQ 0
+#define KVM_ARM_VCPU_PMU_V3_INIT 1
+
/* KVM_IRQ_LINE irq field index values */
#define KVM_ARM_IRQ_TYPE_SHIFT 24
#define KVM_ARM_IRQ_TYPE_MASK 0xff
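Userspace drives the new per-vcpu PMU through the device control API on the
vcpu fd: first program the overflow interrupt with KVM_ARM_VCPU_PMU_V3_IRQ, then
finalize with KVM_ARM_VCPU_PMU_V3_INIT. A hedged userspace sketch, with error
handling trimmed and the IRQ number left to the caller:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    static int vcpu_pmu_init(int vcpu_fd, int irq)
    {
            struct kvm_device_attr attr = {
                    .group = KVM_ARM_VCPU_PMU_V3_CTRL,
                    .attr  = KVM_ARM_VCPU_PMU_V3_IRQ,
                    .addr  = (__u64)(unsigned long)&irq,
            };

            if (ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr))
                    return -1;

            attr.attr = KVM_ARM_VCPU_PMU_V3_INIT;
            attr.addr = 0;
            return ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
    }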
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index fffa4ac6..b0ab4e9 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -110,9 +110,6 @@
DEFINE(CPU_USER_PT_REGS, offsetof(struct kvm_regs, regs));
DEFINE(CPU_FP_REGS, offsetof(struct kvm_regs, fp_regs));
DEFINE(VCPU_FPEXC32_EL2, offsetof(struct kvm_vcpu, arch.ctxt.sys_regs[FPEXC32_EL2]));
- DEFINE(VCPU_ESR_EL2, offsetof(struct kvm_vcpu, arch.fault.esr_el2));
- DEFINE(VCPU_FAR_EL2, offsetof(struct kvm_vcpu, arch.fault.far_el2));
- DEFINE(VCPU_HPFAR_EL2, offsetof(struct kvm_vcpu, arch.fault.hpfar_el2));
DEFINE(VCPU_HOST_CONTEXT, offsetof(struct kvm_vcpu, arch.host_cpu_context));
#endif
#ifdef CONFIG_CPU_PM
diff --git a/arch/arm64/kernel/cpufeature.c b/arch/arm64/kernel/cpufeature.c
index 5c90aa4..ba74519 100644
--- a/arch/arm64/kernel/cpufeature.c
+++ b/arch/arm64/kernel/cpufeature.c
@@ -26,6 +26,7 @@
#include <asm/cpu_ops.h>
#include <asm/processor.h>
#include <asm/sysreg.h>
+#include <asm/virt.h>
unsigned long elf_hwcap __read_mostly;
EXPORT_SYMBOL_GPL(elf_hwcap);
@@ -621,6 +622,11 @@
return has_sre;
}
+static bool runs_at_el2(const struct arm64_cpu_capabilities *entry)
+{
+ return is_kernel_in_hyp_mode();
+}
+
static const struct arm64_cpu_capabilities arm64_features[] = {
{
.desc = "GIC system register CPU interface",
@@ -651,6 +657,11 @@
.min_field_value = 2,
},
#endif /* CONFIG_AS_LSE && CONFIG_ARM64_LSE_ATOMICS */
+ {
+ .desc = "Virtualization Host Extensions",
+ .capability = ARM64_HAS_VIRT_HOST_EXTN,
+ .matches = runs_at_el2,
+ },
{},
};
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index 8aee3ae..c536c9e 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -226,11 +226,28 @@
return retval;
}
+static void send_user_sigtrap(int si_code)
+{
+ struct pt_regs *regs = current_pt_regs();
+ siginfo_t info = {
+ .si_signo = SIGTRAP,
+ .si_errno = 0,
+ .si_code = si_code,
+ .si_addr = (void __user *)instruction_pointer(regs),
+ };
+
+ if (WARN_ON(!user_mode(regs)))
+ return;
+
+ if (interrupts_enabled(regs))
+ local_irq_enable();
+
+ force_sig_info(SIGTRAP, &info, current);
+}
+
static int single_step_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- siginfo_t info;
-
/*
* If we are stepping a pending breakpoint, call the hw_breakpoint
* handler first.
@@ -239,11 +256,7 @@
return 0;
if (user_mode(regs)) {
- info.si_signo = SIGTRAP;
- info.si_errno = 0;
- info.si_code = TRAP_HWBKPT;
- info.si_addr = (void __user *)instruction_pointer(regs);
- force_sig_info(SIGTRAP, &info, current);
+ send_user_sigtrap(TRAP_HWBKPT);
/*
* ptrace will disable single step unless explicitly
@@ -307,17 +320,8 @@
static int brk_handler(unsigned long addr, unsigned int esr,
struct pt_regs *regs)
{
- siginfo_t info;
-
if (user_mode(regs)) {
- info = (siginfo_t) {
- .si_signo = SIGTRAP,
- .si_errno = 0,
- .si_code = TRAP_BRKPT,
- .si_addr = (void __user *)instruction_pointer(regs),
- };
-
- force_sig_info(SIGTRAP, &info, current);
+ send_user_sigtrap(TRAP_BRKPT);
} else if (call_break_hook(regs, esr) != DBG_HOOK_HANDLED) {
pr_warning("Unexpected kernel BRK exception at EL1\n");
return -EFAULT;
@@ -328,7 +332,6 @@
int aarch32_break_handler(struct pt_regs *regs)
{
- siginfo_t info;
u32 arm_instr;
u16 thumb_instr;
bool bp = false;
@@ -359,14 +362,7 @@
if (!bp)
return -EFAULT;
- info = (siginfo_t) {
- .si_signo = SIGTRAP,
- .si_errno = 0,
- .si_code = TRAP_BRKPT,
- .si_addr = pc,
- };
-
- force_sig_info(SIGTRAP, &info, current);
+ send_user_sigtrap(TRAP_BRKPT);
return 0;
}
diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S
index 917d981..6f2f377 100644
--- a/arch/arm64/kernel/head.S
+++ b/arch/arm64/kernel/head.S
@@ -30,6 +30,7 @@
#include <asm/cache.h>
#include <asm/cputype.h>
#include <asm/kernel-pgtable.h>
+#include <asm/kvm_arm.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
#include <asm/pgtable.h>
@@ -464,9 +465,27 @@
isb
ret
+2:
+#ifdef CONFIG_ARM64_VHE
+ /*
+ * Check for VHE being present. For the rest of the EL2 setup,
+ * x2 being non-zero indicates that we do have VHE, and that the
+ * kernel is intended to run at EL2.
+ */
+ mrs x2, id_aa64mmfr1_el1
+ ubfx x2, x2, #8, #4
+#else
+ mov x2, xzr
+#endif
+
/* Hyp configuration. */
-2: mov x0, #(1 << 31) // 64-bit EL1
+ mov x0, #HCR_RW // 64-bit EL1
+ cbz x2, set_hcr
+ orr x0, x0, #HCR_TGE // Enable Host Extensions
+ orr x0, x0, #HCR_E2H
+set_hcr:
msr hcr_el2, x0
+ isb
/* Generic timers. */
mrs x0, cnthctl_el2
@@ -526,6 +545,13 @@
/* Stage-2 translation */
msr vttbr_el2, xzr
+ cbz x2, install_el2_stub
+
+ mov w20, #BOOT_CPU_MODE_EL2 // This CPU booted in EL2
+ isb
+ ret
+
+install_el2_stub:
/* Hypervisor stub */
adrp x0, __hyp_stub_vectors
add x0, x0, #:lo12:__hyp_stub_vectors
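The ubfx above extracts ID_AA64MMFR1_EL1[11:8], the VH field; any non-zero value
means VHE is implemented, and the boot path then sets HCR_EL2.E2H and TGE so the
kernel stays at EL2. The same check rendered in C:

    /* VH is ID_AA64MMFR1_EL1 bits [11:8]; non-zero => VHE present. */
    static inline int mmfr1_has_vhe(unsigned long long mmfr1)
    {
            return (mmfr1 >> 8) & 0xf;
    }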
diff --git a/arch/arm64/kernel/image.h b/arch/arm64/kernel/image.h
index 999633b..352f7ab 100644
--- a/arch/arm64/kernel/image.h
+++ b/arch/arm64/kernel/image.h
@@ -89,6 +89,7 @@
__efistub_memmove = KALLSYMS_HIDE(__pi_memmove);
__efistub_memset = KALLSYMS_HIDE(__pi_memset);
__efistub_strlen = KALLSYMS_HIDE(__pi_strlen);
+__efistub_strnlen = KALLSYMS_HIDE(__pi_strnlen);
__efistub_strcmp = KALLSYMS_HIDE(__pi_strcmp);
__efistub_strncmp = KALLSYMS_HIDE(__pi_strncmp);
__efistub___flush_dcache_area = KALLSYMS_HIDE(__pi___flush_dcache_area);
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index f7ab14c..1b52269 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -20,6 +20,7 @@
*/
#include <asm/irq_regs.h>
+#include <asm/virt.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
@@ -691,9 +692,12 @@
if (attr->exclude_idle)
return -EPERM;
+ if (is_kernel_in_hyp_mode() &&
+ attr->exclude_kernel != attr->exclude_hv)
+ return -EINVAL;
if (attr->exclude_user)
config_base |= ARMV8_EXCLUDE_EL0;
- if (attr->exclude_kernel)
+ if (!is_kernel_in_hyp_mode() && attr->exclude_kernel)
config_base |= ARMV8_EXCLUDE_EL1;
if (!attr->exclude_hv)
config_base |= ARMV8_INCLUDE_EL2;
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 4fad978..d9751a4 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -44,14 +44,13 @@
unsigned long irq_stack_ptr;
/*
- * Use raw_smp_processor_id() to avoid false-positives from
- * CONFIG_DEBUG_PREEMPT. get_wchan() calls unwind_frame() on sleeping
- * task stacks, we can be pre-empted in this case, so
- * {raw_,}smp_processor_id() may give us the wrong value. Sleeping
- * tasks can't ever be on an interrupt stack, so regardless of cpu,
- * the checks will always fail.
+ * Switching between stacks is valid when tracing current and in
+ * non-preemptible context.
*/
- irq_stack_ptr = IRQ_STACK_PTR(raw_smp_processor_id());
+ if (tsk == current && !preemptible())
+ irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
+ else
+ irq_stack_ptr = 0;
low = frame->sp;
/* irq stacks are not THREAD_SIZE aligned */
@@ -64,8 +63,8 @@
return -EINVAL;
frame->sp = fp + 0x10;
- frame->fp = *(unsigned long *)(fp);
- frame->pc = *(unsigned long *)(fp + 8);
+ frame->fp = READ_ONCE_NOCHECK(*(unsigned long *)(fp));
+ frame->pc = READ_ONCE_NOCHECK(*(unsigned long *)(fp + 8));
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk && tsk->ret_stack &&
diff --git a/arch/arm64/kernel/traps.c b/arch/arm64/kernel/traps.c
index cbedd72..c539208 100644
--- a/arch/arm64/kernel/traps.c
+++ b/arch/arm64/kernel/traps.c
@@ -146,9 +146,18 @@
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
struct stackframe frame;
- unsigned long irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
+ unsigned long irq_stack_ptr;
int skip;
+ /*
+ * Switching between stacks is valid when tracing current and in
+ * non-preemptible context.
+ */
+ if (tsk == current && !preemptible())
+ irq_stack_ptr = IRQ_STACK_PTR(smp_processor_id());
+ else
+ irq_stack_ptr = 0;
+
pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);
if (!tsk)
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index a5272c0..de7450d 100644
--- a/arch/arm64/kvm/Kconfig
+++ b/arch/arm64/kvm/Kconfig
@@ -36,6 +36,7 @@
select HAVE_KVM_EVENTFD
select HAVE_KVM_IRQFD
select KVM_ARM_VGIC_V3
+ select KVM_ARM_PMU if HW_PERF_EVENTS
---help---
Support hosting virtualized guest machines.
We don't support KVM with 16K page tables yet, due to the multiple
@@ -48,6 +49,12 @@
---help---
Provides host support for ARM processors.
+config KVM_ARM_PMU
+ bool
+ ---help---
+ Adds support for a virtual Performance Monitoring Unit (PMU) in
+ virtual machines.
+
source drivers/vhost/Kconfig
endif # VIRTUALIZATION
diff --git a/arch/arm64/kvm/Makefile b/arch/arm64/kvm/Makefile
index caee9ee..122cff4 100644
--- a/arch/arm64/kvm/Makefile
+++ b/arch/arm64/kvm/Makefile
@@ -26,3 +26,4 @@
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/vgic-v3-emul.o
kvm-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/arch_timer.o
+kvm-$(CONFIG_KVM_ARM_PMU) += $(KVM)/arm/pmu.o
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index fcb7788..dbe45c3 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -380,3 +380,54 @@
}
return 0;
}
+
+int kvm_arm_vcpu_arch_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->group) {
+ case KVM_ARM_VCPU_PMU_V3_CTRL:
+ ret = kvm_arm_pmu_v3_set_attr(vcpu, attr);
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+int kvm_arm_vcpu_arch_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->group) {
+ case KVM_ARM_VCPU_PMU_V3_CTRL:
+ ret = kvm_arm_pmu_v3_get_attr(vcpu, attr);
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
+
+int kvm_arm_vcpu_arch_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ int ret;
+
+ switch (attr->group) {
+ case KVM_ARM_VCPU_PMU_V3_CTRL:
+ ret = kvm_arm_pmu_v3_has_attr(vcpu, attr);
+ break;
+ default:
+ ret = -ENXIO;
+ break;
+ }
+
+ return ret;
+}
diff --git a/arch/arm64/kvm/hyp-init.S b/arch/arm64/kvm/hyp-init.S
index 3e568dc..7d8747c 100644
--- a/arch/arm64/kvm/hyp-init.S
+++ b/arch/arm64/kvm/hyp-init.S
@@ -64,7 +64,7 @@
mrs x4, tcr_el1
ldr x5, =TCR_EL2_MASK
and x4, x4, x5
- ldr x5, =TCR_EL2_FLAGS
+ mov x5, #TCR_EL2_RES1
orr x4, x4, x5
#ifndef CONFIG_ARM64_VA_BITS_48
@@ -85,25 +85,14 @@
ldr_l x5, idmap_t0sz
bfi x4, x5, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH
#endif
- msr tcr_el2, x4
-
- ldr x4, =VTCR_EL2_FLAGS
/*
* Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS bits in
- * VTCR_EL2.
+ * TCR_EL2.
*/
mrs x5, ID_AA64MMFR0_EL1
bfi x4, x5, #16, #3
- /*
- * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS bit in
- * VTCR_EL2.
- */
- mrs x5, ID_AA64MMFR1_EL1
- ubfx x5, x5, #5, #1
- lsl x5, x5, #VTCR_EL2_VS
- orr x4, x4, x5
- msr vtcr_el2, x4
+ msr tcr_el2, x4
mrs x4, mair_el1
msr mair_el2, x4
diff --git a/arch/arm64/kvm/hyp.S b/arch/arm64/kvm/hyp.S
index 0ccdcbb..0689a74 100644
--- a/arch/arm64/kvm/hyp.S
+++ b/arch/arm64/kvm/hyp.S
@@ -17,7 +17,9 @@
#include <linux/linkage.h>
+#include <asm/alternative.h>
#include <asm/assembler.h>
+#include <asm/cpufeature.h>
/*
* u64 kvm_call_hyp(void *hypfn, ...);
@@ -38,6 +40,11 @@
* arch/arm64/kernel/hyp_stub.S.
*/
ENTRY(kvm_call_hyp)
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
hvc #0
ret
+alternative_else
+ b __vhe_hyp_call
+ nop
+alternative_endif
ENDPROC(kvm_call_hyp)
diff --git a/arch/arm64/kvm/hyp/Makefile b/arch/arm64/kvm/hyp/Makefile
index 826032b..b6a8fc5 100644
--- a/arch/arm64/kvm/hyp/Makefile
+++ b/arch/arm64/kvm/hyp/Makefile
@@ -2,9 +2,12 @@
# Makefile for Kernel-based Virtual Machine module, HYP part
#
-obj-$(CONFIG_KVM_ARM_HOST) += vgic-v2-sr.o
+KVM=../../../../virt/kvm
+
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/vgic-v2-sr.o
+obj-$(CONFIG_KVM_ARM_HOST) += $(KVM)/arm/hyp/timer-sr.o
+
obj-$(CONFIG_KVM_ARM_HOST) += vgic-v3-sr.o
-obj-$(CONFIG_KVM_ARM_HOST) += timer-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += sysreg-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += debug-sr.o
obj-$(CONFIG_KVM_ARM_HOST) += entry.o
@@ -12,3 +15,4 @@
obj-$(CONFIG_KVM_ARM_HOST) += fpsimd.o
obj-$(CONFIG_KVM_ARM_HOST) += tlb.o
obj-$(CONFIG_KVM_ARM_HOST) += hyp-entry.o
+obj-$(CONFIG_KVM_ARM_HOST) += s2-setup.o
diff --git a/arch/arm64/kvm/hyp/debug-sr.c b/arch/arm64/kvm/hyp/debug-sr.c
index c9c1e97..053cf8b 100644
--- a/arch/arm64/kvm/hyp/debug-sr.c
+++ b/arch/arm64/kvm/hyp/debug-sr.c
@@ -19,9 +19,7 @@
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
-
-#include "hyp.h"
+#include <asm/kvm_hyp.h>
#define read_debug(r,n) read_sysreg(r##n##_el1)
#define write_debug(v,r,n) write_sysreg(v, r##n##_el1)
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index fd0fbe9..ce9e5e5 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -130,9 +130,15 @@
ENTRY(__fpsimd_guest_restore)
stp x4, lr, [sp, #-16]!
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs x2, cptr_el2
bic x2, x2, #CPTR_EL2_TFP
msr cptr_el2, x2
+alternative_else
+ mrs x2, cpacr_el1
+ orr x2, x2, #CPACR_EL1_FPEN
+ msr cpacr_el1, x2
+alternative_endif
isb
mrs x3, tpidr_el2
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 93e8d983..3488894 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -19,7 +19,6 @@
#include <asm/alternative.h>
#include <asm/assembler.h>
-#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
@@ -38,10 +37,42 @@
ldp x0, x1, [sp], #16
.endm
+.macro do_el2_call
+ /*
+ * Shuffle the parameters before calling the function
+ * pointed to in x0. Assumes parameters in x[1,2,3].
+ */
+ sub sp, sp, #16
+ str lr, [sp]
+ mov lr, x0
+ mov x0, x1
+ mov x1, x2
+ mov x2, x3
+ blr lr
+ ldr lr, [sp]
+ add sp, sp, #16
+.endm
+
+ENTRY(__vhe_hyp_call)
+ do_el2_call
+ /*
+ * We used to rely on having an exception return to get
+ * an implicit isb. In the E2H case, we don't have it anymore.
+ * rather than changing all the leaf functions, just do it here
+	 * Rather than changing all the leaf functions, just do it here
+ */
+ isb
+ ret
+ENDPROC(__vhe_hyp_call)
+
el1_sync: // Guest trapped into EL2
save_x0_to_x3
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
mrs x1, esr_el2
+alternative_else
+ mrs x1, esr_el1
+alternative_endif
lsr x2, x1, #ESR_ELx_EC_SHIFT
cmp x2, #ESR_ELx_EC_HVC64
@@ -58,19 +89,13 @@
mrs x0, vbar_el2
b 2f
-1: stp lr, xzr, [sp, #-16]!
-
+1:
/*
- * Compute the function address in EL2, and shuffle the parameters.
+ * Perform the EL2 call
*/
kern_hyp_va x0
- mov lr, x0
- mov x0, x1
- mov x1, x2
- mov x2, x3
- blr lr
+ do_el2_call
- ldp lr, xzr, [sp], #16
2: eret
el1_trap:
@@ -83,72 +108,10 @@
cmp x2, #ESR_ELx_EC_FP_ASIMD
b.eq __fpsimd_guest_restore
- cmp x2, #ESR_ELx_EC_DABT_LOW
- mov x0, #ESR_ELx_EC_IABT_LOW
- ccmp x2, x0, #4, ne
- b.ne 1f // Not an abort we care about
-
- /* This is an abort. Check for permission fault */
-alternative_if_not ARM64_WORKAROUND_834220
- and x2, x1, #ESR_ELx_FSC_TYPE
- cmp x2, #FSC_PERM
- b.ne 1f // Not a permission fault
-alternative_else
- nop // Use the permission fault path to
- nop // check for a valid S1 translation,
- nop // regardless of the ESR value.
-alternative_endif
-
- /*
- * Check for Stage-1 page table walk, which is guaranteed
- * to give a valid HPFAR_EL2.
- */
- tbnz x1, #7, 1f // S1PTW is set
-
- /* Preserve PAR_EL1 */
- mrs x3, par_el1
- stp x3, xzr, [sp, #-16]!
-
- /*
- * Permission fault, HPFAR_EL2 is invalid.
- * Resolve the IPA the hard way using the guest VA.
- * Stage-1 translation already validated the memory access rights.
- * As such, we can use the EL1 translation regime, and don't have
- * to distinguish between EL0 and EL1 access.
- */
- mrs x2, far_el2
- at s1e1r, x2
- isb
-
- /* Read result */
- mrs x3, par_el1
- ldp x0, xzr, [sp], #16 // Restore PAR_EL1 from the stack
- msr par_el1, x0
- tbnz x3, #0, 3f // Bail out if we failed the translation
- ubfx x3, x3, #12, #36 // Extract IPA
- lsl x3, x3, #4 // and present it like HPFAR
- b 2f
-
-1: mrs x3, hpfar_el2
- mrs x2, far_el2
-
-2: mrs x0, tpidr_el2
- str w1, [x0, #VCPU_ESR_EL2]
- str x2, [x0, #VCPU_FAR_EL2]
- str x3, [x0, #VCPU_HPFAR_EL2]
-
+ mrs x0, tpidr_el2
mov x1, #ARM_EXCEPTION_TRAP
b __guest_exit
- /*
- * Translation failed. Just return to the guest and
- * let it fault again. Another CPU is probably playing
- * behind our back.
- */
-3: restore_x0_to_x3
-
- eret
-
el1_irq:
save_x0_to_x3
mrs x0, tpidr_el2
diff --git a/arch/arm64/kvm/hyp/hyp.h b/arch/arm64/kvm/hyp/hyp.h
deleted file mode 100644
index fb27517..0000000
--- a/arch/arm64/kvm/hyp/hyp.h
+++ /dev/null
@@ -1,90 +0,0 @@
-/*
- * Copyright (C) 2015 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef __ARM64_KVM_HYP_H__
-#define __ARM64_KVM_HYP_H__
-
-#include <linux/compiler.h>
-#include <linux/kvm_host.h>
-#include <asm/kvm_mmu.h>
-#include <asm/sysreg.h>
-
-#define __hyp_text __section(.hyp.text) notrace
-
-#define kern_hyp_va(v) (typeof(v))((unsigned long)(v) & HYP_PAGE_OFFSET_MASK)
-#define hyp_kern_va(v) (typeof(v))((unsigned long)(v) - HYP_PAGE_OFFSET \
- + PAGE_OFFSET)
-
-/**
- * hyp_alternate_select - Generates patchable code sequences that are
- * used to switch between two implementations of a function, depending
- * on the availability of a feature.
- *
- * @fname: a symbol name that will be defined as a function returning a
- * function pointer whose type will match @orig and @alt
- * @orig: A pointer to the default function, as returned by @fname when
- * @cond doesn't hold
- * @alt: A pointer to the alternate function, as returned by @fname
- * when @cond holds
- * @cond: a CPU feature (as described in asm/cpufeature.h)
- */
-#define hyp_alternate_select(fname, orig, alt, cond) \
-typeof(orig) * __hyp_text fname(void) \
-{ \
- typeof(alt) *val = orig; \
- asm volatile(ALTERNATIVE("nop \n", \
- "mov %0, %1 \n", \
- cond) \
- : "+r" (val) : "r" (alt)); \
- return val; \
-}
-
-void __vgic_v2_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v2_restore_state(struct kvm_vcpu *vcpu);
-
-void __vgic_v3_save_state(struct kvm_vcpu *vcpu);
-void __vgic_v3_restore_state(struct kvm_vcpu *vcpu);
-
-void __timer_save_state(struct kvm_vcpu *vcpu);
-void __timer_restore_state(struct kvm_vcpu *vcpu);
-
-void __sysreg_save_state(struct kvm_cpu_context *ctxt);
-void __sysreg_restore_state(struct kvm_cpu_context *ctxt);
-void __sysreg32_save_state(struct kvm_vcpu *vcpu);
-void __sysreg32_restore_state(struct kvm_vcpu *vcpu);
-
-void __debug_save_state(struct kvm_vcpu *vcpu,
- struct kvm_guest_debug_arch *dbg,
- struct kvm_cpu_context *ctxt);
-void __debug_restore_state(struct kvm_vcpu *vcpu,
- struct kvm_guest_debug_arch *dbg,
- struct kvm_cpu_context *ctxt);
-void __debug_cond_save_host_state(struct kvm_vcpu *vcpu);
-void __debug_cond_restore_host_state(struct kvm_vcpu *vcpu);
-
-void __fpsimd_save_state(struct user_fpsimd_state *fp_regs);
-void __fpsimd_restore_state(struct user_fpsimd_state *fp_regs);
-static inline bool __fpsimd_enabled(void)
-{
- return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
-}
-
-u64 __guest_enter(struct kvm_vcpu *vcpu, struct kvm_cpu_context *host_ctxt);
-void __noreturn __hyp_do_panic(unsigned long, ...);
-
-#endif /* __ARM64_KVM_HYP_H__ */
-
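The hyp_alternate_select() macro deleted above moves into asm/kvm_hyp.h and is used heavily by the new switch.c below; because the generated fname() returns a function pointer, every call site uses a double call such as __activate_traps_arch()(). Here is a minimal user-space sketch of that dispatch pattern, not the real implementation: the runtime nop-to-mov patching is replaced by a plain conditional, and has_vhe stands in for the ARM64_HAS_VIRT_HOST_EXTN capability check.

#include <stdbool.h>
#include <stdio.h>

static bool has_vhe;	/* stand-in for the ARM64_HAS_VIRT_HOST_EXTN check */

static void activate_traps_nvhe(void) { puts("nVHE: program CPTR_EL2"); }
static void activate_traps_vhe(void)  { puts("VHE: program CPACR_EL1"); }

/* Shape of what hyp_alternate_select(fname, orig, alt, cond) generates:
 * a function returning a pointer to one of the two implementations. */
static void (*activate_traps_arch(void))(void)
{
	return has_vhe ? activate_traps_vhe : activate_traps_nvhe;
}

int main(void)
{
	activate_traps_arch()();	/* the double call used at hyp call sites */
	return 0;
}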
diff --git a/arch/arm64/kvm/hyp/s2-setup.c b/arch/arm64/kvm/hyp/s2-setup.c
new file mode 100644
index 0000000..bfc54fd
--- /dev/null
+++ b/arch/arm64/kvm/hyp/s2-setup.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2016 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/types.h>
+#include <asm/kvm_arm.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+
+void __hyp_text __init_stage2_translation(void)
+{
+ u64 val = VTCR_EL2_FLAGS;
+ u64 tmp;
+
+ /*
+ * Read the PARange bits from ID_AA64MMFR0_EL1 and set the PS
+ * bits in VTCR_EL2. Amusingly, the PARange is 4 bits, while
+ * PS is only 3. Fortunately, bit 19 is RES0 in VTCR_EL2...
+ */
+ val |= (read_sysreg(id_aa64mmfr0_el1) & 7) << 16;
+
+ /*
+ * Read the VMIDBits bits from ID_AA64MMFR1_EL1 and set the VS
+ * bit in VTCR_EL2.
+ */
+ tmp = (read_sysreg(id_aa64mmfr1_el1) >> 4) & 0xf;
+ val |= (tmp == 2) ? VTCR_EL2_VS : 0;
+
+ write_sysreg(val, vtcr_el2);
+}
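The new s2-setup.c packs two ID register fields into VTCR_EL2. A stand-alone replay of that bit arithmetic, using example ID register values instead of mrs reads; VTCR_EL2_FLAGS and the VTCR_EL2_VS bit position are local placeholders assumed to match the asm/kvm_arm.h definitions.

#include <stdint.h>
#include <stdio.h>

#define VTCR_EL2_FLAGS	0ULL		/* placeholder for the asm/kvm_arm.h value */
#define VTCR_EL2_VS	(1ULL << 19)	/* VS: 16-bit VMID support, assumed bit 19 */

int main(void)
{
	uint64_t mmfr0 = 0x5;	/* example: ID_AA64MMFR0_EL1.PARange = 5 (48-bit PA) */
	uint64_t mmfr1 = 0x20;	/* example: ID_AA64MMFR1_EL1.VMIDBits = 2 (16 bits) */
	uint64_t val = VTCR_EL2_FLAGS;

	/* PARange lives in bits [3:0]; PS is VTCR_EL2[18:16], hence the
	 * truncating "& 7" and the comment about bit 19 being RES0. */
	val |= (mmfr0 & 7) << 16;

	/* VMIDBits lives in bits [7:4]; the value 2 means 16-bit VMIDs. */
	val |= (((mmfr1 >> 4) & 0xf) == 2) ? VTCR_EL2_VS : 0;

	printf("VTCR_EL2 = %#llx\n", (unsigned long long)val);
	return 0;
}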
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index f0e7bdf..437cfad 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -15,7 +15,53 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "hyp.h"
+#include <linux/types.h>
+#include <asm/kvm_asm.h>
+#include <asm/kvm_hyp.h>
+
+static bool __hyp_text __fpsimd_enabled_nvhe(void)
+{
+ return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
+}
+
+static bool __hyp_text __fpsimd_enabled_vhe(void)
+{
+ return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
+}
+
+static hyp_alternate_select(__fpsimd_is_enabled,
+ __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
+bool __hyp_text __fpsimd_enabled(void)
+{
+ return __fpsimd_is_enabled()();
+}
+
+static void __hyp_text __activate_traps_vhe(void)
+{
+ u64 val;
+
+ val = read_sysreg(cpacr_el1);
+ val |= CPACR_EL1_TTA;
+ val &= ~CPACR_EL1_FPEN;
+ write_sysreg(val, cpacr_el1);
+
+ write_sysreg(__kvm_hyp_vector, vbar_el1);
+}
+
+static void __hyp_text __activate_traps_nvhe(void)
+{
+ u64 val;
+
+ val = CPTR_EL2_DEFAULT;
+ val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
+ write_sysreg(val, cptr_el2);
+}
+
+static hyp_alternate_select(__activate_traps_arch,
+ __activate_traps_nvhe, __activate_traps_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
@@ -36,20 +82,37 @@
write_sysreg(val, hcr_el2);
/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
write_sysreg(1 << 15, hstr_el2);
-
- val = CPTR_EL2_DEFAULT;
- val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
- write_sysreg(val, cptr_el2);
-
+ /* Make sure we trap PMU access from EL0 to EL2 */
+ write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
+ __activate_traps_arch()();
}
+static void __hyp_text __deactivate_traps_vhe(void)
+{
+ extern char vectors[]; /* kernel exception vectors */
+
+ write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
+ write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
+ write_sysreg(vectors, vbar_el1);
+}
+
+static void __hyp_text __deactivate_traps_nvhe(void)
+{
+ write_sysreg(HCR_RW, hcr_el2);
+ write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
+}
+
+static hyp_alternate_select(__deactivate_traps_arch,
+ __deactivate_traps_nvhe, __deactivate_traps_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
- write_sysreg(HCR_RW, hcr_el2);
+ __deactivate_traps_arch()();
write_sysreg(0, hstr_el2);
write_sysreg(read_sysreg(mdcr_el2) & MDCR_EL2_HPMN_MASK, mdcr_el2);
- write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
+ write_sysreg(0, pmuserenr_el0);
}
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
@@ -89,6 +152,86 @@
__vgic_call_restore_state()(vcpu);
}
+static bool __hyp_text __true_value(void)
+{
+ return true;
+}
+
+static bool __hyp_text __false_value(void)
+{
+ return false;
+}
+
+static hyp_alternate_select(__check_arm_834220,
+ __false_value, __true_value,
+ ARM64_WORKAROUND_834220);
+
+static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
+{
+ u64 par, tmp;
+
+ /*
+ * Resolve the IPA the hard way using the guest VA.
+ *
+ * Stage-1 translation already validated the memory access
+ * rights. As such, we can use the EL1 translation regime, and
+ * don't have to distinguish between EL0 and EL1 access.
+ *
+ * We do need to save/restore PAR_EL1 though, as we haven't
+ * saved the guest context yet, and we may return early...
+ */
+ par = read_sysreg(par_el1);
+ asm volatile("at s1e1r, %0" : : "r" (far));
+ isb();
+
+ tmp = read_sysreg(par_el1);
+ write_sysreg(par, par_el1);
+
+ if (unlikely(tmp & 1))
+ return false; /* Translation failed, back to guest */
+
+ /* Convert PAR to HPFAR format */
+ *hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
+ return true;
+}
+
+static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
+{
+ u64 esr = read_sysreg_el2(esr);
+ u8 ec = esr >> ESR_ELx_EC_SHIFT;
+ u64 hpfar, far;
+
+ vcpu->arch.fault.esr_el2 = esr;
+
+ if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
+ return true;
+
+ far = read_sysreg_el2(far);
+
+ /*
+ * The HPFAR can be invalid if the stage 2 fault did not
+ * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
+	 * bit is clear) and one of the two following cases is true:
+	 * 1. The fault was due to a permission fault
+	 * 2. The processor is affected by erratum 834220
+	 *
+	 * Therefore, for all non-S1PTW faults where we either have a
+ * permission fault or the errata workaround is enabled, we
+ * resolve the IPA using the AT instruction.
+ */
+ if (!(esr & ESR_ELx_S1PTW) &&
+ (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+ if (!__translate_far_to_hpfar(far, &hpfar))
+ return false;
+ } else {
+ hpfar = read_sysreg(hpfar_el2);
+ }
+
+ vcpu->arch.fault.far_el2 = far;
+ vcpu->arch.fault.hpfar_el2 = hpfar;
+ return true;
+}
+
static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
{
struct kvm_cpu_context *host_ctxt;
@@ -102,7 +245,7 @@
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
guest_ctxt = &vcpu->arch.ctxt;
- __sysreg_save_state(host_ctxt);
+ __sysreg_save_host_state(host_ctxt);
__debug_cond_save_host_state(vcpu);
__activate_traps(vcpu);
@@ -116,16 +259,20 @@
* to Cortex-A57 erratum #852523.
*/
__sysreg32_restore_state(vcpu);
- __sysreg_restore_state(guest_ctxt);
+ __sysreg_restore_guest_state(guest_ctxt);
__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
/* Jump in the fire! */
+again:
exit_code = __guest_enter(vcpu, host_ctxt);
/* And we're baaack! */
+ if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
+ goto again;
+
fp_enabled = __fpsimd_enabled();
- __sysreg_save_state(guest_ctxt);
+ __sysreg_save_guest_state(guest_ctxt);
__sysreg32_save_state(vcpu);
__timer_save_state(vcpu);
__vgic_save_state(vcpu);
@@ -133,7 +280,7 @@
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
- __sysreg_restore_state(host_ctxt);
+ __sysreg_restore_host_state(host_ctxt);
if (fp_enabled) {
__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
@@ -150,11 +297,34 @@
static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";
-void __hyp_text __noreturn __hyp_panic(void)
+static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
{
unsigned long str_va = (unsigned long)__hyp_panic_string;
- u64 spsr = read_sysreg(spsr_el2);
- u64 elr = read_sysreg(elr_el2);
+
+ __hyp_do_panic(hyp_kern_va(str_va),
+ spsr, elr,
+ read_sysreg(esr_el2), read_sysreg_el2(far),
+ read_sysreg(hpfar_el2), par,
+ (void *)read_sysreg(tpidr_el2));
+}
+
+static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
+{
+ panic(__hyp_panic_string,
+ spsr, elr,
+ read_sysreg_el2(esr), read_sysreg_el2(far),
+ read_sysreg(hpfar_el2), par,
+ (void *)read_sysreg(tpidr_el2));
+}
+
+static hyp_alternate_select(__hyp_call_panic,
+ __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
+void __hyp_text __noreturn __hyp_panic(void)
+{
+ u64 spsr = read_sysreg_el2(spsr);
+ u64 elr = read_sysreg_el2(elr);
u64 par = read_sysreg(par_el1);
if (read_sysreg(vttbr_el2)) {
@@ -165,15 +335,11 @@
host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
__deactivate_traps(vcpu);
__deactivate_vm(vcpu);
- __sysreg_restore_state(host_ctxt);
+ __sysreg_restore_host_state(host_ctxt);
}
/* Call panic for real */
- __hyp_do_panic(hyp_kern_va(str_va),
- spsr, elr,
- read_sysreg(esr_el2), read_sysreg(far_el2),
- read_sysreg(hpfar_el2), par,
- (void *)read_sysreg(tpidr_el2));
+ __hyp_call_panic()(spsr, elr, par);
unreachable();
}
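The most subtle piece of the new C exit path is the PAR_EL1 to HPFAR_EL2 repacking in __translate_far_to_hpfar(): PAR_EL1 carries the physical address in bits [47:12], while HPFAR_EL2 wants the same 36-bit frame number in bits [39:4]. A stand-alone check of that arithmetic, with an arbitrary example address:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Example PAR_EL1 after a successful AT S1E1R: F bit (bit 0) clear,
	 * PA[47:12] in bits [47:12]. */
	uint64_t par = 0x0000123456789000ULL;

	if (par & 1) {	/* F set: the walk failed, back to the guest */
		puts("AT failed");
		return 1;
	}

	uint64_t hpfar = ((par >> 12) & ((1ULL << 36) - 1)) << 4;

	printf("PA    = %#llx\n", (unsigned long long)(par & ~0xfffULL));
	printf("HPFAR = %#llx\n", (unsigned long long)hpfar);
	return 0;
}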
diff --git a/arch/arm64/kvm/hyp/sysreg-sr.c b/arch/arm64/kvm/hyp/sysreg-sr.c
index 42563098..0f7c40e 100644
--- a/arch/arm64/kvm/hyp/sysreg-sr.c
+++ b/arch/arm64/kvm/hyp/sysreg-sr.c
@@ -19,75 +19,122 @@
#include <linux/kvm_host.h>
#include <asm/kvm_asm.h>
-#include <asm/kvm_mmu.h>
+#include <asm/kvm_hyp.h>
-#include "hyp.h"
+/* Yes, this does nothing, on purpose */
+static void __hyp_text __sysreg_do_nothing(struct kvm_cpu_context *ctxt) { }
-/* ctxt is already in the HYP VA space */
-void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+/*
+ * Non-VHE: Both host and guest must save everything.
+ *
+ * VHE: Host must save tpidr*_el[01], actlr_el1, sp_el0, pc and pstate;
+ * the guest must save everything.
+ */
+
+static void __hyp_text __sysreg_save_common_state(struct kvm_cpu_context *ctxt)
{
- ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2);
- ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
- ctxt->sys_regs[SCTLR_EL1] = read_sysreg(sctlr_el1);
ctxt->sys_regs[ACTLR_EL1] = read_sysreg(actlr_el1);
- ctxt->sys_regs[CPACR_EL1] = read_sysreg(cpacr_el1);
- ctxt->sys_regs[TTBR0_EL1] = read_sysreg(ttbr0_el1);
- ctxt->sys_regs[TTBR1_EL1] = read_sysreg(ttbr1_el1);
- ctxt->sys_regs[TCR_EL1] = read_sysreg(tcr_el1);
- ctxt->sys_regs[ESR_EL1] = read_sysreg(esr_el1);
- ctxt->sys_regs[AFSR0_EL1] = read_sysreg(afsr0_el1);
- ctxt->sys_regs[AFSR1_EL1] = read_sysreg(afsr1_el1);
- ctxt->sys_regs[FAR_EL1] = read_sysreg(far_el1);
- ctxt->sys_regs[MAIR_EL1] = read_sysreg(mair_el1);
- ctxt->sys_regs[VBAR_EL1] = read_sysreg(vbar_el1);
- ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg(contextidr_el1);
ctxt->sys_regs[TPIDR_EL0] = read_sysreg(tpidr_el0);
ctxt->sys_regs[TPIDRRO_EL0] = read_sysreg(tpidrro_el0);
ctxt->sys_regs[TPIDR_EL1] = read_sysreg(tpidr_el1);
- ctxt->sys_regs[AMAIR_EL1] = read_sysreg(amair_el1);
- ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg(cntkctl_el1);
+ ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
+ ctxt->gp_regs.regs.pc = read_sysreg_el2(elr);
+ ctxt->gp_regs.regs.pstate = read_sysreg_el2(spsr);
+}
+
+static void __hyp_text __sysreg_save_state(struct kvm_cpu_context *ctxt)
+{
+ ctxt->sys_regs[MPIDR_EL1] = read_sysreg(vmpidr_el2);
+ ctxt->sys_regs[CSSELR_EL1] = read_sysreg(csselr_el1);
+ ctxt->sys_regs[SCTLR_EL1] = read_sysreg_el1(sctlr);
+ ctxt->sys_regs[CPACR_EL1] = read_sysreg_el1(cpacr);
+ ctxt->sys_regs[TTBR0_EL1] = read_sysreg_el1(ttbr0);
+ ctxt->sys_regs[TTBR1_EL1] = read_sysreg_el1(ttbr1);
+ ctxt->sys_regs[TCR_EL1] = read_sysreg_el1(tcr);
+ ctxt->sys_regs[ESR_EL1] = read_sysreg_el1(esr);
+ ctxt->sys_regs[AFSR0_EL1] = read_sysreg_el1(afsr0);
+ ctxt->sys_regs[AFSR1_EL1] = read_sysreg_el1(afsr1);
+ ctxt->sys_regs[FAR_EL1] = read_sysreg_el1(far);
+ ctxt->sys_regs[MAIR_EL1] = read_sysreg_el1(mair);
+ ctxt->sys_regs[VBAR_EL1] = read_sysreg_el1(vbar);
+ ctxt->sys_regs[CONTEXTIDR_EL1] = read_sysreg_el1(contextidr);
+ ctxt->sys_regs[AMAIR_EL1] = read_sysreg_el1(amair);
+ ctxt->sys_regs[CNTKCTL_EL1] = read_sysreg_el1(cntkctl);
ctxt->sys_regs[PAR_EL1] = read_sysreg(par_el1);
ctxt->sys_regs[MDSCR_EL1] = read_sysreg(mdscr_el1);
- ctxt->gp_regs.regs.sp = read_sysreg(sp_el0);
- ctxt->gp_regs.regs.pc = read_sysreg(elr_el2);
- ctxt->gp_regs.regs.pstate = read_sysreg(spsr_el2);
ctxt->gp_regs.sp_el1 = read_sysreg(sp_el1);
- ctxt->gp_regs.elr_el1 = read_sysreg(elr_el1);
- ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg(spsr_el1);
+ ctxt->gp_regs.elr_el1 = read_sysreg_el1(elr);
+ ctxt->gp_regs.spsr[KVM_SPSR_EL1]= read_sysreg_el1(spsr);
}
-void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+static hyp_alternate_select(__sysreg_call_save_host_state,
+ __sysreg_save_state, __sysreg_do_nothing,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
+void __hyp_text __sysreg_save_host_state(struct kvm_cpu_context *ctxt)
{
- write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
- write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
- write_sysreg(ctxt->sys_regs[SCTLR_EL1], sctlr_el1);
+ __sysreg_call_save_host_state()(ctxt);
+ __sysreg_save_common_state(ctxt);
+}
+
+void __hyp_text __sysreg_save_guest_state(struct kvm_cpu_context *ctxt)
+{
+ __sysreg_save_state(ctxt);
+ __sysreg_save_common_state(ctxt);
+}
+
+static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
+{
write_sysreg(ctxt->sys_regs[ACTLR_EL1], actlr_el1);
- write_sysreg(ctxt->sys_regs[CPACR_EL1], cpacr_el1);
- write_sysreg(ctxt->sys_regs[TTBR0_EL1], ttbr0_el1);
- write_sysreg(ctxt->sys_regs[TTBR1_EL1], ttbr1_el1);
- write_sysreg(ctxt->sys_regs[TCR_EL1], tcr_el1);
- write_sysreg(ctxt->sys_regs[ESR_EL1], esr_el1);
- write_sysreg(ctxt->sys_regs[AFSR0_EL1], afsr0_el1);
- write_sysreg(ctxt->sys_regs[AFSR1_EL1], afsr1_el1);
- write_sysreg(ctxt->sys_regs[FAR_EL1], far_el1);
- write_sysreg(ctxt->sys_regs[MAIR_EL1], mair_el1);
- write_sysreg(ctxt->sys_regs[VBAR_EL1], vbar_el1);
- write_sysreg(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr_el1);
write_sysreg(ctxt->sys_regs[TPIDR_EL0], tpidr_el0);
write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
write_sysreg(ctxt->sys_regs[TPIDR_EL1], tpidr_el1);
- write_sysreg(ctxt->sys_regs[AMAIR_EL1], amair_el1);
- write_sysreg(ctxt->sys_regs[CNTKCTL_EL1], cntkctl_el1);
- write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
- write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
+ write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
+ write_sysreg_el2(ctxt->gp_regs.regs.pc, elr);
+ write_sysreg_el2(ctxt->gp_regs.regs.pstate, spsr);
+}
- write_sysreg(ctxt->gp_regs.regs.sp, sp_el0);
- write_sysreg(ctxt->gp_regs.regs.pc, elr_el2);
- write_sysreg(ctxt->gp_regs.regs.pstate, spsr_el2);
- write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
- write_sysreg(ctxt->gp_regs.elr_el1, elr_el1);
- write_sysreg(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr_el1);
+static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
+{
+ write_sysreg(ctxt->sys_regs[MPIDR_EL1], vmpidr_el2);
+ write_sysreg(ctxt->sys_regs[CSSELR_EL1], csselr_el1);
+ write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1], sctlr);
+ write_sysreg_el1(ctxt->sys_regs[CPACR_EL1], cpacr);
+ write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1], ttbr0);
+ write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1], ttbr1);
+ write_sysreg_el1(ctxt->sys_regs[TCR_EL1], tcr);
+ write_sysreg_el1(ctxt->sys_regs[ESR_EL1], esr);
+ write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1], afsr0);
+ write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1], afsr1);
+ write_sysreg_el1(ctxt->sys_regs[FAR_EL1], far);
+ write_sysreg_el1(ctxt->sys_regs[MAIR_EL1], mair);
+ write_sysreg_el1(ctxt->sys_regs[VBAR_EL1], vbar);
+ write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1],contextidr);
+ write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1], amair);
+ write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], cntkctl);
+ write_sysreg(ctxt->sys_regs[PAR_EL1], par_el1);
+ write_sysreg(ctxt->sys_regs[MDSCR_EL1], mdscr_el1);
+
+ write_sysreg(ctxt->gp_regs.sp_el1, sp_el1);
+ write_sysreg_el1(ctxt->gp_regs.elr_el1, elr);
+ write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1],spsr);
+}
+
+static hyp_alternate_select(__sysreg_call_restore_host_state,
+ __sysreg_restore_state, __sysreg_do_nothing,
+ ARM64_HAS_VIRT_HOST_EXTN);
+
+void __hyp_text __sysreg_restore_host_state(struct kvm_cpu_context *ctxt)
+{
+ __sysreg_call_restore_host_state()(ctxt);
+ __sysreg_restore_common_state(ctxt);
+}
+
+void __hyp_text __sysreg_restore_guest_state(struct kvm_cpu_context *ctxt)
+{
+ __sysreg_restore_state(ctxt);
+ __sysreg_restore_common_state(ctxt);
}
void __hyp_text __sysreg32_save_state(struct kvm_vcpu *vcpu)
diff --git a/arch/arm64/kvm/hyp/tlb.c b/arch/arm64/kvm/hyp/tlb.c
index 2a7e0d8..be8177c 100644
--- a/arch/arm64/kvm/hyp/tlb.c
+++ b/arch/arm64/kvm/hyp/tlb.c
@@ -15,7 +15,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
-#include "hyp.h"
+#include <asm/kvm_hyp.h>
static void __hyp_text __tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
diff --git a/arch/arm64/kvm/hyp/vgic-v2-sr.c b/arch/arm64/kvm/hyp/vgic-v2-sr.c
deleted file mode 100644
index e717612..0000000
--- a/arch/arm64/kvm/hyp/vgic-v2-sr.c
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2012-2015 - ARM Ltd
- * Author: Marc Zyngier <marc.zyngier@arm.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program. If not, see <http://www.gnu.org/licenses/>.
- */
-
-#include <linux/compiler.h>
-#include <linux/irqchip/arm-gic.h>
-#include <linux/kvm_host.h>
-
-#include <asm/kvm_mmu.h>
-
-#include "hyp.h"
-
-/* vcpu is already in the HYP VA space */
-void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = kern_hyp_va(vcpu->kvm);
- struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- struct vgic_dist *vgic = &kvm->arch.vgic;
- void __iomem *base = kern_hyp_va(vgic->vctrl_base);
- u32 eisr0, eisr1, elrsr0, elrsr1;
- int i, nr_lr;
-
- if (!base)
- return;
-
- nr_lr = vcpu->arch.vgic_cpu.nr_lr;
- cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
- cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
- eisr0 = readl_relaxed(base + GICH_EISR0);
- elrsr0 = readl_relaxed(base + GICH_ELRSR0);
- if (unlikely(nr_lr > 32)) {
- eisr1 = readl_relaxed(base + GICH_EISR1);
- elrsr1 = readl_relaxed(base + GICH_ELRSR1);
- } else {
- eisr1 = elrsr1 = 0;
- }
-#ifdef CONFIG_CPU_BIG_ENDIAN
- cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
- cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
-#else
- cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
- cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
-#endif
- cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
-
- writel_relaxed(0, base + GICH_HCR);
-
- for (i = 0; i < nr_lr; i++)
- cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
-}
-
-/* vcpu is already in the HYP VA space */
-void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
-{
- struct kvm *kvm = kern_hyp_va(vcpu->kvm);
- struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
- struct vgic_dist *vgic = &kvm->arch.vgic;
- void __iomem *base = kern_hyp_va(vgic->vctrl_base);
- int i, nr_lr;
-
- if (!base)
- return;
-
- writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
- writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
- writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
-
- nr_lr = vcpu->arch.vgic_cpu.nr_lr;
- for (i = 0; i < nr_lr; i++)
- writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
-}
diff --git a/arch/arm64/kvm/hyp/vgic-v3-sr.c b/arch/arm64/kvm/hyp/vgic-v3-sr.c
index 9142e08..fff7cd4 100644
--- a/arch/arm64/kvm/hyp/vgic-v3-sr.c
+++ b/arch/arm64/kvm/hyp/vgic-v3-sr.c
@@ -19,9 +19,7 @@
#include <linux/irqchip/arm-gic-v3.h>
#include <linux/kvm_host.h>
-#include <asm/kvm_mmu.h>
-
-#include "hyp.h"
+#include <asm/kvm_hyp.h>
#define vtr_to_max_lr_idx(v) ((v) & 0xf)
#define vtr_to_nr_pri_bits(v) (((u32)(v) >> 29) + 1)
@@ -39,12 +37,133 @@
asm volatile("msr_s " __stringify(r) ", %0" : : "r" (__val));\
} while (0)
-/* vcpu is already in the HYP VA space */
+static u64 __hyp_text __gic_v3_get_lr(unsigned int lr)
+{
+ switch (lr & 0xf) {
+ case 0:
+ return read_gicreg(ICH_LR0_EL2);
+ case 1:
+ return read_gicreg(ICH_LR1_EL2);
+ case 2:
+ return read_gicreg(ICH_LR2_EL2);
+ case 3:
+ return read_gicreg(ICH_LR3_EL2);
+ case 4:
+ return read_gicreg(ICH_LR4_EL2);
+ case 5:
+ return read_gicreg(ICH_LR5_EL2);
+ case 6:
+ return read_gicreg(ICH_LR6_EL2);
+ case 7:
+ return read_gicreg(ICH_LR7_EL2);
+ case 8:
+ return read_gicreg(ICH_LR8_EL2);
+ case 9:
+ return read_gicreg(ICH_LR9_EL2);
+ case 10:
+ return read_gicreg(ICH_LR10_EL2);
+ case 11:
+ return read_gicreg(ICH_LR11_EL2);
+ case 12:
+ return read_gicreg(ICH_LR12_EL2);
+ case 13:
+ return read_gicreg(ICH_LR13_EL2);
+ case 14:
+ return read_gicreg(ICH_LR14_EL2);
+ case 15:
+ return read_gicreg(ICH_LR15_EL2);
+ }
+
+ unreachable();
+}
+
+static void __hyp_text __gic_v3_set_lr(u64 val, int lr)
+{
+ switch (lr & 0xf) {
+ case 0:
+ write_gicreg(val, ICH_LR0_EL2);
+ break;
+ case 1:
+ write_gicreg(val, ICH_LR1_EL2);
+ break;
+ case 2:
+ write_gicreg(val, ICH_LR2_EL2);
+ break;
+ case 3:
+ write_gicreg(val, ICH_LR3_EL2);
+ break;
+ case 4:
+ write_gicreg(val, ICH_LR4_EL2);
+ break;
+ case 5:
+ write_gicreg(val, ICH_LR5_EL2);
+ break;
+ case 6:
+ write_gicreg(val, ICH_LR6_EL2);
+ break;
+ case 7:
+ write_gicreg(val, ICH_LR7_EL2);
+ break;
+ case 8:
+ write_gicreg(val, ICH_LR8_EL2);
+ break;
+ case 9:
+ write_gicreg(val, ICH_LR9_EL2);
+ break;
+ case 10:
+ write_gicreg(val, ICH_LR10_EL2);
+ break;
+ case 11:
+ write_gicreg(val, ICH_LR11_EL2);
+ break;
+ case 12:
+ write_gicreg(val, ICH_LR12_EL2);
+ break;
+ case 13:
+ write_gicreg(val, ICH_LR13_EL2);
+ break;
+ case 14:
+ write_gicreg(val, ICH_LR14_EL2);
+ break;
+ case 15:
+ write_gicreg(val, ICH_LR15_EL2);
+ break;
+ }
+}
+
+static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu, int nr_lr)
+{
+ struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
+ int i;
+ bool expect_mi;
+
+ expect_mi = !!(cpu_if->vgic_hcr & ICH_HCR_UIE);
+
+ for (i = 0; i < nr_lr; i++) {
+ if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
+ continue;
+
+ expect_mi |= (!(cpu_if->vgic_lr[i] & ICH_LR_HW) &&
+ (cpu_if->vgic_lr[i] & ICH_LR_EOI));
+ }
+
+ if (expect_mi) {
+ cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);
+
+ if (cpu_if->vgic_misr & ICH_MISR_EOI)
+ cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
+ else
+ cpu_if->vgic_eisr = 0;
+ } else {
+ cpu_if->vgic_misr = 0;
+ cpu_if->vgic_eisr = 0;
+ }
+}
+
void __hyp_text __vgic_v3_save_state(struct kvm_vcpu *vcpu)
{
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
u64 val;
- u32 max_lr_idx, nr_pri_bits;
/*
* Make sure stores to the GIC via the memory mapped interface
@@ -53,68 +172,66 @@
dsb(st);
cpu_if->vgic_vmcr = read_gicreg(ICH_VMCR_EL2);
- cpu_if->vgic_misr = read_gicreg(ICH_MISR_EL2);
- cpu_if->vgic_eisr = read_gicreg(ICH_EISR_EL2);
- cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
- write_gicreg(0, ICH_HCR_EL2);
- val = read_gicreg(ICH_VTR_EL2);
- max_lr_idx = vtr_to_max_lr_idx(val);
- nr_pri_bits = vtr_to_nr_pri_bits(val);
+ if (vcpu->arch.vgic_cpu.live_lrs) {
+ int i;
+ u32 max_lr_idx, nr_pri_bits;
- switch (max_lr_idx) {
- case 15:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)] = read_gicreg(ICH_LR15_EL2);
- case 14:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)] = read_gicreg(ICH_LR14_EL2);
- case 13:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)] = read_gicreg(ICH_LR13_EL2);
- case 12:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)] = read_gicreg(ICH_LR12_EL2);
- case 11:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)] = read_gicreg(ICH_LR11_EL2);
- case 10:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)] = read_gicreg(ICH_LR10_EL2);
- case 9:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)] = read_gicreg(ICH_LR9_EL2);
- case 8:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)] = read_gicreg(ICH_LR8_EL2);
- case 7:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)] = read_gicreg(ICH_LR7_EL2);
- case 6:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)] = read_gicreg(ICH_LR6_EL2);
- case 5:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)] = read_gicreg(ICH_LR5_EL2);
- case 4:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)] = read_gicreg(ICH_LR4_EL2);
- case 3:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)] = read_gicreg(ICH_LR3_EL2);
- case 2:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)] = read_gicreg(ICH_LR2_EL2);
- case 1:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)] = read_gicreg(ICH_LR1_EL2);
- case 0:
- cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)] = read_gicreg(ICH_LR0_EL2);
- }
+ cpu_if->vgic_elrsr = read_gicreg(ICH_ELSR_EL2);
- switch (nr_pri_bits) {
- case 7:
- cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
- cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
- case 6:
- cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
- default:
- cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
- }
+ write_gicreg(0, ICH_HCR_EL2);
+ val = read_gicreg(ICH_VTR_EL2);
+ max_lr_idx = vtr_to_max_lr_idx(val);
+ nr_pri_bits = vtr_to_nr_pri_bits(val);
- switch (nr_pri_bits) {
- case 7:
- cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
- cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
- case 6:
- cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
- default:
- cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
+ save_maint_int_state(vcpu, max_lr_idx + 1);
+
+ for (i = 0; i <= max_lr_idx; i++) {
+ if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
+ continue;
+
+ if (cpu_if->vgic_elrsr & (1 << i)) {
+ cpu_if->vgic_lr[i] &= ~ICH_LR_STATE;
+ continue;
+ }
+
+ cpu_if->vgic_lr[i] = __gic_v3_get_lr(i);
+ __gic_v3_set_lr(0, i);
+ }
+
+ switch (nr_pri_bits) {
+ case 7:
+ cpu_if->vgic_ap0r[3] = read_gicreg(ICH_AP0R3_EL2);
+ cpu_if->vgic_ap0r[2] = read_gicreg(ICH_AP0R2_EL2);
+ case 6:
+ cpu_if->vgic_ap0r[1] = read_gicreg(ICH_AP0R1_EL2);
+ default:
+ cpu_if->vgic_ap0r[0] = read_gicreg(ICH_AP0R0_EL2);
+ }
+
+ switch (nr_pri_bits) {
+ case 7:
+ cpu_if->vgic_ap1r[3] = read_gicreg(ICH_AP1R3_EL2);
+ cpu_if->vgic_ap1r[2] = read_gicreg(ICH_AP1R2_EL2);
+ case 6:
+ cpu_if->vgic_ap1r[1] = read_gicreg(ICH_AP1R1_EL2);
+ default:
+ cpu_if->vgic_ap1r[0] = read_gicreg(ICH_AP1R0_EL2);
+ }
+
+ vcpu->arch.vgic_cpu.live_lrs = 0;
+ } else {
+ cpu_if->vgic_misr = 0;
+ cpu_if->vgic_eisr = 0;
+ cpu_if->vgic_elrsr = 0xffff;
+ cpu_if->vgic_ap0r[0] = 0;
+ cpu_if->vgic_ap0r[1] = 0;
+ cpu_if->vgic_ap0r[2] = 0;
+ cpu_if->vgic_ap0r[3] = 0;
+ cpu_if->vgic_ap1r[0] = 0;
+ cpu_if->vgic_ap1r[1] = 0;
+ cpu_if->vgic_ap1r[2] = 0;
+ cpu_if->vgic_ap1r[3] = 0;
}
val = read_gicreg(ICC_SRE_EL2);
@@ -128,6 +245,8 @@
struct vgic_v3_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v3;
u64 val;
u32 max_lr_idx, nr_pri_bits;
+ u16 live_lrs = 0;
+ int i;
/*
* VFIQEn is RES1 if ICC_SRE_EL1.SRE is 1. This causes a
@@ -140,66 +259,46 @@
write_gicreg(cpu_if->vgic_sre, ICC_SRE_EL1);
isb();
- write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
- write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
-
val = read_gicreg(ICH_VTR_EL2);
max_lr_idx = vtr_to_max_lr_idx(val);
nr_pri_bits = vtr_to_nr_pri_bits(val);
- switch (nr_pri_bits) {
- case 7:
- write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
- write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
- case 6:
- write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
- default:
- write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
- }
-
- switch (nr_pri_bits) {
- case 7:
- write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
- write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
- case 6:
- write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
- default:
- write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
+ for (i = 0; i <= max_lr_idx; i++) {
+ if (cpu_if->vgic_lr[i] & ICH_LR_STATE)
+ live_lrs |= (1 << i);
}
- switch (max_lr_idx) {
- case 15:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(15)], ICH_LR15_EL2);
- case 14:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(14)], ICH_LR14_EL2);
- case 13:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(13)], ICH_LR13_EL2);
- case 12:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(12)], ICH_LR12_EL2);
- case 11:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(11)], ICH_LR11_EL2);
- case 10:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(10)], ICH_LR10_EL2);
- case 9:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(9)], ICH_LR9_EL2);
- case 8:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(8)], ICH_LR8_EL2);
- case 7:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(7)], ICH_LR7_EL2);
- case 6:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(6)], ICH_LR6_EL2);
- case 5:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(5)], ICH_LR5_EL2);
- case 4:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(4)], ICH_LR4_EL2);
- case 3:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(3)], ICH_LR3_EL2);
- case 2:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(2)], ICH_LR2_EL2);
- case 1:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(1)], ICH_LR1_EL2);
- case 0:
- write_gicreg(cpu_if->vgic_lr[VGIC_V3_LR_INDEX(0)], ICH_LR0_EL2);
+ write_gicreg(cpu_if->vgic_vmcr, ICH_VMCR_EL2);
+
+ if (live_lrs) {
+ write_gicreg(cpu_if->vgic_hcr, ICH_HCR_EL2);
+
+ switch (nr_pri_bits) {
+ case 7:
+ write_gicreg(cpu_if->vgic_ap0r[3], ICH_AP0R3_EL2);
+ write_gicreg(cpu_if->vgic_ap0r[2], ICH_AP0R2_EL2);
+ case 6:
+ write_gicreg(cpu_if->vgic_ap0r[1], ICH_AP0R1_EL2);
+ default:
+ write_gicreg(cpu_if->vgic_ap0r[0], ICH_AP0R0_EL2);
+ }
+
+ switch (nr_pri_bits) {
+ case 7:
+ write_gicreg(cpu_if->vgic_ap1r[3], ICH_AP1R3_EL2);
+ write_gicreg(cpu_if->vgic_ap1r[2], ICH_AP1R2_EL2);
+ case 6:
+ write_gicreg(cpu_if->vgic_ap1r[1], ICH_AP1R1_EL2);
+ default:
+ write_gicreg(cpu_if->vgic_ap1r[0], ICH_AP1R0_EL2);
+ }
+
+ for (i = 0; i <= max_lr_idx; i++) {
+ if (!(live_lrs & (1 << i)))
+ continue;
+
+ __gic_v3_set_lr(cpu_if->vgic_lr[i], i);
+ }
}
/*
@@ -209,6 +308,7 @@
*/
isb();
dsb(sy);
+ vcpu->arch.vgic_cpu.live_lrs = live_lrs;
/*
* Prevent the guest from touching the GIC system registers if
@@ -220,6 +320,15 @@
}
}
+void __hyp_text __vgic_v3_init_lrs(void)
+{
+ int max_lr_idx = vtr_to_max_lr_idx(read_gicreg(ICH_VTR_EL2));
+ int i;
+
+ for (i = 0; i <= max_lr_idx; i++)
+ __gic_v3_set_lr(0, i);
+}
+
static u64 __hyp_text __vgic_v3_read_ich_vtr_el2(void)
{
return read_gicreg(ICH_VTR_EL2);
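The vgic-v3 rework above keys both directions off a live_lrs bitmap: restore scans the saved LRs for entries with state bits set and only programs those, and save only reads back LRs it knows were live. A small model of the bitmap computation, assuming ICH_LR_STATE is the two-bit pending/active field in bits [63:62] of ICH_LR<n>_EL2:

#include <stdint.h>
#include <stdio.h>

#define ICH_LR_STATE	(3ULL << 62)	/* pending/active, ICH_LR<n>_EL2[63:62] */

int main(void)
{
	/* Example saved LR contents: LR0 active, LR2 pending, others empty. */
	uint64_t vgic_lr[4] = { 2ULL << 62, 0, 1ULL << 62, 0 };
	uint16_t live_lrs = 0;

	for (int i = 0; i < 4; i++)
		if (vgic_lr[i] & ICH_LR_STATE)
			live_lrs |= 1 << i;

	printf("live_lrs = %#x\n", live_lrs);	/* 0x5: only LR0 and LR2 */
	return 0;
}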
diff --git a/arch/arm64/kvm/reset.c b/arch/arm64/kvm/reset.c
index f34745c..9677bf0 100644
--- a/arch/arm64/kvm/reset.c
+++ b/arch/arm64/kvm/reset.c
@@ -77,7 +77,11 @@
case KVM_CAP_GUEST_DEBUG_HW_WPS:
r = get_num_wrps();
break;
+ case KVM_CAP_ARM_PMU_V3:
+ r = kvm_arm_support_pmu_v3();
+ break;
case KVM_CAP_SET_GUEST_DEBUG:
+ case KVM_CAP_VCPU_ATTRIBUTES:
r = 1;
break;
default:
@@ -120,6 +124,9 @@
/* Reset system registers */
kvm_reset_sys_regs(vcpu);
+ /* Reset PMU */
+ kvm_pmu_vcpu_reset(vcpu);
+
/* Reset timer */
return kvm_timer_vcpu_reset(vcpu, cpu_vtimer_irq);
}
diff --git a/arch/arm64/kvm/sys_regs.c b/arch/arm64/kvm/sys_regs.c
index 2e90371..61ba591 100644
--- a/arch/arm64/kvm/sys_regs.c
+++ b/arch/arm64/kvm/sys_regs.c
@@ -20,6 +20,7 @@
* along with this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
@@ -34,6 +35,7 @@
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_mmu.h>
+#include <asm/perf_event.h>
#include <trace/events/kvm.h>
@@ -439,6 +441,344 @@
vcpu_sys_reg(vcpu, MPIDR_EL1) = (1ULL << 31) | mpidr;
}
+static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
+{
+ u64 pmcr, val;
+
+ asm volatile("mrs %0, pmcr_el0\n" : "=r" (pmcr));
+	/*
+	 * Writable bits of PMCR_EL0 (ARMV8_PMU_PMCR_MASK) are reset to
+	 * UNKNOWN, except PMCR.E, which resets to zero.
+	 */
+ val = ((pmcr & ~ARMV8_PMU_PMCR_MASK)
+ | (ARMV8_PMU_PMCR_MASK & 0xdecafbad)) & (~ARMV8_PMU_PMCR_E);
+ vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+}
+
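A stand-alone sketch of the reset_pmcr() masking above. The two constants are illustrative stand-ins for the asm/perf_event.h definitions, so treat the exact mask value as an assumption; the point is the three-step combine: keep the read-only ID fields, fill the writable field with an arbitrary UNKNOWN pattern, then clear E.

#include <stdint.h>
#include <stdio.h>

#define ARMV8_PMU_PMCR_MASK	0x3f		/* assumed writable-bit mask */
#define ARMV8_PMU_PMCR_E	(1u << 0)	/* counter enable */

int main(void)
{
	uint64_t pmcr = 0x41013000;	/* example hardware PMCR_EL0 (IMP/IDCODE/N) */

	/* Keep the read-only ID fields, fill the writable bits with an
	 * arbitrary "UNKNOWN" pattern, then force E to zero. */
	uint64_t val = ((pmcr & ~(uint64_t)ARMV8_PMU_PMCR_MASK)
			| (ARMV8_PMU_PMCR_MASK & 0xdecafbad))
		       & ~(uint64_t)ARMV8_PMU_PMCR_E;

	printf("reset PMCR_EL0 = %#llx\n", (unsigned long long)val);
	return 0;
}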
+static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
+{
+ u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+ return !((reg & ARMV8_PMU_USERENR_EN) || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
+{
+ u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+ return !((reg & (ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN))
+ || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
+{
+ u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+ return !((reg & (ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN))
+ || vcpu_mode_priv(vcpu));
+}
+
+static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
+{
+ u64 reg = vcpu_sys_reg(vcpu, PMUSERENR_EL0);
+
+ return !((reg & (ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN))
+ || vcpu_mode_priv(vcpu));
+}
+
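The four pmu_*_el0_disabled() helpers above all follow one rule: an access from the guest's EL0 is allowed only if PMUSERENR_EL0.EN or the relevant per-class bit is set, while EL1 is always allowed. A user-space model of the cycle-counter case, with bit positions following the ARMv8 PMUSERENR_EL0 layout:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define USERENR_EN	(1u << 0)	/* EL0 access enable */
#define USERENR_CR	(1u << 2)	/* cycle counter read enable */

static bool cycle_read_disabled(uint32_t userenr, bool priv)
{
	return !((userenr & (USERENR_CR | USERENR_EN)) || priv);
}

int main(void)
{
	printf("EL0, USERENR=0:  disabled=%d\n", cycle_read_disabled(0, false));
	printf("EL0, USERENR=CR: disabled=%d\n", cycle_read_disabled(USERENR_CR, false));
	printf("EL1, USERENR=0:  disabled=%d\n", cycle_read_disabled(0, true));
	return 0;
}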
+static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 val;
+
+ if (!kvm_arm_pmu_v3_ready(vcpu))
+ return trap_raz_wi(vcpu, p, r);
+
+ if (pmu_access_el0_disabled(vcpu))
+ return false;
+
+ if (p->is_write) {
+ /* Only update writeable bits of PMCR */
+ val = vcpu_sys_reg(vcpu, PMCR_EL0);
+ val &= ~ARMV8_PMU_PMCR_MASK;
+ val |= p->regval & ARMV8_PMU_PMCR_MASK;
+ vcpu_sys_reg(vcpu, PMCR_EL0) = val;
+ kvm_pmu_handle_pmcr(vcpu, val);
+ } else {
+ /* PMCR.P & PMCR.C are RAZ */
+ val = vcpu_sys_reg(vcpu, PMCR_EL0)
+ & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
+ p->regval = val;
+ }
+
+ return true;
+}
+
+static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (!kvm_arm_pmu_v3_ready(vcpu))
+ return trap_raz_wi(vcpu, p, r);
+
+ if (pmu_access_event_counter_el0_disabled(vcpu))
+ return false;
+
+ if (p->is_write)
+ vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
+ else
+ /* return PMSELR.SEL field */
+ p->regval = vcpu_sys_reg(vcpu, PMSELR_EL0)
+ & ARMV8_PMU_COUNTER_MASK;
+
+ return true;
+}
+
+static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 pmceid;
+
+ if (!kvm_arm_pmu_v3_ready(vcpu))
+ return trap_raz_wi(vcpu, p, r);
+
+ BUG_ON(p->is_write);
+
+ if (pmu_access_el0_disabled(vcpu))
+ return false;
+
+ if (!(p->Op2 & 1))
+ asm volatile("mrs %0, pmceid0_el0\n" : "=r" (pmceid));
+ else
+ asm volatile("mrs %0, pmceid1_el0\n" : "=r" (pmceid));
+
+ p->regval = pmceid;
+
+ return true;
+}
+
+static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
+{
+ u64 pmcr, val;
+
+ pmcr = vcpu_sys_reg(vcpu, PMCR_EL0);
+ val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
+ if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX)
+ return false;
+
+ return true;
+}
+
+static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
+ struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 idx;
+
+ if (!kvm_arm_pmu_v3_ready(vcpu))
+ return trap_raz_wi(vcpu, p, r);
+
+ if (r->CRn == 9 && r->CRm == 13) {
+ if (r->Op2 == 2) {
+ /* PMXEVCNTR_EL0 */
+ if (pmu_access_event_counter_el0_disabled(vcpu))
+ return false;
+
+ idx = vcpu_sys_reg(vcpu, PMSELR_EL0)
+ & ARMV8_PMU_COUNTER_MASK;
+ } else if (r->Op2 == 0) {
+ /* PMCCNTR_EL0 */
+ if (pmu_access_cycle_counter_el0_disabled(vcpu))
+ return false;
+
+ idx = ARMV8_PMU_CYCLE_IDX;
+ } else {
+ BUG();
+ }
+ } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
+ /* PMEVCNTRn_EL0 */
+ if (pmu_access_event_counter_el0_disabled(vcpu))
+ return false;
+
+ idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+ } else {
+ BUG();
+ }
+
+ if (!pmu_counter_idx_valid(vcpu, idx))
+ return false;
+
+ if (p->is_write) {
+ if (pmu_access_el0_disabled(vcpu))
+ return false;
+
+ kvm_pmu_set_counter_value(vcpu, idx, p->regval);
+ } else {
+ p->regval = kvm_pmu_get_counter_value(vcpu, idx);
+ }
+
+ return true;
+}
+
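The CRn==14 branch above decodes the counter index that the PMU_PMEVCNTR_EL0() macro (further down) encodes: n[4:3] goes into CRm[1:0] on top of a 0b1000 (counters) or 0b1100 (types) base, and n[2:0] goes into Op2. A stand-alone round-trip check of that packing:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (int n = 0; n < 31; n++) {
		int crm = 0x8 | ((n >> 3) & 0x3);	/* 0b1000 | n[4:3], encode side */
		int op2 = n & 0x7;
		int idx = ((crm & 3) << 3) | (op2 & 7);	/* decode side, as in the handler */

		assert(idx == n);
	}
	puts("PMEVCNTRn encoding round-trips for n = 0..30");
	return 0;
}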
+static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 idx, reg;
+
+ if (!kvm_arm_pmu_v3_ready(vcpu))
+ return trap_raz_wi(vcpu, p, r);
+
+ if (pmu_access_el0_disabled(vcpu))
+ return false;
+
+ if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
+ /* PMXEVTYPER_EL0 */
+ idx = vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
+ reg = PMEVTYPER0_EL0 + idx;
+ } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
+ idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
+ if (idx == ARMV8_PMU_CYCLE_IDX)
+ reg = PMCCFILTR_EL0;
+ else
+ /* PMEVTYPERn_EL0 */
+ reg = PMEVTYPER0_EL0 + idx;
+ } else {
+ BUG();
+ }
+
+ if (!pmu_counter_idx_valid(vcpu, idx))
+ return false;
+
+ if (p->is_write) {
+ kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
+ vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
+ } else {
+ p->regval = vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
+ }
+
+ return true;
+}
+
+static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 val, mask;
+
+ if (!kvm_arm_pmu_v3_ready(vcpu))
+ return trap_raz_wi(vcpu, p, r);
+
+ if (pmu_access_el0_disabled(vcpu))
+ return false;
+
+ mask = kvm_pmu_valid_counter_mask(vcpu);
+ if (p->is_write) {
+ val = p->regval & mask;
+ if (r->Op2 & 0x1) {
+ /* accessing PMCNTENSET_EL0 */
+ vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
+ kvm_pmu_enable_counter(vcpu, val);
+ } else {
+ /* accessing PMCNTENCLR_EL0 */
+ vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
+ kvm_pmu_disable_counter(vcpu, val);
+ }
+ } else {
+ p->regval = vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask;
+ }
+
+ return true;
+}
+
+static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+ if (!kvm_arm_pmu_v3_ready(vcpu))
+ return trap_raz_wi(vcpu, p, r);
+
+ if (!vcpu_mode_priv(vcpu))
+ return false;
+
+ if (p->is_write) {
+ u64 val = p->regval & mask;
+
+ if (r->Op2 & 0x1)
+ /* accessing PMINTENSET_EL1 */
+ vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
+ else
+ /* accessing PMINTENCLR_EL1 */
+ vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
+ } else {
+ p->regval = vcpu_sys_reg(vcpu, PMINTENSET_EL1) & mask;
+ }
+
+ return true;
+}
+
+static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 mask = kvm_pmu_valid_counter_mask(vcpu);
+
+ if (!kvm_arm_pmu_v3_ready(vcpu))
+ return trap_raz_wi(vcpu, p, r);
+
+ if (pmu_access_el0_disabled(vcpu))
+ return false;
+
+ if (p->is_write) {
+ if (r->CRm & 0x2)
+ /* accessing PMOVSSET_EL0 */
+ kvm_pmu_overflow_set(vcpu, p->regval & mask);
+ else
+ /* accessing PMOVSCLR_EL0 */
+ vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
+ } else {
+ p->regval = vcpu_sys_reg(vcpu, PMOVSSET_EL0) & mask;
+ }
+
+ return true;
+}
+
+static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ u64 mask;
+
+ if (!kvm_arm_pmu_v3_ready(vcpu))
+ return trap_raz_wi(vcpu, p, r);
+
+ if (pmu_write_swinc_el0_disabled(vcpu))
+ return false;
+
+ if (p->is_write) {
+ mask = kvm_pmu_valid_counter_mask(vcpu);
+ kvm_pmu_software_increment(vcpu, p->regval & mask);
+ return true;
+ }
+
+ return false;
+}
+
+static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
+ const struct sys_reg_desc *r)
+{
+ if (!kvm_arm_pmu_v3_ready(vcpu))
+ return trap_raz_wi(vcpu, p, r);
+
+ if (p->is_write) {
+ if (!vcpu_mode_priv(vcpu))
+ return false;
+
+ vcpu_sys_reg(vcpu, PMUSERENR_EL0) = p->regval
+ & ARMV8_PMU_USERENR_MASK;
+ } else {
+ p->regval = vcpu_sys_reg(vcpu, PMUSERENR_EL0)
+ & ARMV8_PMU_USERENR_MASK;
+ }
+
+ return true;
+}
+
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n) \
/* DBGBVRn_EL1 */ \
@@ -454,6 +794,20 @@
{ Op0(0b10), Op1(0b000), CRn(0b0000), CRm((n)), Op2(0b111), \
trap_wcr, reset_wcr, n, 0, get_wcr, set_wcr }
+/* Macro to expand the PMEVCNTRn_EL0 register */
+#define PMU_PMEVCNTR_EL0(n) \
+ /* PMEVCNTRn_EL0 */ \
+ { Op0(0b11), Op1(0b011), CRn(0b1110), \
+ CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
+ access_pmu_evcntr, reset_unknown, (PMEVCNTR0_EL0 + n), }
+
+/* Macro to expand the PMEVTYPERn_EL0 register */
+#define PMU_PMEVTYPER_EL0(n) \
+ /* PMEVTYPERn_EL0 */ \
+ { Op0(0b11), Op1(0b011), CRn(0b1110), \
+ CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
+ access_pmu_evtyper, reset_unknown, (PMEVTYPER0_EL0 + n), }
+
/*
* Architected system registers.
* Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
@@ -583,10 +937,10 @@
/* PMINTENSET_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
- trap_raz_wi },
+ access_pminten, reset_unknown, PMINTENSET_EL1 },
/* PMINTENCLR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
- trap_raz_wi },
+ access_pminten, NULL, PMINTENSET_EL1 },
/* MAIR_EL1 */
{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
@@ -623,43 +977,46 @@
/* PMCR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
- trap_raz_wi },
+ access_pmcr, reset_pmcr, },
/* PMCNTENSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
- trap_raz_wi },
+ access_pmcnten, reset_unknown, PMCNTENSET_EL0 },
/* PMCNTENCLR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
- trap_raz_wi },
+ access_pmcnten, NULL, PMCNTENSET_EL0 },
/* PMOVSCLR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
- trap_raz_wi },
+ access_pmovs, NULL, PMOVSSET_EL0 },
/* PMSWINC_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
- trap_raz_wi },
+ access_pmswinc, reset_unknown, PMSWINC_EL0 },
/* PMSELR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
- trap_raz_wi },
+ access_pmselr, reset_unknown, PMSELR_EL0 },
/* PMCEID0_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
- trap_raz_wi },
+ access_pmceid },
/* PMCEID1_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
- trap_raz_wi },
+ access_pmceid },
/* PMCCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
- trap_raz_wi },
+ access_pmu_evcntr, reset_unknown, PMCCNTR_EL0 },
/* PMXEVTYPER_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
- trap_raz_wi },
+ access_pmu_evtyper },
/* PMXEVCNTR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
- trap_raz_wi },
- /* PMUSERENR_EL0 */
+ access_pmu_evcntr },
+	/*
+	 * PMUSERENR_EL0: this register resets to an UNKNOWN value in
+	 * 64-bit mode and to zero in 32-bit mode. Reset it to zero here
+	 * for consistency.
+	 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
- trap_raz_wi },
+ access_pmuserenr, reset_val, PMUSERENR_EL0, 0 },
/* PMOVSSET_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
- trap_raz_wi },
+ access_pmovs, reset_unknown, PMOVSSET_EL0 },
/* TPIDR_EL0 */
{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
@@ -668,6 +1025,77 @@
{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
NULL, reset_unknown, TPIDRRO_EL0 },
+ /* PMEVCNTRn_EL0 */
+ PMU_PMEVCNTR_EL0(0),
+ PMU_PMEVCNTR_EL0(1),
+ PMU_PMEVCNTR_EL0(2),
+ PMU_PMEVCNTR_EL0(3),
+ PMU_PMEVCNTR_EL0(4),
+ PMU_PMEVCNTR_EL0(5),
+ PMU_PMEVCNTR_EL0(6),
+ PMU_PMEVCNTR_EL0(7),
+ PMU_PMEVCNTR_EL0(8),
+ PMU_PMEVCNTR_EL0(9),
+ PMU_PMEVCNTR_EL0(10),
+ PMU_PMEVCNTR_EL0(11),
+ PMU_PMEVCNTR_EL0(12),
+ PMU_PMEVCNTR_EL0(13),
+ PMU_PMEVCNTR_EL0(14),
+ PMU_PMEVCNTR_EL0(15),
+ PMU_PMEVCNTR_EL0(16),
+ PMU_PMEVCNTR_EL0(17),
+ PMU_PMEVCNTR_EL0(18),
+ PMU_PMEVCNTR_EL0(19),
+ PMU_PMEVCNTR_EL0(20),
+ PMU_PMEVCNTR_EL0(21),
+ PMU_PMEVCNTR_EL0(22),
+ PMU_PMEVCNTR_EL0(23),
+ PMU_PMEVCNTR_EL0(24),
+ PMU_PMEVCNTR_EL0(25),
+ PMU_PMEVCNTR_EL0(26),
+ PMU_PMEVCNTR_EL0(27),
+ PMU_PMEVCNTR_EL0(28),
+ PMU_PMEVCNTR_EL0(29),
+ PMU_PMEVCNTR_EL0(30),
+ /* PMEVTYPERn_EL0 */
+ PMU_PMEVTYPER_EL0(0),
+ PMU_PMEVTYPER_EL0(1),
+ PMU_PMEVTYPER_EL0(2),
+ PMU_PMEVTYPER_EL0(3),
+ PMU_PMEVTYPER_EL0(4),
+ PMU_PMEVTYPER_EL0(5),
+ PMU_PMEVTYPER_EL0(6),
+ PMU_PMEVTYPER_EL0(7),
+ PMU_PMEVTYPER_EL0(8),
+ PMU_PMEVTYPER_EL0(9),
+ PMU_PMEVTYPER_EL0(10),
+ PMU_PMEVTYPER_EL0(11),
+ PMU_PMEVTYPER_EL0(12),
+ PMU_PMEVTYPER_EL0(13),
+ PMU_PMEVTYPER_EL0(14),
+ PMU_PMEVTYPER_EL0(15),
+ PMU_PMEVTYPER_EL0(16),
+ PMU_PMEVTYPER_EL0(17),
+ PMU_PMEVTYPER_EL0(18),
+ PMU_PMEVTYPER_EL0(19),
+ PMU_PMEVTYPER_EL0(20),
+ PMU_PMEVTYPER_EL0(21),
+ PMU_PMEVTYPER_EL0(22),
+ PMU_PMEVTYPER_EL0(23),
+ PMU_PMEVTYPER_EL0(24),
+ PMU_PMEVTYPER_EL0(25),
+ PMU_PMEVTYPER_EL0(26),
+ PMU_PMEVTYPER_EL0(27),
+ PMU_PMEVTYPER_EL0(28),
+ PMU_PMEVTYPER_EL0(29),
+ PMU_PMEVTYPER_EL0(30),
+	/*
+	 * PMCCFILTR_EL0: this register resets to an UNKNOWN value in
+	 * 64-bit mode and to zero in 32-bit mode. Reset it to zero here
+	 * for consistency.
+	 */
+ { Op0(0b11), Op1(0b011), CRn(0b1110), CRm(0b1111), Op2(0b111),
+ access_pmu_evtyper, reset_val, PMCCFILTR_EL0, 0 },
+
/* DACR32_EL2 */
{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
NULL, reset_unknown, DACR32_EL2 },
@@ -857,6 +1285,20 @@
{ Op1( 0), CRm( 2), .access = trap_raz_wi },
};
+/* Macro to expand the PMEVCNTRn register */
+#define PMU_PMEVCNTR(n) \
+ /* PMEVCNTRn */ \
+ { Op1(0), CRn(0b1110), \
+ CRm((0b1000 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
+ access_pmu_evcntr }
+
+/* Macro to expand the PMEVTYPERn register */
+#define PMU_PMEVTYPER(n) \
+ /* PMEVTYPERn */ \
+ { Op1(0), CRn(0b1110), \
+ CRm((0b1100 | (((n) >> 3) & 0x3))), Op2(((n) & 0x7)), \
+ access_pmu_evtyper }
+
/*
* Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
* depending on the way they are accessed (as a 32bit or a 64bit
@@ -885,19 +1327,21 @@
{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
/* PMU */
- { Op1( 0), CRn( 9), CRm(12), Op2( 0), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(12), Op2( 1), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(12), Op2( 2), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(12), Op2( 3), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(12), Op2( 5), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(12), Op2( 6), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(12), Op2( 7), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(13), Op2( 0), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(13), Op2( 1), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(13), Op2( 2), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(14), Op2( 0), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(14), Op2( 1), trap_raz_wi },
- { Op1( 0), CRn( 9), CRm(14), Op2( 2), trap_raz_wi },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 0), access_pmcr },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 1), access_pmcnten },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 2), access_pmcnten },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 3), access_pmovs },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 4), access_pmswinc },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 5), access_pmselr },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 6), access_pmceid },
+ { Op1( 0), CRn( 9), CRm(12), Op2( 7), access_pmceid },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 0), access_pmu_evcntr },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 1), access_pmu_evtyper },
+ { Op1( 0), CRn( 9), CRm(13), Op2( 2), access_pmu_evcntr },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 0), access_pmuserenr },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 1), access_pminten },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 2), access_pminten },
+ { Op1( 0), CRn( 9), CRm(14), Op2( 3), access_pmovs },
{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
@@ -908,10 +1352,78 @@
{ Op1( 0), CRn(12), CRm(12), Op2( 5), trap_raz_wi },
{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },
+
+ /* PMEVCNTRn */
+ PMU_PMEVCNTR(0),
+ PMU_PMEVCNTR(1),
+ PMU_PMEVCNTR(2),
+ PMU_PMEVCNTR(3),
+ PMU_PMEVCNTR(4),
+ PMU_PMEVCNTR(5),
+ PMU_PMEVCNTR(6),
+ PMU_PMEVCNTR(7),
+ PMU_PMEVCNTR(8),
+ PMU_PMEVCNTR(9),
+ PMU_PMEVCNTR(10),
+ PMU_PMEVCNTR(11),
+ PMU_PMEVCNTR(12),
+ PMU_PMEVCNTR(13),
+ PMU_PMEVCNTR(14),
+ PMU_PMEVCNTR(15),
+ PMU_PMEVCNTR(16),
+ PMU_PMEVCNTR(17),
+ PMU_PMEVCNTR(18),
+ PMU_PMEVCNTR(19),
+ PMU_PMEVCNTR(20),
+ PMU_PMEVCNTR(21),
+ PMU_PMEVCNTR(22),
+ PMU_PMEVCNTR(23),
+ PMU_PMEVCNTR(24),
+ PMU_PMEVCNTR(25),
+ PMU_PMEVCNTR(26),
+ PMU_PMEVCNTR(27),
+ PMU_PMEVCNTR(28),
+ PMU_PMEVCNTR(29),
+ PMU_PMEVCNTR(30),
+ /* PMEVTYPERn */
+ PMU_PMEVTYPER(0),
+ PMU_PMEVTYPER(1),
+ PMU_PMEVTYPER(2),
+ PMU_PMEVTYPER(3),
+ PMU_PMEVTYPER(4),
+ PMU_PMEVTYPER(5),
+ PMU_PMEVTYPER(6),
+ PMU_PMEVTYPER(7),
+ PMU_PMEVTYPER(8),
+ PMU_PMEVTYPER(9),
+ PMU_PMEVTYPER(10),
+ PMU_PMEVTYPER(11),
+ PMU_PMEVTYPER(12),
+ PMU_PMEVTYPER(13),
+ PMU_PMEVTYPER(14),
+ PMU_PMEVTYPER(15),
+ PMU_PMEVTYPER(16),
+ PMU_PMEVTYPER(17),
+ PMU_PMEVTYPER(18),
+ PMU_PMEVTYPER(19),
+ PMU_PMEVTYPER(20),
+ PMU_PMEVTYPER(21),
+ PMU_PMEVTYPER(22),
+ PMU_PMEVTYPER(23),
+ PMU_PMEVTYPER(24),
+ PMU_PMEVTYPER(25),
+ PMU_PMEVTYPER(26),
+ PMU_PMEVTYPER(27),
+ PMU_PMEVTYPER(28),
+ PMU_PMEVTYPER(29),
+ PMU_PMEVTYPER(30),
+ /* PMCCFILTR */
+ { Op1(0), CRn(14), CRm(15), Op2(7), access_pmu_evtyper },
};
static const struct sys_reg_desc cp15_64_regs[] = {
{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
+ { Op1( 0), CRn( 0), CRm( 9), Op2( 0), access_pmu_evcntr },
{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi },
{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};
@@ -942,29 +1454,32 @@
}
}
+#define reg_to_match_value(x) \
+ ({ \
+ unsigned long val; \
+ val = (x)->Op0 << 14; \
+ val |= (x)->Op1 << 11; \
+ val |= (x)->CRn << 7; \
+ val |= (x)->CRm << 3; \
+ val |= (x)->Op2; \
+ val; \
+ })
+
+static int match_sys_reg(const void *key, const void *elt)
+{
+ const unsigned long pval = (unsigned long)key;
+ const struct sys_reg_desc *r = elt;
+
+ return pval - reg_to_match_value(r);
+}
+
static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
const struct sys_reg_desc table[],
unsigned int num)
{
- unsigned int i;
+ unsigned long pval = reg_to_match_value(params);
- for (i = 0; i < num; i++) {
- const struct sys_reg_desc *r = &table[i];
-
- if (params->Op0 != r->Op0)
- continue;
- if (params->Op1 != r->Op1)
- continue;
- if (params->CRn != r->CRn)
- continue;
- if (params->CRm != r->CRm)
- continue;
- if (params->Op2 != r->Op2)
- continue;
-
- return r;
- }
- return NULL;
+ return bsearch((void *)pval, table, num, sizeof(table[0]), match_sys_reg);
}
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
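The binary-search conversion above works because reg_to_match_value() packs the five encoding fields into one integer whose numeric order matches the table's "sorted ascending by Op0, Op1, CRn, CRm, Op2" invariant. A user-space model with illustrative encodings (the kernel returns the raw difference from the comparator; the sign-safe form below is equivalent for ordering):

#include <stdio.h>
#include <stdlib.h>

struct desc { unsigned op0, op1, crn, crm, op2; const char *name; };

/* Same packing as reg_to_match_value(). */
static unsigned long to_val(const struct desc *d)
{
	return ((unsigned long)d->op0 << 14) | (d->op1 << 11) |
	       (d->crn << 7) | (d->crm << 3) | d->op2;
}

static int match(const void *key, const void *elt)
{
	unsigned long pval = (unsigned long)key;
	unsigned long rval = to_val(elt);

	return (pval > rval) - (pval < rval);
}

int main(void)
{
	/* Illustrative encodings, listed in ascending packed order. */
	static const struct desc table[] = {
		{ 3, 0, 1, 0, 0, "SCTLR_EL1" },
		{ 3, 3, 9, 12, 0, "PMCR_EL0" },
		{ 3, 3, 13, 0, 2, "TPIDR_EL0" },
	};
	struct desc key = { 3, 3, 9, 12, 0, NULL };
	struct desc *r = bsearch((void *)to_val(&key), table,
				 sizeof(table) / sizeof(table[0]),
				 sizeof(table[0]), match);

	printf("found: %s\n", r ? r->name : "(none)");
	return 0;
}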
diff --git a/arch/arm64/lib/strnlen.S b/arch/arm64/lib/strnlen.S
index 2ca6657..eae38da 100644
--- a/arch/arm64/lib/strnlen.S
+++ b/arch/arm64/lib/strnlen.S
@@ -168,4 +168,4 @@
.Lhit_limit:
mov len, limit
ret
-ENDPROC(strnlen)
+ENDPIPROC(strnlen)
diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c
index 331c4ca..a6e757c 100644
--- a/arch/arm64/mm/dma-mapping.c
+++ b/arch/arm64/mm/dma-mapping.c
@@ -933,6 +933,10 @@
ret = register_iommu_dma_ops_notifier(&platform_bus_type);
if (!ret)
ret = register_iommu_dma_ops_notifier(&amba_bustype);
+
+ /* handle devices queued before this arch_initcall */
+ if (!ret)
+ __iommu_attach_notifier(NULL, BUS_NOTIFY_ADD_DEVICE, NULL);
return ret;
}
arch_initcall(__iommu_dma_init);
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 92ddac1..abe2a95 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -371,6 +371,13 @@
return 0;
}
+static int do_alignment_fault(unsigned long addr, unsigned int esr,
+ struct pt_regs *regs)
+{
+ do_bad_area(addr, esr, regs);
+ return 0;
+}
+
/*
* This abort handler always returns "fault".
*/
@@ -418,7 +425,7 @@
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
{ do_bad, SIGBUS, 0, "synchronous parity error (translation table walk)" },
{ do_bad, SIGBUS, 0, "unknown 32" },
- { do_bad, SIGBUS, BUS_ADRALN, "alignment fault" },
+ { do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" },
{ do_bad, SIGBUS, 0, "unknown 34" },
{ do_bad, SIGBUS, 0, "unknown 35" },
{ do_bad, SIGBUS, 0, "unknown 36" },
diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c
index 4c893b5..232f787 100644
--- a/arch/arm64/mm/mmap.c
+++ b/arch/arm64/mm/mmap.c
@@ -53,10 +53,10 @@
#ifdef CONFIG_COMPAT
if (test_thread_flag(TIF_32BIT))
- rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1);
+ rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
else
#endif
- rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1);
+ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
return rnd << PAGE_SHIFT;
}
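This change matters because arm64's mmap randomization width can exceed 31 bits (up to 33 with 48-bit VA, per the arm64 Kconfig as assumed here): the old code shifted a plain int, which overflows for such widths, and drew from a 32-bit random source, so the top randomization bits could never be set. A small demonstration of the source-side truncation with assumed example values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	int mmap_rnd_bits = 33;			/* assumed Kconfig maximum */
	uint64_t mask = (1ULL << mmap_rnd_bits) - 1;

	uint32_t rnd32 = 0xffffffffu;		/* best case for get_random_int() */
	uint64_t rnd64 = 0x1ffffffffULL;	/* get_random_long() covers all 33 bits */

	/* With a 32-bit source, bit 32 of the randomization is always zero. */
	printf("32-bit source: %#llx\n", (unsigned long long)(rnd32 & mask));
	printf("64-bit source: %#llx\n", (unsigned long long)(rnd64 & mask));
	return 0;
}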
diff --git a/arch/m68k/configs/amiga_defconfig b/arch/m68k/configs/amiga_defconfig
index fc96e81..d1fc479 100644
--- a/arch/m68k/configs/amiga_defconfig
+++ b/arch/m68k/configs/amiga_defconfig
@@ -108,6 +108,8 @@
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@
CONFIG_BRIDGE=m
CONFIG_ATALK=m
CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -366,6 +374,7 @@
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
CONFIG_HYDRA=y
CONFIG_APNE=y
CONFIG_ZORRO8390=y
diff --git a/arch/m68k/configs/apollo_defconfig b/arch/m68k/configs/apollo_defconfig
index 05c904f..9bfe8be 100644
--- a/arch/m68k/configs/apollo_defconfig
+++ b/arch/m68k/configs/apollo_defconfig
@@ -106,6 +106,8 @@
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@
CONFIG_BRIDGE=m
CONFIG_ATALK=m
CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -344,6 +352,7 @@
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RENESAS is not set
# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/atari_defconfig b/arch/m68k/configs/atari_defconfig
index d572b73..ebdcfae 100644
--- a/arch/m68k/configs/atari_defconfig
+++ b/arch/m68k/configs/atari_defconfig
@@ -106,6 +106,8 @@
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@
CONFIG_BRIDGE=m
CONFIG_ATALK=m
CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -353,6 +361,7 @@
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
CONFIG_NE2000=y
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/bvme6000_defconfig b/arch/m68k/configs/bvme6000_defconfig
index 11a30c6..8acc65e 100644
--- a/arch/m68k/configs/bvme6000_defconfig
+++ b/arch/m68k/configs/bvme6000_defconfig
@@ -104,6 +104,8 @@
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@
CONFIG_BRIDGE=m
CONFIG_ATALK=m
CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RENESAS is not set
# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/hp300_defconfig b/arch/m68k/configs/hp300_defconfig
index 6630a51..0c6a3d5 100644
--- a/arch/m68k/configs/hp300_defconfig
+++ b/arch/m68k/configs/hp300_defconfig
@@ -106,6 +106,8 @@
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -264,6 +266,12 @@
CONFIG_BRIDGE=m
CONFIG_ATALK=m
CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -345,6 +353,7 @@
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RENESAS is not set
# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/mac_defconfig b/arch/m68k/configs/mac_defconfig
index 1d90b71..12a8a6c 100644
--- a/arch/m68k/configs/mac_defconfig
+++ b/arch/m68k/configs/mac_defconfig
@@ -105,6 +105,8 @@
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -266,6 +268,12 @@
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -362,6 +370,7 @@
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
CONFIG_MACSONIC=y
+# CONFIG_NET_VENDOR_NETRONOME is not set
CONFIG_MAC8390=y
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/multi_defconfig b/arch/m68k/configs/multi_defconfig
index 1fd21c1..64ff2dc 100644
--- a/arch/m68k/configs/multi_defconfig
+++ b/arch/m68k/configs/multi_defconfig
@@ -115,6 +115,8 @@
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -276,6 +278,12 @@
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -404,6 +412,7 @@
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
CONFIG_MACSONIC=y
+# CONFIG_NET_VENDOR_NETRONOME is not set
CONFIG_HYDRA=y
CONFIG_MAC8390=y
CONFIG_NE2000=y
diff --git a/arch/m68k/configs/mvme147_defconfig b/arch/m68k/configs/mvme147_defconfig
index 74e10f7..07fc6ab 100644
--- a/arch/m68k/configs/mvme147_defconfig
+++ b/arch/m68k/configs/mvme147_defconfig
@@ -103,6 +103,8 @@
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -261,6 +263,12 @@
CONFIG_BRIDGE=m
CONFIG_ATALK=m
CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RENESAS is not set
# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/mvme16x_defconfig b/arch/m68k/configs/mvme16x_defconfig
index 7034e71..69903de 100644
--- a/arch/m68k/configs/mvme16x_defconfig
+++ b/arch/m68k/configs/mvme16x_defconfig
@@ -104,6 +104,8 @@
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@
CONFIG_BRIDGE=m
CONFIG_ATALK=m
CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -343,6 +351,7 @@
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RENESAS is not set
# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/q40_defconfig b/arch/m68k/configs/q40_defconfig
index f7deb5f..bd84016 100644
--- a/arch/m68k/configs/q40_defconfig
+++ b/arch/m68k/configs/q40_defconfig
@@ -104,6 +104,8 @@
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -262,6 +264,12 @@
CONFIG_BRIDGE=m
CONFIG_ATALK=m
CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -352,6 +360,7 @@
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
CONFIG_NE2000=y
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RENESAS is not set
diff --git a/arch/m68k/configs/sun3_defconfig b/arch/m68k/configs/sun3_defconfig
index 0ce79eb..5f9fb3a 100644
--- a/arch/m68k/configs/sun3_defconfig
+++ b/arch/m68k/configs/sun3_defconfig
@@ -101,6 +101,8 @@
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@
CONFIG_BRIDGE=m
CONFIG_ATALK=m
CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -340,6 +348,7 @@
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RENESAS is not set
# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/configs/sun3x_defconfig b/arch/m68k/configs/sun3x_defconfig
index 4cb787e..5d1c674 100644
--- a/arch/m68k/configs/sun3x_defconfig
+++ b/arch/m68k/configs/sun3x_defconfig
@@ -101,6 +101,8 @@
CONFIG_NFT_QUEUE=m
CONFIG_NFT_REJECT=m
CONFIG_NFT_COMPAT=m
+CONFIG_NFT_DUP_NETDEV=m
+CONFIG_NFT_FWD_NETDEV=m
CONFIG_NETFILTER_XT_SET=m
CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m
@@ -259,6 +261,12 @@
CONFIG_BRIDGE=m
CONFIG_ATALK=m
CONFIG_6LOWPAN=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_HOP=m
+CONFIG_6LOWPAN_GHC_UDP=m
+CONFIG_6LOWPAN_GHC_ICMPV6=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_DEST=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_FRAG=m
+CONFIG_6LOWPAN_GHC_EXT_HDR_ROUTE=m
CONFIG_DNS_RESOLVER=y
CONFIG_BATMAN_ADV=m
CONFIG_BATMAN_ADV_DAT=y
@@ -341,6 +349,7 @@
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
# CONFIG_NET_VENDOR_QUALCOMM is not set
# CONFIG_NET_VENDOR_RENESAS is not set
# CONFIG_NET_VENDOR_ROCKER is not set
diff --git a/arch/m68k/include/asm/unistd.h b/arch/m68k/include/asm/unistd.h
index f9d96bf..bafaff6 100644
--- a/arch/m68k/include/asm/unistd.h
+++ b/arch/m68k/include/asm/unistd.h
@@ -4,7 +4,7 @@
#include <uapi/asm/unistd.h>
-#define NR_syscalls 376
+#define NR_syscalls 377
#define __ARCH_WANT_OLD_READDIR
#define __ARCH_WANT_OLD_STAT
diff --git a/arch/m68k/include/uapi/asm/unistd.h b/arch/m68k/include/uapi/asm/unistd.h
index 36cf129..0ca7296 100644
--- a/arch/m68k/include/uapi/asm/unistd.h
+++ b/arch/m68k/include/uapi/asm/unistd.h
@@ -381,5 +381,6 @@
#define __NR_userfaultfd 373
#define __NR_membarrier 374
#define __NR_mlock2 375
+#define __NR_copy_file_range 376
#endif /* _UAPI_ASM_M68K_UNISTD_H_ */
diff --git a/arch/m68k/kernel/syscalltable.S b/arch/m68k/kernel/syscalltable.S
index 282cd90..8bb9426 100644
--- a/arch/m68k/kernel/syscalltable.S
+++ b/arch/m68k/kernel/syscalltable.S
@@ -396,3 +396,4 @@
.long sys_userfaultfd
.long sys_membarrier
.long sys_mlock2 /* 375 */
+ .long sys_copy_file_range
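
The three m68k hunks above are the complete wiring for the new syscall:
NR_syscalls grows to 377, the uapi header names slot 376, and the call
table gains the handler. A minimal userspace smoke test, offered as a
hedged sketch only (the file names are illustrative, and the raw
syscall(2) form is used because libc wrappers for copy_file_range did
not exist yet at this point):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>
    #include <sys/syscall.h>

    #ifndef __NR_copy_file_range
    #define __NR_copy_file_range 376    /* m68k slot added by this patch */
    #endif

    int main(void)
    {
        int in = open("in.dat", O_RDONLY);
        int out = open("out.dat", O_WRONLY | O_CREAT | O_TRUNC, 0644);
        long n;

        if (in < 0 || out < 0) {
            perror("open");
            return 1;
        }
        /* NULL offsets use and advance the file offsets; flags must be 0 */
        n = syscall(__NR_copy_file_range, in, NULL, out, NULL, 4096, 0);
        if (n < 0)
            perror("copy_file_range");
        else
            printf("copied %ld bytes\n", n);
        return 0;
    }
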
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 57a945e..74a3db9 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -2085,7 +2085,7 @@
config PAGE_SIZE_64KB
bool "64kB"
- depends on !CPU_R3000 && !CPU_TX39XX
+ depends on !CPU_R3000 && !CPU_TX39XX && !CPU_R6000
help
Using 64kB page size will result in higher performance kernel at
the price of higher memory consumption. This option is available on
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index cefb7a5..e090fc3 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -227,7 +227,7 @@
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
- if (__h->e_machine != EM_MIPS) \
+ if (!mips_elf_check_machine(__h)) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
@@ -258,7 +258,7 @@
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
- if (__h->e_machine != EM_MIPS) \
+ if (!mips_elf_check_machine(__h)) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS64) \
__res = 0; \
@@ -285,6 +285,11 @@
#endif /* !defined(ELF_ARCH) */
+#define mips_elf_check_machine(x) ((x)->e_machine == EM_MIPS)
+
+#define vmcore_elf32_check_arch mips_elf_check_machine
+#define vmcore_elf64_check_arch mips_elf_check_machine
+
struct mips_abi;
extern struct mips_abi mips_abi;
diff --git a/arch/mips/include/asm/fpu.h b/arch/mips/include/asm/fpu.h
index 9cbf383..f06f97bd 100644
--- a/arch/mips/include/asm/fpu.h
+++ b/arch/mips/include/asm/fpu.h
@@ -179,6 +179,10 @@
if (save)
_save_fp(tsk);
__disable_fpu();
+ } else {
+ /* FPU should not have been left enabled with no owner */
+ WARN(read_c0_status() & ST0_CU1,
+ "Orphaned FPU left enabled");
}
KSTK_STATUS(tsk) &= ~ST0_CU1;
clear_tsk_thread_flag(tsk, TIF_USEDFPU);
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index 8ebd3f57..3ed10a8 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -128,7 +128,8 @@
case OCTEON_FEATURE_PCIE:
return OCTEON_IS_MODEL(OCTEON_CN56XX)
|| OCTEON_IS_MODEL(OCTEON_CN52XX)
- || OCTEON_IS_MODEL(OCTEON_CN6XXX);
+ || OCTEON_IS_MODEL(OCTEON_CN6XXX)
+ || OCTEON_IS_MODEL(OCTEON_CN7XXX);
case OCTEON_FEATURE_SRIO:
return OCTEON_IS_MODEL(OCTEON_CN63XX)
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 3f832c3..041153f 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -45,7 +45,7 @@
* User space process size: 2GB. This is hardcoded into a few places,
* so don't change it unless you know what you are doing.
*/
-#define TASK_SIZE 0x7fff8000UL
+#define TASK_SIZE 0x80000000UL
#endif
#define STACK_TOP_MAX TASK_SIZE
diff --git a/arch/mips/include/asm/stackframe.h b/arch/mips/include/asm/stackframe.h
index a71da57..eebf395 100644
--- a/arch/mips/include/asm/stackframe.h
+++ b/arch/mips/include/asm/stackframe.h
@@ -289,7 +289,7 @@
.set reorder
.set noat
mfc0 a0, CP0_STATUS
- li v1, 0xff00
+ li v1, ST0_CU1 | ST0_IM
ori a0, STATMASK
xori a0, STATMASK
mtc0 a0, CP0_STATUS
@@ -330,7 +330,7 @@
ori a0, STATMASK
xori a0, STATMASK
mtc0 a0, CP0_STATUS
- li v1, 0xff00
+ li v1, ST0_CU1 | ST0_FR | ST0_IM
and a0, v1
LONG_L v0, PT_STATUS(sp)
nor v1, $0, v1
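
The 0xff00 literal being retired is exactly the Status.IM (interrupt
mask) field; the named replacement also carries the coprocessor-1
enable bit, plus the FR register-mode bit on the second, 64-bit path,
so those bits survive the Status rewrite. A quick sanity check of the
constants (bit positions follow the architectural Status register
layout; asm/mipsregs.h is the real authority):

    #include <assert.h>

    #define ST0_IM  0x0000ff00  /* Status[15:8], interrupt mask */
    #define ST0_CU1 0x20000000  /* Status[29], coprocessor 1 (FPU) enable */
    #define ST0_FR  0x04000000  /* Status[26], 64-bit FPR mode */

    int main(void)
    {
        /* The old literal covered only the interrupt mask... */
        assert((ST0_CU1 | ST0_IM) == 0x2000ff00);
        /* ...and the 64-bit path additionally preserves FR. */
        assert((ST0_CU1 | ST0_FR | ST0_IM) == 0x2400ff00);
        return 0;
    }
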
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 6499d93..47bc45a 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -101,10 +101,8 @@
/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
if ((config_enabled(CONFIG_32BIT) ||
test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
- (regs->regs[2] == __NR_syscall)) {
+ (regs->regs[2] == __NR_syscall))
i++;
- n++;
- }
while (n--)
ret |= mips_get_syscall_arg(args++, task, regs, i++);
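
The dropped n++ was the bug: for an O32 indirect syscall() the argument
slots shift by one, so the read index i advances, but growing n as well
made the loop fetch one argument more than the tracer asked for. A
self-contained sketch of the corrected walk (hedged: the real code
reads pt_regs through mips_get_syscall_arg()):

    #include <stdio.h>

    /* For an indirect syscall the slots shift by one (i++), but the
     * number of arguments requested (n) must not grow. */
    static void get_args(unsigned long *out, const unsigned long *slots,
                         unsigned int n, int indirect)
    {
        unsigned int i = 0;

        if (indirect)
            i++;                /* skip the __NR_syscall slot */
        while (n--)
            *out++ = slots[i++];
    }

    int main(void)
    {
        /* a0..a3 as syscall(__NR_write, fd, buf, len) presents them */
        unsigned long slots[] = { 4004 /* __NR_write */, 1, 0x1000, 42 };
        unsigned long args[3];

        get_args(args, slots, 3, 1);
        printf("fd=%lu buf=%#lx len=%lu\n", args[0], args[1], args[2]);
        return 0;
    }
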
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 90f03a7..3129795 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -380,16 +380,17 @@
#define __NR_userfaultfd (__NR_Linux + 357)
#define __NR_membarrier (__NR_Linux + 358)
#define __NR_mlock2 (__NR_Linux + 359)
+#define __NR_copy_file_range (__NR_Linux + 360)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 359
+#define __NR_Linux_syscalls 360
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 359
+#define __NR_O32_Linux_syscalls 360
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -717,16 +718,17 @@
#define __NR_userfaultfd (__NR_Linux + 317)
#define __NR_membarrier (__NR_Linux + 318)
#define __NR_mlock2 (__NR_Linux + 319)
+#define __NR_copy_file_range (__NR_Linux + 320)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 319
+#define __NR_Linux_syscalls 320
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 319
+#define __NR_64_Linux_syscalls 320
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1058,15 +1060,16 @@
#define __NR_userfaultfd (__NR_Linux + 321)
#define __NR_membarrier (__NR_Linux + 322)
#define __NR_mlock2 (__NR_Linux + 323)
+#define __NR_copy_file_range (__NR_Linux + 324)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 323
+#define __NR_Linux_syscalls 324
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 323
+#define __NR_N32_Linux_syscalls 324
#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 1188e00..1b992c6 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -35,7 +35,7 @@
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
- if (__h->e_machine != EM_MIPS) \
+ if (!mips_elf_check_machine(__h)) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index 9287678..abd3aff 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -47,7 +47,7 @@
int __res = 1; \
struct elfhdr *__h = (hdr); \
\
- if (__h->e_machine != EM_MIPS) \
+ if (!mips_elf_check_machine(__h)) \
__res = 0; \
if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
__res = 0; \
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index f2975d4..eddd5fd 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -65,12 +65,10 @@
status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_FR|KU_MASK);
status |= KU_USER;
regs->cp0_status = status;
- clear_used_math();
- clear_fpu_owner();
- init_dsp();
- clear_thread_flag(TIF_USEDMSA);
+ lose_fpu(0);
clear_thread_flag(TIF_MSA_CTX_LIVE);
- disable_msa();
+ clear_used_math();
+ init_dsp();
regs->cp0_epc = pc;
regs->regs[29] = sp;
}
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index 2d23c83..a563174 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -595,3 +595,4 @@
PTR sys_userfaultfd
PTR sys_membarrier
PTR sys_mlock2
+ PTR sys_copy_file_range /* 4360 */
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index deac633..2b2dc14 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -433,4 +433,5 @@
PTR sys_userfaultfd
PTR sys_membarrier
PTR sys_mlock2
+ PTR sys_copy_file_range /* 5320 */
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 5a69eb4..2bf5c85 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -423,4 +423,5 @@
PTR sys_userfaultfd
PTR sys_membarrier
PTR sys_mlock2
+ PTR sys_copy_file_range
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index e4b6d7c..c5b759e 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -578,4 +578,5 @@
PTR sys_userfaultfd
PTR sys_membarrier
PTR sys_mlock2
+ PTR sys_copy_file_range /* 4360 */
.size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 569a7d5..5fdaf8b 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -782,6 +782,7 @@
void __init setup_arch(char **cmdline_p)
{
cpu_probe();
+ mips_cm_probe();
prom_init();
setup_early_fdc_console();
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bafcb7a..ae790c5 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -663,7 +663,7 @@
return -1;
}
-static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned short opcode)
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
{
if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
int rd = (opcode & MM_RS) >> 16;
@@ -1119,11 +1119,12 @@
if (get_isa16_mode(regs->cp0_epc)) {
unsigned short mmop[2] = { 0 };
- if (unlikely(get_user(mmop[0], epc) < 0))
+ if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
status = SIGSEGV;
- if (unlikely(get_user(mmop[1], epc) < 0))
+ if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
status = SIGSEGV;
- opcode = (mmop[0] << 16) | mmop[1];
+ opcode = mmop[0];
+ opcode = (opcode << 16) | mmop[1];
if (status < 0)
status = simulate_rdhwr_mm(regs, opcode);
@@ -1369,26 +1370,12 @@
if (unlikely(compute_return_epc(regs) < 0))
break;
- if (get_isa16_mode(regs->cp0_epc)) {
- unsigned short mmop[2] = { 0 };
-
- if (unlikely(get_user(mmop[0], epc) < 0))
- status = SIGSEGV;
- if (unlikely(get_user(mmop[1], epc) < 0))
- status = SIGSEGV;
- opcode = (mmop[0] << 16) | mmop[1];
-
- if (status < 0)
- status = simulate_rdhwr_mm(regs, opcode);
- } else {
+ if (!get_isa16_mode(regs->cp0_epc)) {
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
if (!cpu_has_llsc && status < 0)
status = simulate_llsc(regs, opcode);
-
- if (status < 0)
- status = simulate_rdhwr_normal(regs, opcode);
}
if (status < 0)
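
Two defects are fixed in the microMIPS path above: both get_user()
calls previously read the same halfword at epc, and simulate_rdhwr_mm()
took the opcode as unsigned short, truncating the combined value. A
standalone sketch of the corrected fetch (the sample encoding is
illustrative only):

    #include <stdint.h>
    #include <stdio.h>

    /* Read two adjacent 16-bit halfwords and widen before shifting so
     * the high half is not lost. */
    static uint32_t fetch_mm_opcode(const uint16_t *epc)
    {
        uint32_t opcode = epc[0];          /* widen first... */

        return (opcode << 16) | epc[1];    /* ...then combine */
    }

    int main(void)
    {
        uint16_t insn[2] = { 0x001f, 0x6b3c };

        printf("opcode = %#010x\n", (unsigned)fetch_mm_opcode(insn));
        return 0;
    }
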
diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
index 5c81fdd..3530376 100644
--- a/arch/mips/mm/mmap.c
+++ b/arch/mips/mm/mmap.c
@@ -146,7 +146,7 @@
{
unsigned long rnd;
- rnd = (unsigned long)get_random_int();
+ rnd = get_random_long();
rnd <<= PAGE_SHIFT;
if (TASK_IS_32BIT_ADDR)
rnd &= 0xfffffful;
@@ -174,7 +174,7 @@
static inline unsigned long brk_rnd(void)
{
- unsigned long rnd = get_random_int();
+ unsigned long rnd = get_random_long();
rnd = rnd << PAGE_SHIFT;
/* 8MB for 32bit, 256MB for 64bit */
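
get_random_int() returns only 32 bits, while get_random_long() draws a
full unsigned long, so the shift-and-mask below now operates on a
full-width entropy source. A userspace mirror of the arithmetic
(hedged: random(3) merely stands in for the kernel generator):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long rnd = (unsigned long)random();  /* stand-in entropy */

        rnd <<= PAGE_SHIFT;
        rnd &= 0xffffffful;     /* the 64-bit task budget from mmap_rnd() */
        printf("randomised offset: %#lx\n", rnd);
        return 0;
    }
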
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 3bd0597..2496475 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -181,10 +181,6 @@
return 1;
}
-void __weak platform_early_l2_init(void)
-{
-}
-
static inline int __init mips_sc_probe(void)
{
struct cpuinfo_mips *c = &current_cpu_data;
@@ -194,12 +190,6 @@
/* Mark as not present until probe completed */
c->scache.flags |= MIPS_CACHE_NOT_PRESENT;
- /*
- * Do we need some platform specific probing before
- * we configure L2?
- */
- platform_early_l2_init();
-
if (mips_cm_revision() >= CM_REV_CM3)
return mips_sc_probe_cm3();
diff --git a/arch/mips/mti-malta/malta-init.c b/arch/mips/mti-malta/malta-init.c
index 571148c..dc2c521 100644
--- a/arch/mips/mti-malta/malta-init.c
+++ b/arch/mips/mti-malta/malta-init.c
@@ -293,7 +293,6 @@
console_config();
#endif
/* Early detection of CMP support */
- mips_cm_probe();
mips_cpc_probe();
if (!register_cps_smp_ops())
@@ -304,10 +303,3 @@
return;
register_up_smp_ops();
}
-
-void platform_early_l2_init(void)
-{
- /* L2 configuration lives in the CM3 */
- if (mips_cm_revision() >= CM_REV_CM3)
- mips_cm_probe();
-}
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index a009ee4..1ae932c2 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -297,12 +297,12 @@
return PTR_ERR(rstpcie0);
bridge_base = devm_ioremap_resource(&pdev->dev, bridge_res);
- if (!bridge_base)
- return -ENOMEM;
+ if (IS_ERR(bridge_base))
+ return PTR_ERR(bridge_base);
pcie_base = devm_ioremap_resource(&pdev->dev, pcie_res);
- if (!pcie_base)
- return -ENOMEM;
+ if (IS_ERR(pcie_base))
+ return PTR_ERR(pcie_base);
iomem_resource.start = 0;
iomem_resource.end = ~0;
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index e4824fd..9faa18c 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -557,7 +557,7 @@
config PPC_4K_PAGES
bool "4k page size"
- select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
+ select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
config PPC_16K_PAGES
bool "16k page size"
@@ -566,7 +566,7 @@
config PPC_64K_PAGES
bool "64k page size"
depends on !PPC_FSL_BOOK3E && (44x || PPC_STD_MMU_64 || PPC_BOOK3E_64)
- select HAVE_ARCH_SOFT_DIRTY if CHECKPOINT_RESTORE && PPC_BOOK3S
+ select HAVE_ARCH_SOFT_DIRTY if PPC_BOOK3S_64
config PPC_256K_PAGES
bool "256k page size"
diff --git a/arch/powerpc/include/asm/book3s/64/pgtable.h b/arch/powerpc/include/asm/book3s/64/pgtable.h
index 8d1c41d..ac07a30 100644
--- a/arch/powerpc/include/asm/book3s/64/pgtable.h
+++ b/arch/powerpc/include/asm/book3s/64/pgtable.h
@@ -281,6 +281,10 @@
extern void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp);
+#define __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
+extern void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+
#define pmd_move_must_withdraw pmd_move_must_withdraw
struct spinlock;
static inline int pmd_move_must_withdraw(struct spinlock *new_pmd_ptl,
diff --git a/arch/powerpc/include/asm/eeh.h b/arch/powerpc/include/asm/eeh.h
index c5eb86f..867c39b 100644
--- a/arch/powerpc/include/asm/eeh.h
+++ b/arch/powerpc/include/asm/eeh.h
@@ -81,6 +81,7 @@
#define EEH_PE_KEEP (1 << 8) /* Keep PE on hotplug */
#define EEH_PE_CFG_RESTRICTED (1 << 9) /* Block config on error */
#define EEH_PE_REMOVED (1 << 10) /* Removed permanently */
+#define EEH_PE_PRI_BUS (1 << 11) /* Cached primary bus */
struct eeh_pe {
int type; /* PE type: PHB/Bus/Device */
diff --git a/arch/powerpc/include/asm/kvm_book3s_64.h b/arch/powerpc/include/asm/kvm_book3s_64.h
index 2aa79c8..7529aab 100644
--- a/arch/powerpc/include/asm/kvm_book3s_64.h
+++ b/arch/powerpc/include/asm/kvm_book3s_64.h
@@ -33,8 +33,6 @@
}
#endif
-#define SPAPR_TCE_SHIFT 12
-
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
#define KVM_DEFAULT_HPT_ORDER 24 /* 16MB HPT by default */
#endif
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 9d08d8c..2e7c791 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -182,7 +182,10 @@
struct list_head list;
struct kvm *kvm;
u64 liobn;
- u32 window_size;
+ struct rcu_head rcu;
+ u32 page_shift;
+ u64 offset; /* in pages */
+ u64 size; /* window size in pages */
struct page *pages[0];
};
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 2241d53..2544eda 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -165,9 +165,25 @@
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
- struct kvm_create_spapr_tce *args);
+ struct kvm_create_spapr_tce_64 *args);
+extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
+ struct kvm_vcpu *vcpu, unsigned long liobn);
+extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
+ unsigned long ioba, unsigned long npages);
+extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
+ unsigned long tce);
+extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
+ unsigned long *ua, unsigned long **prmap);
+extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
+ unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba, unsigned long tce);
+extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_list, unsigned long npages);
+extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
@@ -437,6 +453,8 @@
{
return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}
+extern void kvmppc_alloc_host_rm_ops(void);
+extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
@@ -445,7 +463,11 @@
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
struct kvm_vcpu *vcpu, u32 cpu);
+extern void kvmppc_xics_ipi_action(void);
+extern int h_ipi_redirect;
#else
+static inline void kvmppc_alloc_host_rm_ops(void) {};
+static inline void kvmppc_free_host_rm_ops(void) {};
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
@@ -459,6 +481,33 @@
{ return 0; }
#endif
+/*
+ * Host-side operations we want to set up while running in real
+ * mode in the guest operating on the xics.
+ * Currently only VCPU wakeup is supported.
+ */
+
+union kvmppc_rm_state {
+ unsigned long raw;
+ struct {
+ u32 in_host;
+ u32 rm_action;
+ };
+};
+
+struct kvmppc_host_rm_core {
+ union kvmppc_rm_state rm_state;
+ void *rm_data;
+ char pad[112];
+};
+
+struct kvmppc_host_rm_ops {
+ struct kvmppc_host_rm_core *rm_core;
+ void (*vcpu_kick)(struct kvm_vcpu *vcpu);
+};
+
+extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
+
static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
index ac9fb11..47897a3 100644
--- a/arch/powerpc/include/asm/pgtable.h
+++ b/arch/powerpc/include/asm/pgtable.h
@@ -78,6 +78,9 @@
}
return __find_linux_pte_or_hugepte(pgdir, ea, is_thp, shift);
}
+
+unsigned long vmalloc_to_phys(void *vmalloc_addr);
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_PGTABLE_H */
diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
index 825663c..78083ed 100644
--- a/arch/powerpc/include/asm/smp.h
+++ b/arch/powerpc/include/asm/smp.h
@@ -114,6 +114,9 @@
#define PPC_MSG_TICK_BROADCAST 2
#define PPC_MSG_DEBUGGER_BREAK 3
+/* This is only used by the powernv kernel */
+#define PPC_MSG_RM_HOST_ACTION 4
+
/* for irq controllers that have dedicated ipis per message (4) */
extern int smp_request_message_ipi(int virq, int message);
extern const char *smp_ipi_name[];
@@ -121,6 +124,7 @@
/* for irq controllers with only a single ipi */
extern void smp_muxed_ipi_set_data(int cpu, unsigned long data);
extern void smp_muxed_ipi_message_pass(int cpu, int msg);
+extern void smp_muxed_ipi_set_message(int cpu, int msg);
extern irqreturn_t smp_ipi_demux(void);
void smp_init_pSeries(void);
diff --git a/arch/powerpc/include/asm/trace.h b/arch/powerpc/include/asm/trace.h
index 8e86b48..32e36b1 100644
--- a/arch/powerpc/include/asm/trace.h
+++ b/arch/powerpc/include/asm/trace.h
@@ -57,12 +57,14 @@
extern void hcall_tracepoint_regfunc(void);
extern void hcall_tracepoint_unregfunc(void);
-TRACE_EVENT_FN(hcall_entry,
+TRACE_EVENT_FN_COND(hcall_entry,
TP_PROTO(unsigned long opcode, unsigned long *args),
TP_ARGS(opcode, args),
+ TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
TP_STRUCT__entry(
__field(unsigned long, opcode)
),
@@ -76,13 +78,15 @@
hcall_tracepoint_regfunc, hcall_tracepoint_unregfunc
);
-TRACE_EVENT_FN(hcall_exit,
+TRACE_EVENT_FN_COND(hcall_exit,
TP_PROTO(unsigned long opcode, unsigned long retval,
unsigned long *retbuf),
TP_ARGS(opcode, retval, retbuf),
+ TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
TP_STRUCT__entry(
__field(unsigned long, opcode)
__field(unsigned long, retval)
diff --git a/arch/powerpc/include/asm/xics.h b/arch/powerpc/include/asm/xics.h
index 0e25bdb..2546048 100644
--- a/arch/powerpc/include/asm/xics.h
+++ b/arch/powerpc/include/asm/xics.h
@@ -30,6 +30,7 @@
#ifdef CONFIG_PPC_ICP_NATIVE
extern int icp_native_init(void);
extern void icp_native_flush_interrupt(void);
+extern void icp_native_cause_ipi_rm(int cpu);
#else
static inline int icp_native_init(void) { return -ENODEV; }
#endif
diff --git a/arch/powerpc/include/uapi/asm/kvm.h b/arch/powerpc/include/uapi/asm/kvm.h
index ab4d473..c93cf35 100644
--- a/arch/powerpc/include/uapi/asm/kvm.h
+++ b/arch/powerpc/include/uapi/asm/kvm.h
@@ -333,6 +333,15 @@
__u32 window_size;
};
+/* for KVM_CAP_SPAPR_TCE_64 */
+struct kvm_create_spapr_tce_64 {
+ __u64 liobn;
+ __u32 page_shift;
+ __u32 flags;
+ __u64 offset; /* in pages */
+ __u64 size; /* in pages */
+};
+
/* for KVM_ALLOCATE_RMA */
struct kvm_allocate_rma {
__u64 rma_size;
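
Compared with the old kvm_create_spapr_tce, the 64-bit variant carries
a page shift, a bus offset, and a size counted in IOMMU pages instead
of a byte-sized window_size, so windows larger than 4GB or starting
above zero become expressible. A hedged userspace sketch
(KVM_CREATE_SPAPR_TCE_64 is the ioctl added alongside this struct, the
headers must include this patch, and the window geometry is
illustrative):

    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    /* Create a 2GB window of 64K IOMMU pages starting at bus address 0;
     * on success the ioctl returns an fd for the TCE table. */
    int create_window(int vm_fd, uint64_t liobn)
    {
        struct kvm_create_spapr_tce_64 args = {
            .liobn      = liobn,
            .page_shift = 16,                     /* 64K IOMMU pages */
            .offset     = 0,                      /* in pages */
            .size       = (2ULL << 30) >> 16,     /* in pages */
        };

        return ioctl(vm_fd, KVM_CREATE_SPAPR_TCE_64, &args);
    }
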
diff --git a/arch/powerpc/kernel/eeh_driver.c b/arch/powerpc/kernel/eeh_driver.c
index 9387421..650cfb3 100644
--- a/arch/powerpc/kernel/eeh_driver.c
+++ b/arch/powerpc/kernel/eeh_driver.c
@@ -418,8 +418,7 @@
eeh_pcid_put(dev);
if (driver->err_handler &&
driver->err_handler->error_detected &&
- driver->err_handler->slot_reset &&
- driver->err_handler->resume)
+ driver->err_handler->slot_reset)
return NULL;
}
@@ -564,6 +563,7 @@
*/
eeh_pe_state_mark(pe, EEH_PE_KEEP);
if (bus) {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
pci_lock_rescan_remove();
pcibios_remove_pci_devices(bus);
pci_unlock_rescan_remove();
@@ -803,6 +803,7 @@
* the their PCI config any more.
*/
if (frozen_bus) {
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
eeh_pe_dev_mode_mark(pe, EEH_DEV_REMOVED);
pci_lock_rescan_remove();
@@ -886,6 +887,7 @@
continue;
/* Notify all devices to be down */
+ eeh_pe_state_clear(pe, EEH_PE_PRI_BUS);
bus = eeh_pe_bus_get(phb_pe);
eeh_pe_dev_traverse(pe,
eeh_report_failure, NULL);
diff --git a/arch/powerpc/kernel/eeh_pe.c b/arch/powerpc/kernel/eeh_pe.c
index ca9e537..98f8180 100644
--- a/arch/powerpc/kernel/eeh_pe.c
+++ b/arch/powerpc/kernel/eeh_pe.c
@@ -928,7 +928,7 @@
bus = pe->phb->bus;
} else if (pe->type & EEH_PE_BUS ||
pe->type & EEH_PE_DEVICE) {
- if (pe->bus) {
+ if (pe->state & EEH_PE_PRI_BUS) {
bus = pe->bus;
goto out;
}
diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c
index ac64ffd..08b7a40 100644
--- a/arch/powerpc/kernel/module_64.c
+++ b/arch/powerpc/kernel/module_64.c
@@ -340,7 +340,7 @@
if (name[0] == '.') {
if (strcmp(name+1, "TOC.") == 0)
syms[i].st_shndx = SHN_ABS;
- memmove(name, name+1, strlen(name));
+ syms[i].st_name++;
}
}
}
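
Rather than memmove()ing the name over its leading dot, which scribbles
on a string table that newer binutils may suffix-merge across symbols,
the fix bumps the symbol's st_name offset so it points one byte further
into an untouched table. The equivalence as a hedged toy:

    #include <stdio.h>

    int main(void)
    {
        /* a string table with ".TOC." starting at offset 1 */
        const char strtab[] = "\0.TOC.\0.foo";
        unsigned st_name = 1;       /* symbol currently named ".TOC." */

        st_name++;                  /* now names "TOC.", table untouched */
        printf("resolved name: %s\n", strtab + st_name);
        return 0;
    }
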
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index dccc87e..3c5736e 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -1768,9 +1768,9 @@
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
+ rnd = (get_random_long() % (1UL<<(23-PAGE_SHIFT)));
else
- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
+ rnd = (get_random_long() % (1UL<<(30-PAGE_SHIFT)));
return rnd << PAGE_SHIFT;
}
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index ec9ec20..cb8be5d 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -206,7 +206,7 @@
#ifdef CONFIG_PPC_SMP_MUXED_IPI
struct cpu_messages {
- int messages; /* current messages */
+ long messages; /* current messages */
unsigned long data; /* data for cause ipi */
};
static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message);
@@ -218,7 +218,7 @@
info->data = data;
}
-void smp_muxed_ipi_message_pass(int cpu, int msg)
+void smp_muxed_ipi_set_message(int cpu, int msg)
{
struct cpu_messages *info = &per_cpu(ipi_message, cpu);
char *message = (char *)&info->messages;
@@ -228,6 +228,13 @@
*/
smp_mb();
message[msg] = 1;
+}
+
+void smp_muxed_ipi_message_pass(int cpu, int msg)
+{
+ struct cpu_messages *info = &per_cpu(ipi_message, cpu);
+
+ smp_muxed_ipi_set_message(cpu, msg);
/*
* cause_ipi functions are required to include a full barrier
* before doing whatever causes the IPI.
@@ -236,20 +243,31 @@
}
#ifdef __BIG_ENDIAN__
-#define IPI_MESSAGE(A) (1 << (24 - 8 * (A)))
+#define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A)))
#else
-#define IPI_MESSAGE(A) (1 << (8 * (A)))
+#define IPI_MESSAGE(A) (1uL << (8 * (A)))
#endif
irqreturn_t smp_ipi_demux(void)
{
struct cpu_messages *info = this_cpu_ptr(&ipi_message);
- unsigned int all;
+ unsigned long all;
mb(); /* order any irq clear */
do {
all = xchg(&info->messages, 0);
+#if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
+ /*
+ * Must check for PPC_MSG_RM_HOST_ACTION messages
+ * before PPC_MSG_CALL_FUNCTION messages because when
+ * a VM is destroyed, we call kick_all_cpus_sync()
+ * to ensure that any pending PPC_MSG_RM_HOST_ACTION
+ * messages have completed before we free any VCPUs.
+ */
+ if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION))
+ kvmppc_xics_ipi_action();
+#endif
if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION))
generic_smp_call_function_interrupt();
if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE))
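
Widening the message word from int to long gives each message type its
own byte (BITS_PER_LONG / 8 of them), and IPI_MESSAGE() names the low
bit of byte A from the correct end for each endianness, so the byte
store in smp_muxed_ipi_set_message() and the bit tests here agree. A
hedged demo of the little-endian form:

    #include <stdio.h>

    #define IPI_MESSAGE(A) (1UL << (8 * (A)))    /* little-endian form */

    int main(void)
    {
        unsigned long messages = 0;
        char *message = (char *)&messages;

        message[2] = 1;     /* smp_muxed_ipi_set_message(cpu, 2) */
        if (messages & IPI_MESSAGE(2))
            puts("PPC_MSG_TICK_BROADCAST pending");
        return 0;
    }
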
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 0570eef..7f7b6d8 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -8,7 +8,7 @@
KVM := ../../../virt/kvm
common-objs-y = $(KVM)/kvm_main.o $(KVM)/coalesced_mmio.o \
- $(KVM)/eventfd.o
+ $(KVM)/eventfd.o $(KVM)/vfio.o
CFLAGS_e500_mmu.o := -I.
CFLAGS_e500_mmu_host.o := -I.
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 638c6d9..b34220d 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -807,7 +807,7 @@
{
#ifdef CONFIG_PPC64
- INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
+ INIT_LIST_HEAD_RCU(&kvm->arch.spapr_tce_tables);
INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
index 54cf9bc..2c2d103 100644
--- a/arch/powerpc/kvm/book3s_64_vio.c
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -14,6 +14,7 @@
*
* Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
* Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
*/
#include <linux/types.h>
@@ -36,28 +37,69 @@
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
+#include <asm/iommu.h>
+#include <asm/tce.h>
-#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
-
-static long kvmppc_stt_npages(unsigned long window_size)
+static unsigned long kvmppc_tce_pages(unsigned long iommu_pages)
{
- return ALIGN((window_size >> SPAPR_TCE_SHIFT)
- * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
+ return ALIGN(iommu_pages * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}
-static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
+static unsigned long kvmppc_stt_pages(unsigned long tce_pages)
{
- struct kvm *kvm = stt->kvm;
- int i;
+ unsigned long stt_bytes = sizeof(struct kvmppc_spapr_tce_table) +
+ (tce_pages * sizeof(struct page *));
- mutex_lock(&kvm->lock);
- list_del(&stt->list);
- for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
+ return tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE;
+}
+
+static long kvmppc_account_memlimit(unsigned long stt_pages, bool inc)
+{
+ long ret = 0;
+
+ if (!current || !current->mm)
+ return ret; /* process exited */
+
+ down_write(&current->mm->mmap_sem);
+
+ if (inc) {
+ unsigned long locked, lock_limit;
+
+ locked = current->mm->locked_vm + stt_pages;
+ lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+ if (locked > lock_limit && !capable(CAP_IPC_LOCK))
+ ret = -ENOMEM;
+ else
+ current->mm->locked_vm += stt_pages;
+ } else {
+ if (WARN_ON_ONCE(stt_pages > current->mm->locked_vm))
+ stt_pages = current->mm->locked_vm;
+
+ current->mm->locked_vm -= stt_pages;
+ }
+
+ pr_debug("[%d] RLIMIT_MEMLOCK KVM %c%ld %ld/%ld%s\n", current->pid,
+ inc ? '+' : '-',
+ stt_pages << PAGE_SHIFT,
+ current->mm->locked_vm << PAGE_SHIFT,
+ rlimit(RLIMIT_MEMLOCK),
+ ret ? " - exceeded" : "");
+
+ up_write(&current->mm->mmap_sem);
+
+ return ret;
+}
+
+static void release_spapr_tce_table(struct rcu_head *head)
+{
+ struct kvmppc_spapr_tce_table *stt = container_of(head,
+ struct kvmppc_spapr_tce_table, rcu);
+ unsigned long i, npages = kvmppc_tce_pages(stt->size);
+
+ for (i = 0; i < npages; i++)
__free_page(stt->pages[i]);
- kfree(stt);
- mutex_unlock(&kvm->lock);
- kvm_put_kvm(kvm);
+ kfree(stt);
}
static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
@@ -65,7 +107,7 @@
struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
struct page *page;
- if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
+ if (vmf->pgoff >= kvmppc_tce_pages(stt->size))
return VM_FAULT_SIGBUS;
page = stt->pages[vmf->pgoff];
@@ -88,7 +130,14 @@
{
struct kvmppc_spapr_tce_table *stt = filp->private_data;
- release_spapr_tce_table(stt);
+ list_del_rcu(&stt->list);
+
+ kvm_put_kvm(stt->kvm);
+
+ kvmppc_account_memlimit(
+ kvmppc_stt_pages(kvmppc_tce_pages(stt->size)), false);
+ call_rcu(&stt->rcu, release_spapr_tce_table);
+
return 0;
}
@@ -98,20 +147,29 @@
};
long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
- struct kvm_create_spapr_tce *args)
+ struct kvm_create_spapr_tce_64 *args)
{
struct kvmppc_spapr_tce_table *stt = NULL;
- long npages;
+ unsigned long npages, size;
int ret = -ENOMEM;
int i;
+ if (!args->size)
+ return -EINVAL;
+
/* Check this LIOBN hasn't been previously allocated */
list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
if (stt->liobn == args->liobn)
return -EBUSY;
}
- npages = kvmppc_stt_npages(args->window_size);
+ size = args->size;
+ npages = kvmppc_tce_pages(size);
+ ret = kvmppc_account_memlimit(kvmppc_stt_pages(npages), true);
+ if (ret) {
+ stt = NULL;
+ goto fail;
+ }
stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
GFP_KERNEL);
@@ -119,7 +177,9 @@
goto fail;
stt->liobn = args->liobn;
- stt->window_size = args->window_size;
+ stt->page_shift = args->page_shift;
+ stt->offset = args->offset;
+ stt->size = size;
stt->kvm = kvm;
for (i = 0; i < npages; i++) {
@@ -131,7 +191,7 @@
kvm_get_kvm(kvm);
mutex_lock(&kvm->lock);
- list_add(&stt->list, &kvm->arch.spapr_tce_tables);
+ list_add_rcu(&stt->list, &kvm->arch.spapr_tce_tables);
mutex_unlock(&kvm->lock);
@@ -148,3 +208,59 @@
}
return ret;
}
+
+long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_list, unsigned long npages)
+{
+ struct kvmppc_spapr_tce_table *stt;
+ long i, ret = H_SUCCESS, idx;
+ unsigned long entry, ua = 0;
+ u64 __user *tces, tce;
+
+ stt = kvmppc_find_table(vcpu, liobn);
+ if (!stt)
+ return H_TOO_HARD;
+
+ entry = ioba >> stt->page_shift;
+ /*
+ * The SPAPR spec says that the maximum size of the list is 512 TCEs,
+ * so the whole table fits in a 4K page
+ */
+ if (npages > 512)
+ return H_PARAMETER;
+
+ if (tce_list & (SZ_4K - 1))
+ return H_PARAMETER;
+
+ ret = kvmppc_ioba_validate(stt, ioba, npages);
+ if (ret != H_SUCCESS)
+ return ret;
+
+ idx = srcu_read_lock(&vcpu->kvm->srcu);
+ if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, NULL)) {
+ ret = H_TOO_HARD;
+ goto unlock_exit;
+ }
+ tces = (u64 __user *) ua;
+
+ for (i = 0; i < npages; ++i) {
+ if (get_user(tce, tces + i)) {
+ ret = H_TOO_HARD;
+ goto unlock_exit;
+ }
+ tce = be64_to_cpu(tce);
+
+ ret = kvmppc_tce_validate(stt, tce);
+ if (ret != H_SUCCESS)
+ goto unlock_exit;
+
+ kvmppc_tce_put(stt, entry + i, tce);
+ }
+
+unlock_exit:
+ srcu_read_unlock(&vcpu->kvm->srcu, idx);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_put_tce_indirect);
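
The new accounting charges both the TCE backing pages and the
descriptor against RLIMIT_MEMLOCK before anything is allocated.
Re-deriving the charge for one concrete window (hedged: the descriptor
size is approximated and PAGE_SIZE is taken as the usual ppc64 64K):

    #include <stdio.h>

    #define PAGE_SIZE 65536UL
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
        /* a 2GB window of 64K IOMMU pages needs 32768 u64 TCEs */
        unsigned long iommu_pages = (2UL << 30) >> 16;
        unsigned long tce_pages = ALIGN(iommu_pages * 8, PAGE_SIZE)
                                  / PAGE_SIZE;          /* 4 pages */
        unsigned long stt_bytes = 64 /* approx. descriptor */ +
                                  tce_pages * sizeof(void *);

        printf("charged pages: %lu\n",
               tce_pages + ALIGN(stt_bytes, PAGE_SIZE) / PAGE_SIZE);
        return 0;
    }
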
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index 89e96b3..44be73e 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -14,6 +14,7 @@
*
* Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
* Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
+ * Copyright 2016 Alexey Kardashevskiy, IBM Corporation <aik@au1.ibm.com>
*/
#include <linux/types.h>
@@ -30,76 +31,321 @@
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
+#include <asm/mmu_context.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/kvm_host.h>
#include <asm/udbg.h>
+#include <asm/iommu.h>
+#include <asm/tce.h>
+#include <asm/iommu.h>
#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
-/* WARNING: This will be called in real-mode on HV KVM and virtual
+/*
+ * Finds a TCE table descriptor by LIOBN.
+ *
+ * WARNING: This will be called in real or virtual mode on HV KVM and virtual
* mode on PR KVM
*/
-long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
- unsigned long ioba, unsigned long tce)
+struct kvmppc_spapr_tce_table *kvmppc_find_table(struct kvm_vcpu *vcpu,
+ unsigned long liobn)
{
struct kvm *kvm = vcpu->kvm;
struct kvmppc_spapr_tce_table *stt;
+ list_for_each_entry_lockless(stt, &kvm->arch.spapr_tce_tables, list)
+ if (stt->liobn == liobn)
+ return stt;
+
+ return NULL;
+}
+EXPORT_SYMBOL_GPL(kvmppc_find_table);
+
+/*
+ * Validates IO address.
+ *
+ * WARNING: This will be called in real-mode on HV KVM and virtual
+ * mode on PR KVM
+ */
+long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
+ unsigned long ioba, unsigned long npages)
+{
+ unsigned long mask = (1ULL << stt->page_shift) - 1;
+ unsigned long idx = ioba >> stt->page_shift;
+
+ if ((ioba & mask) || (idx < stt->offset) ||
+ (idx - stt->offset + npages > stt->size) ||
+ (idx + npages < idx))
+ return H_PARAMETER;
+
+ return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_ioba_validate);
+
+/*
+ * Validates TCE address.
+ * At the moment only the flags and the page mask are validated.
+ * As the host kernel does not access those addresses (it just puts them
+ * into the table and user space is supposed to process them), we can skip
+ * checking other things (such as TCE is a guest RAM address or the page
+ * was actually allocated).
+ *
+ * WARNING: This will be called in real-mode on HV KVM and virtual
+ * mode on PR KVM
+ */
+long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *stt, unsigned long tce)
+{
+ unsigned long page_mask = ~((1ULL << stt->page_shift) - 1);
+ unsigned long mask = ~(page_mask | TCE_PCI_WRITE | TCE_PCI_READ);
+
+ if (tce & mask)
+ return H_PARAMETER;
+
+ return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_tce_validate);
+
+/* Note on the use of page_address() in real mode,
+ *
+ * It is safe to use page_address() in real mode on ppc64 because
+ * page_address() is always defined as lowmem_page_address()
+ * which returns __va(PFN_PHYS(page_to_pfn(page))) which is arithmetic
+ * operation and does not access page struct.
+ *
+ * Theoretically page_address() could be defined differently,
+ * but either WANT_PAGE_VIRTUAL or HASHED_PAGE_VIRTUAL
+ * would have to be enabled.
+ * WANT_PAGE_VIRTUAL is never enabled on ppc32/ppc64,
+ * HASHED_PAGE_VIRTUAL could be enabled for ppc32 only and only
+ * if CONFIG_HIGHMEM is defined. As CONFIG_SPARSEMEM_VMEMMAP
+ * is not expected to be enabled on ppc32, page_address()
+ * is safe for ppc32 as well.
+ *
+ * WARNING: This will be called in real-mode on HV KVM and virtual
+ * mode on PR KVM
+ */
+static u64 *kvmppc_page_address(struct page *page)
+{
+#if defined(HASHED_PAGE_VIRTUAL) || defined(WANT_PAGE_VIRTUAL)
+#error TODO: fix to avoid page_address() here
+#endif
+ return (u64 *) page_address(page);
+}
+
+/*
+ * Handles TCE requests for emulated devices.
+ * Puts guest TCE values to the table and expects user space to convert them.
+ * Called in both real and virtual modes.
+ * Cannot fail so kvmppc_tce_validate must be called before it.
+ *
+ * WARNING: This will be called in real-mode on HV KVM and virtual
+ * mode on PR KVM
+ */
+void kvmppc_tce_put(struct kvmppc_spapr_tce_table *stt,
+ unsigned long idx, unsigned long tce)
+{
+ struct page *page;
+ u64 *tbl;
+
+ idx -= stt->offset;
+ page = stt->pages[idx / TCES_PER_PAGE];
+ tbl = kvmppc_page_address(page);
+
+ tbl[idx % TCES_PER_PAGE] = tce;
+}
+EXPORT_SYMBOL_GPL(kvmppc_tce_put);
+
+long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
+ unsigned long *ua, unsigned long **prmap)
+{
+ unsigned long gfn = gpa >> PAGE_SHIFT;
+ struct kvm_memory_slot *memslot;
+
+ memslot = search_memslots(kvm_memslots(kvm), gfn);
+ if (!memslot)
+ return -EINVAL;
+
+ *ua = __gfn_to_hva_memslot(memslot, gfn) |
+ (gpa & ~(PAGE_MASK | TCE_PCI_READ | TCE_PCI_WRITE));
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+ if (prmap)
+ *prmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
+#endif
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(kvmppc_gpa_to_ua);
+
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
+ unsigned long ioba, unsigned long tce)
+{
+ struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+ long ret;
+
/* udbg_printf("H_PUT_TCE(): liobn=0x%lx ioba=0x%lx, tce=0x%lx\n", */
/* liobn, ioba, tce); */
- list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
- if (stt->liobn == liobn) {
- unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
- struct page *page;
- u64 *tbl;
+ if (!stt)
+ return H_TOO_HARD;
- /* udbg_printf("H_PUT_TCE: liobn 0x%lx => stt=%p window_size=0x%x\n", */
- /* liobn, stt, stt->window_size); */
- if (ioba >= stt->window_size)
- return H_PARAMETER;
+ ret = kvmppc_ioba_validate(stt, ioba, 1);
+ if (ret != H_SUCCESS)
+ return ret;
- page = stt->pages[idx / TCES_PER_PAGE];
- tbl = (u64 *)page_address(page);
+ ret = kvmppc_tce_validate(stt, tce);
+ if (ret != H_SUCCESS)
+ return ret;
- /* FIXME: Need to validate the TCE itself */
- /* udbg_printf("tce @ %p\n", &tbl[idx % TCES_PER_PAGE]); */
- tbl[idx % TCES_PER_PAGE] = tce;
- return H_SUCCESS;
- }
- }
+ kvmppc_tce_put(stt, ioba >> stt->page_shift, tce);
- /* Didn't find the liobn, punt it to userspace */
- return H_TOO_HARD;
+ return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_put_tce);
+static long kvmppc_rm_ua_to_hpa(struct kvm_vcpu *vcpu,
+ unsigned long ua, unsigned long *phpa)
+{
+ pte_t *ptep, pte;
+ unsigned shift = 0;
+
+ ptep = __find_linux_pte_or_hugepte(vcpu->arch.pgdir, ua, NULL, &shift);
+ if (!ptep || !pte_present(*ptep))
+ return -ENXIO;
+ pte = *ptep;
+
+ if (!shift)
+ shift = PAGE_SHIFT;
+
+ /* Avoid handling anything potentially complicated in realmode */
+ if (shift > PAGE_SHIFT)
+ return -EAGAIN;
+
+ if (!pte_young(pte))
+ return -EAGAIN;
+
+ *phpa = (pte_pfn(pte) << PAGE_SHIFT) | (ua & ((1ULL << shift) - 1)) |
+ (ua & ~PAGE_MASK);
+
+ return 0;
+}
+
+long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_list, unsigned long npages)
+{
+ struct kvmppc_spapr_tce_table *stt;
+ long i, ret = H_SUCCESS;
+ unsigned long tces, entry, ua = 0;
+ unsigned long *rmap = NULL;
+
+ stt = kvmppc_find_table(vcpu, liobn);
+ if (!stt)
+ return H_TOO_HARD;
+
+ entry = ioba >> stt->page_shift;
+ /*
+ * The spec says that the maximum size of the list is 512 TCEs,
+ * so the whole table being addressed resides in a single 4K page
+ */
+ if (npages > 512)
+ return H_PARAMETER;
+
+ if (tce_list & (SZ_4K - 1))
+ return H_PARAMETER;
+
+ ret = kvmppc_ioba_validate(stt, ioba, npages);
+ if (ret != H_SUCCESS)
+ return ret;
+
+ if (kvmppc_gpa_to_ua(vcpu->kvm, tce_list, &ua, &rmap))
+ return H_TOO_HARD;
+
+ rmap = (void *) vmalloc_to_phys(rmap);
+
+ /*
+ * Synchronize with the MMU notifier callbacks in
+ * book3s_64_mmu_hv.c (kvm_unmap_hva_hv etc.).
+ * While we have the rmap lock, code running on other CPUs
+ * cannot finish unmapping the host real page that backs
+ * this guest real page, so we are OK to access the host
+ * real page.
+ */
+ lock_rmap(rmap);
+ if (kvmppc_rm_ua_to_hpa(vcpu, ua, &tces)) {
+ ret = H_TOO_HARD;
+ goto unlock_exit;
+ }
+
+ for (i = 0; i < npages; ++i) {
+ unsigned long tce = be64_to_cpu(((u64 *)tces)[i]);
+
+ ret = kvmppc_tce_validate(stt, tce);
+ if (ret != H_SUCCESS)
+ goto unlock_exit;
+
+ kvmppc_tce_put(stt, entry + i, tce);
+ }
+
+unlock_exit:
+ unlock_rmap(rmap);
+
+ return ret;
+}
+
+long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
+ unsigned long liobn, unsigned long ioba,
+ unsigned long tce_value, unsigned long npages)
+{
+ struct kvmppc_spapr_tce_table *stt;
+ long i, ret;
+
+ stt = kvmppc_find_table(vcpu, liobn);
+ if (!stt)
+ return H_TOO_HARD;
+
+ ret = kvmppc_ioba_validate(stt, ioba, npages);
+ if (ret != H_SUCCESS)
+ return ret;
+
+ /* Check permission bits only, to allow userspace to poison TCEs for debugging */
+ if (tce_value & (TCE_PCI_WRITE | TCE_PCI_READ))
+ return H_PARAMETER;
+
+ for (i = 0; i < npages; ++i, ioba += (1ULL << stt->page_shift))
+ kvmppc_tce_put(stt, ioba >> stt->page_shift, tce_value);
+
+ return H_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(kvmppc_h_stuff_tce);
+
long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
unsigned long ioba)
{
- struct kvm *kvm = vcpu->kvm;
- struct kvmppc_spapr_tce_table *stt;
+ struct kvmppc_spapr_tce_table *stt = kvmppc_find_table(vcpu, liobn);
+ long ret;
+ unsigned long idx;
+ struct page *page;
+ u64 *tbl;
- list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
- if (stt->liobn == liobn) {
- unsigned long idx = ioba >> SPAPR_TCE_SHIFT;
- struct page *page;
- u64 *tbl;
+ if (!stt)
+ return H_TOO_HARD;
- if (ioba >= stt->window_size)
- return H_PARAMETER;
+ ret = kvmppc_ioba_validate(stt, ioba, 1);
+ if (ret != H_SUCCESS)
+ return ret;
- page = stt->pages[idx / TCES_PER_PAGE];
- tbl = (u64 *)page_address(page);
+ idx = (ioba >> stt->page_shift) - stt->offset;
+ page = stt->pages[idx / TCES_PER_PAGE];
+ tbl = (u64 *)page_address(page);
- vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
- return H_SUCCESS;
- }
- }
+ vcpu->arch.gpr[4] = tbl[idx % TCES_PER_PAGE];
- /* Didn't find the liobn, punt it to userspace */
- return H_TOO_HARD;
+ return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_get_tce);
+
+#endif /* KVM_BOOK3S_HV_POSSIBLE */
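
kvmppc_ioba_validate() above folds four failure modes into one
expression: an unaligned IOBA, a start below the window offset, an end
beyond the window, and npages wrap-around. A standalone rerun of the
check with concrete numbers (hedged: the H_PARAMETER value mirrors
asm/hvcall.h):

    #include <stdio.h>

    #define H_SUCCESS   0
    #define H_PARAMETER (-4)

    static long ioba_validate(unsigned page_shift, unsigned long offset,
                              unsigned long size, unsigned long ioba,
                              unsigned long npages)
    {
        unsigned long mask = (1UL << page_shift) - 1;
        unsigned long idx = ioba >> page_shift;

        if ((ioba & mask) || (idx < offset) ||
            (idx - offset + npages > size) || (idx + npages < idx))
            return H_PARAMETER;
        return H_SUCCESS;
    }

    int main(void)
    {
        /* a 64K-page window of 32768 entries starting at page 0 */
        printf("%ld\n", ioba_validate(16, 0, 32768, 0x10000, 1)); /* ok */
        printf("%ld\n", ioba_validate(16, 0, 32768, 0x10001, 1)); /* unaligned */
        printf("%ld\n", ioba_validate(16, 0, 32768, 0, 32769));   /* too long */
        return 0;
    }
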
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index baeddb0..f47fffe 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -81,6 +81,17 @@
module_param(target_smt_mode, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");
+#ifdef CONFIG_KVM_XICS
+static struct kernel_param_ops module_param_ops = {
+ .set = param_set_int,
+ .get = param_get_int,
+};
+
+module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
+ S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
+#endif
+
static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
@@ -768,7 +779,31 @@
if (kvmppc_xics_enabled(vcpu)) {
ret = kvmppc_xics_hcall(vcpu, req);
break;
- } /* fallthrough */
+ }
+ return RESUME_HOST;
+ case H_PUT_TCE:
+ ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_PUT_TCE_INDIRECT:
+ ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6),
+ kvmppc_get_gpr(vcpu, 7));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
+ case H_STUFF_TCE:
+ ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
+ kvmppc_get_gpr(vcpu, 5),
+ kvmppc_get_gpr(vcpu, 6),
+ kvmppc_get_gpr(vcpu, 7));
+ if (ret == H_TOO_HARD)
+ return RESUME_HOST;
+ break;
default:
return RESUME_HOST;
}
@@ -2279,6 +2314,46 @@
}
/*
+ * Clear core from the list of active host cores as we are about to
+ * enter the guest. Only do this if it is the primary thread of the
+ * core (not if a subcore) that is entering the guest.
+ */
+static inline void kvmppc_clear_host_core(int cpu)
+{
+ int core;
+
+ if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
+ return;
+ /*
+ * Memory barrier can be omitted here as we will do a smp_wmb()
+ * later in kvmppc_start_thread and we need to ensure that state is
+ * visible to other CPUs only after we enter the guest.
+ */
+ core = cpu >> threads_shift;
+ kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
+}
+
+/*
+ * Advertise this core as an active host core since we exited the guest
+ * Only need to do this if it is the primary thread of the core that is
+ * exiting.
+ */
+static inline void kvmppc_set_host_core(int cpu)
+{
+ int core;
+
+ if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
+ return;
+
+ /*
+ * Memory barrier can be omitted here because we do a spin_unlock
+ * immediately after this which provides the memory barrier.
+ */
+ core = cpu >> threads_shift;
+ kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
+}
+
+/*
* Run a set of guest threads on a physical core.
* Called with vc->lock held.
*/
@@ -2390,6 +2465,8 @@
}
}
+ kvmppc_clear_host_core(pcpu);
+
/* Start all the threads */
active = 0;
for (sub = 0; sub < core_info.n_subcores; ++sub) {
@@ -2486,6 +2563,8 @@
kvmppc_ipi_thread(pcpu + i);
}
+ kvmppc_set_host_core(pcpu);
+
spin_unlock(&vc->lock);
/* make sure updates to secondary vcpu structs are visible now */
@@ -2984,6 +3063,114 @@
goto out_srcu;
}
+#ifdef CONFIG_KVM_XICS
+static int kvmppc_cpu_notify(struct notifier_block *self, unsigned long action,
+ void *hcpu)
+{
+ unsigned long cpu = (long)hcpu;
+
+ switch (action) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_PREPARE_FROZEN:
+ kvmppc_set_host_core(cpu);
+ break;
+
+#ifdef CONFIG_HOTPLUG_CPU
+ case CPU_DEAD:
+ case CPU_DEAD_FROZEN:
+ case CPU_UP_CANCELED:
+ case CPU_UP_CANCELED_FROZEN:
+ kvmppc_clear_host_core(cpu);
+ break;
+#endif
+ default:
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block kvmppc_cpu_notifier = {
+ .notifier_call = kvmppc_cpu_notify,
+};
+
+/*
+ * Allocate a per-core structure for managing state about which cores are
+ * running in the host versus the guest and for exchanging data between
+ * real mode KVM and CPU running in the host.
+ * This is only done for the first VM.
+ * The allocated structure stays even if all VMs have stopped.
+ * It is only freed when the kvm-hv module is unloaded.
+ * It's OK for this routine to fail; we just don't support host
+ * core operations like redirecting H_IPI wakeups.
+ */
+void kvmppc_alloc_host_rm_ops(void)
+{
+ struct kvmppc_host_rm_ops *ops;
+ unsigned long l_ops;
+ int cpu, core;
+ int size;
+
+ /* Not the first time here ? */
+ if (kvmppc_host_rm_ops_hv != NULL)
+ return;
+
+ ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL);
+ if (!ops)
+ return;
+
+ size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core);
+ ops->rm_core = kzalloc(size, GFP_KERNEL);
+
+ if (!ops->rm_core) {
+ kfree(ops);
+ return;
+ }
+
+ get_online_cpus();
+
+ for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
+ if (!cpu_online(cpu))
+ continue;
+
+ core = cpu >> threads_shift;
+ ops->rm_core[core].rm_state.in_host = 1;
+ }
+
+ ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv;
+
+ /*
+ * Make the contents of the kvmppc_host_rm_ops structure visible
+ * to other CPUs before we assign it to the global variable.
+ * Do an atomic assignment (no locks used here), but if someone
+ * beats us to it, just free our copy and return.
+ */
+ smp_wmb();
+ l_ops = (unsigned long) ops;
+
+ if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
+ put_online_cpus();
+ kfree(ops->rm_core);
+ kfree(ops);
+ return;
+ }
+
+ register_cpu_notifier(&kvmppc_cpu_notifier);
+
+ put_online_cpus();
+}
+
+void kvmppc_free_host_rm_ops(void)
+{
+ if (kvmppc_host_rm_ops_hv) {
+ unregister_cpu_notifier(&kvmppc_cpu_notifier);
+ kfree(kvmppc_host_rm_ops_hv->rm_core);
+ kfree(kvmppc_host_rm_ops_hv);
+ kvmppc_host_rm_ops_hv = NULL;
+ }
+}
+#endif
+
static int kvmppc_core_init_vm_hv(struct kvm *kvm)
{
unsigned long lpcr, lpid;
@@ -2996,6 +3183,8 @@
return -ENOMEM;
kvm->arch.lpid = lpid;
+ kvmppc_alloc_host_rm_ops();
+
/*
* Since we don't flush the TLB when tearing down a VM,
* and this lpid might have previously been used,
@@ -3229,6 +3418,7 @@
static void kvmppc_book3s_exit_hv(void)
{
+ kvmppc_free_host_rm_ops();
kvmppc_hv_ops = NULL;
}
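kvmppc_alloc_host_rm_ops() above publishes kvmppc_host_rm_ops_hv at most once across racing VM creations: build the structure, order its contents with smp_wmb(), then install it with a single cmpxchg64, freeing the local copy on a lost race. A stand-alone model of that publication pattern, using GCC atomics (struct ops, global_ops and publish_once are illustrative names, not part of the patch):

#include <stdlib.h>

struct ops { int ready; };

static struct ops *global_ops;	/* stands in for kvmppc_host_rm_ops_hv */

static void publish_once(void)
{
	struct ops *p, *expected = NULL;

	if (global_ops)			/* not the first time here? */
		return;

	p = calloc(1, sizeof(*p));
	if (!p)
		return;			/* failing is tolerated, as above */
	p->ready = 1;

	/* release ordering publishes the contents, like smp_wmb() above */
	if (!__atomic_compare_exchange_n(&global_ops, &expected, p, 0,
					 __ATOMIC_RELEASE, __ATOMIC_RELAXED))
		free(p);		/* someone beat us to it */
}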
diff --git a/arch/powerpc/kvm/book3s_hv_builtin.c b/arch/powerpc/kvm/book3s_hv_builtin.c
index fd7006b..5f0380d 100644
--- a/arch/powerpc/kvm/book3s_hv_builtin.c
+++ b/arch/powerpc/kvm/book3s_hv_builtin.c
@@ -283,3 +283,6 @@
kvmhv_interrupt_vcore(vc, ee);
}
}
+
+struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
+EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);
diff --git a/arch/powerpc/kvm/book3s_hv_rm_xics.c b/arch/powerpc/kvm/book3s_hv_rm_xics.c
index 24f5807..980d8a6 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_xics.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_xics.c
@@ -17,12 +17,16 @@
#include <asm/xics.h>
#include <asm/debug.h>
#include <asm/synch.h>
+#include <asm/cputhreads.h>
#include <asm/ppc-opcode.h>
#include "book3s_xics.h"
#define DEBUG_PASSUP
+int h_ipi_redirect = 1;
+EXPORT_SYMBOL(h_ipi_redirect);
+
static void icp_rm_deliver_irq(struct kvmppc_xics *xics, struct kvmppc_icp *icp,
u32 new_irq);
@@ -50,11 +54,84 @@
/* -- ICP routines -- */
+#ifdef CONFIG_SMP
+static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu)
+{
+ int hcpu;
+
+ hcpu = hcore << threads_shift;
+ kvmppc_host_rm_ops_hv->rm_core[hcore].rm_data = vcpu;
+ smp_muxed_ipi_set_message(hcpu, PPC_MSG_RM_HOST_ACTION);
+ icp_native_cause_ipi_rm(hcpu);
+}
+#else
+static inline void icp_send_hcore_msg(int hcore, struct kvm_vcpu *vcpu) { }
+#endif
+
+/*
+ * We start the search from our current CPU id in the core map
+ * and go in a circle until we get back to our id, looking for a
+ * core that is running in host context and that hasn't already
+ * been targeted for another rm_host_ops.
+ *
+ * In the future, we could consider using a fairer algorithm (one
+ * that distributes the IPIs better).
+ *
+ * Returns -1 if no CPU could be found in the host.
+ * Otherwise, returns a CPU id which has been reserved for use.
+ */
+static inline int grab_next_hostcore(int start,
+ struct kvmppc_host_rm_core *rm_core, int max, int action)
+{
+ bool success;
+ int core;
+ union kvmppc_rm_state old, new;
+
+ for (core = start + 1; core < max; core++) {
+ old = new = READ_ONCE(rm_core[core].rm_state);
+
+ if (!old.in_host || old.rm_action)
+ continue;
+
+ /* Try to grab this host core if not taken already. */
+ new.rm_action = action;
+
+ success = cmpxchg64(&rm_core[core].rm_state.raw,
+ old.raw, new.raw) == old.raw;
+ if (success) {
+ /*
+ * Make sure that the store to the rm_action is made
+ * visible before we return to the caller (and before the
+ * subsequent store to rm_data) to synchronize with
+ * the IPI handler.
+ */
+ smp_wmb();
+ return core;
+ }
+ }
+
+ return -1;
+}
+
+static inline int find_available_hostcore(int action)
+{
+ int core;
+ int my_core = smp_processor_id() >> threads_shift;
+ struct kvmppc_host_rm_core *rm_core = kvmppc_host_rm_ops_hv->rm_core;
+
+ core = grab_next_hostcore(my_core, rm_core, cpu_nr_cores(), action);
+ if (core == -1)
+ core = grab_next_hostcore(core, rm_core, my_core, action);
+
+ return core;
+}
+
static void icp_rm_set_vcpu_irq(struct kvm_vcpu *vcpu,
struct kvm_vcpu *this_vcpu)
{
struct kvmppc_icp *this_icp = this_vcpu->arch.icp;
int cpu;
+ int hcore;
/* Mark the target VCPU as having an interrupt pending */
vcpu->stat.queue_intr++;
@@ -66,11 +143,22 @@
return;
}
- /* Check if the core is loaded, if not, too hard */
+ /*
+ * Check if the core is loaded;
+ * if not, find an available host core to post to so it can wake the VCPU;
+ * if we can't find one, set up state to eventually return "too hard".
+ */
cpu = vcpu->arch.thread_cpu;
if (cpu < 0 || cpu >= nr_cpu_ids) {
- this_icp->rm_action |= XICS_RM_KICK_VCPU;
- this_icp->rm_kick_target = vcpu;
+ hcore = -1;
+ if (kvmppc_host_rm_ops_hv && h_ipi_redirect)
+ hcore = find_available_hostcore(XICS_RM_KICK_VCPU);
+ if (hcore != -1) {
+ icp_send_hcore_msg(hcore, vcpu);
+ } else {
+ this_icp->rm_action |= XICS_RM_KICK_VCPU;
+ this_icp->rm_kick_target = vcpu;
+ }
return;
}
@@ -623,3 +711,40 @@
bail:
return check_too_hard(xics, icp);
}
+
+/* --- Non-real mode XICS-related built-in routines --- */
+
+/*
+ * Host operations poked by real mode KVM
+ */
+static void rm_host_ipi_action(int action, void *data)
+{
+ switch (action) {
+ case XICS_RM_KICK_VCPU:
+ kvmppc_host_rm_ops_hv->vcpu_kick(data);
+ break;
+ default:
+ WARN(1, "Unexpected rm_action=%d data=%p\n", action, data);
+ break;
+ }
+}
+
+void kvmppc_xics_ipi_action(void)
+{
+ int core;
+ unsigned int cpu = smp_processor_id();
+ struct kvmppc_host_rm_core *rm_corep;
+
+ core = cpu >> threads_shift;
+ rm_corep = &kvmppc_host_rm_ops_hv->rm_core[core];
+
+ if (rm_corep->rm_data) {
+ rm_host_ipi_action(rm_corep->rm_state.rm_action,
+ rm_corep->rm_data);
+ /* Order these stores against the real mode KVM */
+ rm_corep->rm_data = NULL;
+ smp_wmb();
+ rm_corep->rm_state.rm_action = 0;
+ }
+}
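grab_next_hostcore() above claims a host core with one 64-bit cmpxchg over the packed rm_state word, so the real-mode sender needs no lock against the host side. A stand-alone model of the claim loop (grab_next, rm_word and the two 32-bit fields are an assumed layout for illustration):

#include <stdbool.h>
#include <stdint.h>

struct rm_state { uint32_t in_host; uint32_t rm_action; };
union rm_word { struct rm_state s; uint64_t raw; };

static int grab_next(union rm_word *cores, int start, int max, uint32_t action)
{
	for (int core = start + 1; core < max; core++) {
		union rm_word old, new;

		old.raw = __atomic_load_n(&cores[core].raw, __ATOMIC_RELAXED);
		if (!old.s.in_host || old.s.rm_action)
			continue;	/* in guest context, or already claimed */

		new = old;
		new.s.rm_action = action;
		/* a single cmpxchg both re-checks and claims the core */
		if (__atomic_compare_exchange_n(&cores[core].raw, &old.raw,
						new.raw, false,
						__ATOMIC_ACQ_REL,
						__ATOMIC_RELAXED))
			return core;
	}
	return -1;			/* nothing free in this range */
}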
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index 6ee26de..ed16182 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -2006,8 +2006,8 @@
.long 0 /* 0x12c */
.long 0 /* 0x130 */
.long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
- .long 0 /* 0x138 */
- .long 0 /* 0x13c */
+ .long DOTSYM(kvmppc_h_stuff_tce) - hcall_real_table
+ .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
.long 0 /* 0x140 */
.long 0 /* 0x144 */
.long 0 /* 0x148 */
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index f2c75a1..02176fd 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -280,6 +280,37 @@
return EMULATE_DONE;
}
+static int kvmppc_h_pr_put_tce_indirect(struct kvm_vcpu *vcpu)
+{
+ unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+ unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+ unsigned long tce = kvmppc_get_gpr(vcpu, 6);
+ unsigned long npages = kvmppc_get_gpr(vcpu, 7);
+ long rc;
+
+ rc = kvmppc_h_put_tce_indirect(vcpu, liobn, ioba,
+ tce, npages);
+ if (rc == H_TOO_HARD)
+ return EMULATE_FAIL;
+ kvmppc_set_gpr(vcpu, 3, rc);
+ return EMULATE_DONE;
+}
+
+static int kvmppc_h_pr_stuff_tce(struct kvm_vcpu *vcpu)
+{
+ unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
+ unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
+ unsigned long tce_value = kvmppc_get_gpr(vcpu, 6);
+ unsigned long npages = kvmppc_get_gpr(vcpu, 7);
+ long rc;
+
+ rc = kvmppc_h_stuff_tce(vcpu, liobn, ioba, tce_value, npages);
+ if (rc == H_TOO_HARD)
+ return EMULATE_FAIL;
+ kvmppc_set_gpr(vcpu, 3, rc);
+ return EMULATE_DONE;
+}
+
static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
long rc = kvmppc_xics_hcall(vcpu, cmd);
@@ -306,6 +337,10 @@
return kvmppc_h_pr_bulk_remove(vcpu);
case H_PUT_TCE:
return kvmppc_h_pr_put_tce(vcpu);
+ case H_PUT_TCE_INDIRECT:
+ return kvmppc_h_pr_put_tce_indirect(vcpu);
+ case H_STUFF_TCE:
+ return kvmppc_h_pr_stuff_tce(vcpu);
case H_CEDE:
kvmppc_set_msr_fast(vcpu, kvmppc_get_msr(vcpu) | MSR_EE);
kvm_vcpu_block(vcpu);
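Both new PR-KVM wrappers follow the same PAPR convention as kvmppc_h_pr_put_tce() above: hcall arguments arrive in GPRs 4-7, the status goes back in GPR 3, and only H_TOO_HARD falls through to the host as EMULATE_FAIL. The shared shape, as a sketch (pr_papr_hcall4 is a hypothetical helper, not part of the patch):

static int pr_papr_hcall4(struct kvm_vcpu *vcpu,
			  long (*fn)(struct kvm_vcpu *, unsigned long,
				     unsigned long, unsigned long,
				     unsigned long))
{
	long rc = fn(vcpu, kvmppc_get_gpr(vcpu, 4), kvmppc_get_gpr(vcpu, 5),
		     kvmppc_get_gpr(vcpu, 6), kvmppc_get_gpr(vcpu, 7));

	if (rc == H_TOO_HARD)		/* let the host handle it */
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);	/* return status to the guest */
	return EMULATE_DONE;
}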
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index a3b182d..19aa59b 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -33,6 +33,7 @@
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
+#include <asm/iommu.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"
@@ -437,6 +438,16 @@
unsigned int i;
struct kvm_vcpu *vcpu;
+#ifdef CONFIG_KVM_XICS
+ /*
+ * We call kick_all_cpus_sync() to ensure that all
+ * CPUs have executed any pending IPIs before we
+ * continue and free the VCPU structures below.
+ */
+ if (is_kvmppc_hv_enabled(kvm))
+ kick_all_cpus_sync();
+#endif
+
kvm_for_each_vcpu(i, vcpu, kvm)
kvm_arch_vcpu_free(vcpu);
@@ -509,6 +520,7 @@
#ifdef CONFIG_PPC_BOOK3S_64
case KVM_CAP_SPAPR_TCE:
+ case KVM_CAP_SPAPR_TCE_64:
case KVM_CAP_PPC_ALLOC_HTAB:
case KVM_CAP_PPC_RTAS:
case KVM_CAP_PPC_FIXUP_HCALL:
@@ -569,6 +581,9 @@
case KVM_CAP_PPC_GET_SMMU_INFO:
r = 1;
break;
+ case KVM_CAP_SPAPR_MULTITCE:
+ r = 1;
+ break;
#endif
default:
r = 0;
@@ -1331,13 +1346,34 @@
break;
}
#ifdef CONFIG_PPC_BOOK3S_64
+ case KVM_CREATE_SPAPR_TCE_64: {
+ struct kvm_create_spapr_tce_64 create_tce_64;
+
+ r = -EFAULT;
+ if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
+ goto out;
+ if (create_tce_64.flags) {
+ r = -EINVAL;
+ goto out;
+ }
+ r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
+ goto out;
+ }
case KVM_CREATE_SPAPR_TCE: {
struct kvm_create_spapr_tce create_tce;
+ struct kvm_create_spapr_tce_64 create_tce_64;
r = -EFAULT;
if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
goto out;
- r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
+
+ create_tce_64.liobn = create_tce.liobn;
+ create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
+ create_tce_64.offset = 0;
+ create_tce_64.size = create_tce.window_size >>
+ IOMMU_PAGE_SHIFT_4K;
+ create_tce_64.flags = 0;
+ r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
goto out;
}
case KVM_PPC_GET_SMMU_INFO: {
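The legacy KVM_CREATE_SPAPR_TCE path is now a thin shim over the 64-bit ioctl: a 32-bit window_size in bytes becomes a table size counted in 4K IOMMU pages, with offset and flags zeroed. A small stand-alone check of that arithmetic (the 1 GB window is an illustrative value):

#include <stdint.h>
#include <stdio.h>

#define IOMMU_PAGE_SHIFT_4K 12

int main(void)
{
	uint32_t window_size = 0x40000000u;	/* 1 GB legacy DMA window */
	uint64_t size = (uint64_t)window_size >> IOMMU_PAGE_SHIFT_4K;

	/* 1 GB / 4 KB = 262144 TCE entries; offset = 0, flags = 0 */
	printf("%llu TCE entries\n", (unsigned long long)size);
	return 0;
}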
diff --git a/arch/powerpc/mm/hash64_64k.c b/arch/powerpc/mm/hash64_64k.c
index 0762c1e..edb0991 100644
--- a/arch/powerpc/mm/hash64_64k.c
+++ b/arch/powerpc/mm/hash64_64k.c
@@ -111,7 +111,13 @@
*/
if (!(old_pte & _PAGE_COMBO)) {
flush_hash_page(vpn, rpte, MMU_PAGE_64K, ssize, flags);
- old_pte &= ~_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND;
+ /*
+ * Clear the old slot details from the old and new PTEs.
+ * On hash insert failure we use the old PTE value, and we
+ * don't want slot information there if we had an insert failure.
+ */
+ old_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
+ new_pte &= ~(_PAGE_HASHPTE | _PAGE_F_GIX | _PAGE_F_SECOND);
goto htab_insert_hpte;
}
/*
diff --git a/arch/powerpc/mm/hugepage-hash64.c b/arch/powerpc/mm/hugepage-hash64.c
index 49b152b..eb2accd 100644
--- a/arch/powerpc/mm/hugepage-hash64.c
+++ b/arch/powerpc/mm/hugepage-hash64.c
@@ -78,9 +78,19 @@
* base page size. This is because demote_segment won't flush
* hash page table entries.
*/
- if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO))
+ if ((old_pmd & _PAGE_HASHPTE) && !(old_pmd & _PAGE_COMBO)) {
flush_hash_hugepage(vsid, ea, pmdp, MMU_PAGE_64K,
ssize, flags);
+ /*
+ * With THP, we also clear the slot information with
+ * respect to all the 64K hash PTEs mapping the 16MB
+ * page. They are all invalid now. This makes sure we
+ * don't find the slot valid when we fault with a 4K
+ * base page size.
+ */
+ memset(hpte_slot_array, 0, PTE_FRAG_SIZE);
+ }
}
valid = hpte_valid(hpte_slot_array, index);
diff --git a/arch/powerpc/mm/mmap.c b/arch/powerpc/mm/mmap.c
index 0f0502e..4087705 100644
--- a/arch/powerpc/mm/mmap.c
+++ b/arch/powerpc/mm/mmap.c
@@ -59,9 +59,9 @@
/* 8MB for 32bit, 1GB for 64bit */
if (is_32bit_task())
- rnd = (unsigned long)get_random_int() % (1<<(23-PAGE_SHIFT));
+ rnd = get_random_long() % (1<<(23-PAGE_SHIFT));
else
- rnd = (unsigned long)get_random_int() % (1<<(30-PAGE_SHIFT));
+ rnd = get_random_long() % (1UL<<(30-PAGE_SHIFT));
return rnd << PAGE_SHIFT;
}
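get_random_long() hands back a full unsigned long, so the modulus in both branches now operates on a value as wide as the result it feeds. A crude stand-alone model of the 64-bit branch (rand() is only a stand-in for the kernel RNG; 4K pages assumed):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define PAGE_SHIFT 12			/* 4K pages assumed for the model */

static unsigned long model_random_long(void)
{
	return ((unsigned long)rand() << 32) ^ (unsigned long)rand();
}

int main(void)
{
	srand(time(NULL));
	/* 64-bit task: up to 1 GB of mmap randomization, page aligned */
	unsigned long rnd = model_random_long() % (1UL << (30 - PAGE_SHIFT));

	printf("mmap base offset: %#lx\n", rnd << PAGE_SHIFT);
	return 0;
}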
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index 83dfd79..de37ff4 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -243,3 +243,11 @@
}
#endif /* CONFIG_DEBUG_VM */
+unsigned long vmalloc_to_phys(void *va)
+{
+ unsigned long pfn = vmalloc_to_pfn(va);
+
+ BUG_ON(!pfn);
+ return __pa(pfn_to_kaddr(pfn)) + offset_in_page(va);
+}
+EXPORT_SYMBOL_GPL(vmalloc_to_phys);
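vmalloc_to_phys() generalizes the private copy removed from hv-24x7.c further below: it resolves the backing pfn and adds the in-page offset, giving callers a physical address they can hand to firmware or hypervisor interfaces. A usage sketch in kernel context (demo_phys and the buffer are illustrative):

#include <linux/vmalloc.h>

static unsigned long demo_phys(void)
{
	void *buf = vmalloc(PAGE_SIZE);		/* illustrative buffer */
	unsigned long phys;

	if (!buf)
		return 0;
	/* the page's physical address plus 64, via offset_in_page() */
	phys = vmalloc_to_phys(buf + 64);
	vfree(buf);				/* demo only; phys is stale now */
	return phys;
}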
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 3124a20..cdf2123 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -646,6 +646,28 @@
return pgtable;
}
+void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ VM_BUG_ON(address & ~HPAGE_PMD_MASK);
+ VM_BUG_ON(REGION_ID(address) != USER_REGION_ID);
+
+ /*
+ * We can't mark the pmd none here, because that will cause a race
+ * against exit_mmap. We need to keep the pmd marked TRANS HUGE while
+ * we split, but at the same time we want the rest of the ppc64 code
+ * not to insert hash PTEs on this, because we will be modifying
+ * the deposited pgtable in the caller of this function. Hence
+ * clear _PAGE_USER so that we move the fault handling to a
+ * higher level function, and that will serialize against the ptl.
+ * We need to flush existing hash PTE entries here even though
+ * the translation is still valid, because we will withdraw the
+ * pgtable_t after this.
+ */
+ pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_USER, 0);
+}
+
+
/*
* set a new huge pmd. We should not be called for updating
* an existing pmd entry. That should go via pmd_hugepage_update.
@@ -663,10 +685,20 @@
return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
}
+/*
+ * We use this to invalidate a pmdp entry before switching from a
+ * hugepte to a regular pmd entry.
+ */
void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
pmd_t *pmdp)
{
pmd_hugepage_update(vma->vm_mm, address, pmdp, _PAGE_PRESENT, 0);
+
+ /*
+ * This ensures that generic code that relies on IRQ disabling
+ * to prevent a parallel THP split works as expected.
+ */
+ kick_all_cpus_sync();
}
/*
diff --git a/arch/powerpc/perf/hv-24x7.c b/arch/powerpc/perf/hv-24x7.c
index 9f9dfda..3b09ecf 100644
--- a/arch/powerpc/perf/hv-24x7.c
+++ b/arch/powerpc/perf/hv-24x7.c
@@ -493,14 +493,6 @@
}
}
-static unsigned long vmalloc_to_phys(void *v)
-{
- struct page *p = vmalloc_to_page(v);
-
- BUG_ON(!p);
- return page_to_phys(p) + offset_in_page(v);
-}
-
/* */
struct event_uniq {
struct rb_node node;
diff --git a/arch/powerpc/platforms/powernv/eeh-powernv.c b/arch/powerpc/platforms/powernv/eeh-powernv.c
index 5f152b9..87f47e5 100644
--- a/arch/powerpc/platforms/powernv/eeh-powernv.c
+++ b/arch/powerpc/platforms/powernv/eeh-powernv.c
@@ -444,9 +444,12 @@
* PCI devices of the PE are expected to be removed prior
* to PE reset.
*/
- if (!edev->pe->bus)
+ if (!(edev->pe->state & EEH_PE_PRI_BUS)) {
edev->pe->bus = pci_find_bus(hose->global_number,
pdn->busno);
+ if (edev->pe->bus)
+ edev->pe->state |= EEH_PE_PRI_BUS;
+ }
/*
* Enable EEH explicitly so that we will do EEH check
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index 573ae19..f90dc04 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -3180,6 +3180,7 @@
static const struct pci_controller_ops pnv_pci_ioda_controller_ops = {
.dma_dev_setup = pnv_pci_dma_dev_setup,
+ .dma_bus_setup = pnv_pci_dma_bus_setup,
#ifdef CONFIG_PCI_MSI
.setup_msi_irqs = pnv_setup_msi_irqs,
.teardown_msi_irqs = pnv_teardown_msi_irqs,
diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c
index 2f55c86..b1ef84a 100644
--- a/arch/powerpc/platforms/powernv/pci.c
+++ b/arch/powerpc/platforms/powernv/pci.c
@@ -599,6 +599,9 @@
u64 rpn = __pa(uaddr) >> tbl->it_page_shift;
long i;
+ if (proto_tce & TCE_PCI_WRITE)
+ proto_tce |= TCE_PCI_READ;
+
for (i = 0; i < npages; i++) {
unsigned long newtce = proto_tce |
((rpn + i) << tbl->it_page_shift);
@@ -620,6 +623,9 @@
BUG_ON(*hpa & ~IOMMU_PAGE_MASK(tbl));
+ if (newtce & TCE_PCI_WRITE)
+ newtce |= TCE_PCI_READ;
+
oldtce = xchg(pnv_tce(tbl, idx), cpu_to_be64(newtce));
*hpa = be64_to_cpu(oldtce) & ~(TCE_PCI_READ | TCE_PCI_WRITE);
*direction = iommu_tce_direction(oldtce);
@@ -760,6 +766,26 @@
phb->dma_dev_setup(phb, pdev);
}
+void pnv_pci_dma_bus_setup(struct pci_bus *bus)
+{
+ struct pci_controller *hose = bus->sysdata;
+ struct pnv_phb *phb = hose->private_data;
+ struct pnv_ioda_pe *pe;
+
+ list_for_each_entry(pe, &phb->ioda.pe_list, list) {
+ if (!(pe->flags & (PNV_IODA_PE_BUS | PNV_IODA_PE_BUS_ALL)))
+ continue;
+
+ if (!pe->pbus)
+ continue;
+
+ if (bus->number == ((pe->rid >> 8) & 0xFF)) {
+ pe->pbus = bus;
+ break;
+ }
+ }
+}
+
void pnv_pci_shutdown(void)
{
struct pci_controller *hose;
diff --git a/arch/powerpc/platforms/powernv/pci.h b/arch/powerpc/platforms/powernv/pci.h
index 7f56313..00691a9 100644
--- a/arch/powerpc/platforms/powernv/pci.h
+++ b/arch/powerpc/platforms/powernv/pci.h
@@ -242,6 +242,7 @@
extern int pnv_eeh_phb_reset(struct pci_controller *hose, int option);
extern void pnv_pci_dma_dev_setup(struct pci_dev *pdev);
+extern void pnv_pci_dma_bus_setup(struct pci_bus *bus);
extern int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type);
extern void pnv_teardown_msi_irqs(struct pci_dev *pdev);
diff --git a/arch/powerpc/sysdev/xics/icp-native.c b/arch/powerpc/sysdev/xics/icp-native.c
index eae3265..afdf62f 100644
--- a/arch/powerpc/sysdev/xics/icp-native.c
+++ b/arch/powerpc/sysdev/xics/icp-native.c
@@ -159,6 +159,27 @@
icp_native_set_qirr(cpu, IPI_PRIORITY);
}
+#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
+void icp_native_cause_ipi_rm(int cpu)
+{
+ /*
+ * Currently not used to send IPIs to another CPU
+ * on the same core. The only caller is KVM real mode.
+ * This needs the physical address of the XICS to have been
+ * saved previously in kvm_hstate in the paca.
+ */
+ unsigned long xics_phys;
+
+ /*
+ * Just like the cause_ipi functions, it is required to
+ * include a full barrier (out8 includes a sync) before
+ * causing the IPI.
+ */
+ xics_phys = paca[cpu].kvm_hstate.xics_phys;
+ out_rm8((u8 *)(xics_phys + XICS_MFRR), IPI_PRIORITY);
+}
+#endif
+
/*
* Called when an interrupt is received on an off-line CPU to
* clear the interrupt, so that the CPU can go back to nap mode.
diff --git a/arch/s390/include/asm/fpu/internal.h b/arch/s390/include/asm/fpu/internal.h
index ea91ddf..629c908 100644
--- a/arch/s390/include/asm/fpu/internal.h
+++ b/arch/s390/include/asm/fpu/internal.h
@@ -40,6 +40,7 @@
static inline void fpregs_store(_s390_fp_regs *fpregs, struct fpu *fpu)
{
fpregs->pad = 0;
+ fpregs->fpc = fpu->fpc;
if (MACHINE_HAS_VX)
convert_vx_to_fp((freg_t *)&fpregs->fprs, fpu->vxrs);
else
@@ -49,6 +50,7 @@
static inline void fpregs_load(_s390_fp_regs *fpregs, struct fpu *fpu)
{
+ fpu->fpc = fpregs->fpc;
if (MACHINE_HAS_VX)
convert_fp_to_vx(fpu->vxrs, (freg_t *)&fpregs->fprs);
else
diff --git a/arch/s390/include/asm/livepatch.h b/arch/s390/include/asm/livepatch.h
index 7aa7991..a52b6cc 100644
--- a/arch/s390/include/asm/livepatch.h
+++ b/arch/s390/include/asm/livepatch.h
@@ -37,7 +37,7 @@
regs->psw.addr = ip;
}
#else
-#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#error Include linux/livepatch.h, not asm/livepatch.h
#endif
#endif
diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c
index 66c9441..4af6037 100644
--- a/arch/s390/kernel/compat_signal.c
+++ b/arch/s390/kernel/compat_signal.c
@@ -271,7 +271,7 @@
/* Restore high gprs from signal stack */
if (__copy_from_user(&gprs_high, &sregs_ext->gprs_high,
- sizeof(&sregs_ext->gprs_high)))
+ sizeof(sregs_ext->gprs_high)))
return -EFAULT;
for (i = 0; i < NUM_GPRS; i++)
*(__u32 *)&regs->gprs[i] = gprs_high[i];
diff --git a/arch/s390/kernel/perf_event.c b/arch/s390/kernel/perf_event.c
index cfcba2d..0943b11 100644
--- a/arch/s390/kernel/perf_event.c
+++ b/arch/s390/kernel/perf_event.c
@@ -260,12 +260,13 @@
void perf_callchain_kernel(struct perf_callchain_entry *entry,
struct pt_regs *regs)
{
- unsigned long head;
+ unsigned long head, frame_size;
struct stack_frame *head_sf;
if (user_mode(regs))
return;
+ frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
head = regs->gprs[15];
head_sf = (struct stack_frame *) head;
@@ -273,8 +274,9 @@
return;
head = head_sf->back_chain;
- head = __store_trace(entry, head, S390_lowcore.async_stack - ASYNC_SIZE,
- S390_lowcore.async_stack);
+ head = __store_trace(entry, head,
+ S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+ S390_lowcore.async_stack + frame_size);
__store_trace(entry, head, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
diff --git a/arch/s390/kernel/stacktrace.c b/arch/s390/kernel/stacktrace.c
index 5acba3c..8f64ebd 100644
--- a/arch/s390/kernel/stacktrace.c
+++ b/arch/s390/kernel/stacktrace.c
@@ -59,26 +59,32 @@
}
}
-void save_stack_trace(struct stack_trace *trace)
+static void __save_stack_trace(struct stack_trace *trace, unsigned long sp)
{
- register unsigned long sp asm ("15");
- unsigned long orig_sp, new_sp;
+ unsigned long new_sp, frame_size;
- orig_sp = sp;
- new_sp = save_context_stack(trace, orig_sp,
- S390_lowcore.panic_stack - PAGE_SIZE,
- S390_lowcore.panic_stack, 1);
- if (new_sp != orig_sp)
- return;
+ frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
+ new_sp = save_context_stack(trace, sp,
+ S390_lowcore.panic_stack + frame_size - PAGE_SIZE,
+ S390_lowcore.panic_stack + frame_size, 1);
new_sp = save_context_stack(trace, new_sp,
- S390_lowcore.async_stack - ASYNC_SIZE,
- S390_lowcore.async_stack, 1);
- if (new_sp != orig_sp)
- return;
+ S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+ S390_lowcore.async_stack + frame_size, 1);
save_context_stack(trace, new_sp,
S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE, 1);
}
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ register unsigned long r15 asm ("15");
+ unsigned long sp;
+
+ sp = r15;
+ __save_stack_trace(trace, sp);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
EXPORT_SYMBOL_GPL(save_stack_trace);
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
@@ -86,6 +92,10 @@
unsigned long sp, low, high;
sp = tsk->thread.ksp;
+ if (tsk == current) {
+ /* Get current stack pointer. */
+ asm volatile("la %0,0(15)" : "=a" (sp));
+ }
low = (unsigned long) task_stack_page(tsk);
high = (unsigned long) task_pt_regs(tsk);
save_context_stack(trace, sp, low, high, 0);
@@ -93,3 +103,14 @@
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
+{
+ unsigned long sp;
+
+ sp = kernel_stack_pointer(regs);
+ __save_stack_trace(trace, sp);
+ if (trace->nr_entries < trace->max_entries)
+ trace->entries[trace->nr_entries++] = ULONG_MAX;
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_regs);
diff --git a/arch/s390/kernel/trace.c b/arch/s390/kernel/trace.c
index 21a5df9..dde7654 100644
--- a/arch/s390/kernel/trace.c
+++ b/arch/s390/kernel/trace.c
@@ -18,6 +18,9 @@
unsigned long flags;
unsigned int *depth;
+ /* Avoid lockdep recursion. */
+ if (IS_ENABLED(CONFIG_LOCKDEP))
+ return;
local_irq_save(flags);
depth = this_cpu_ptr(&diagnose_trace_depth);
if (*depth == 0) {
diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c
index fec59c0..792f9c6 100644
--- a/arch/s390/mm/maccess.c
+++ b/arch/s390/mm/maccess.c
@@ -93,15 +93,19 @@
*/
int memcpy_real(void *dest, void *src, size_t count)
{
+ int irqs_disabled, rc;
unsigned long flags;
- int rc;
if (!count)
return 0;
- local_irq_save(flags);
- __arch_local_irq_stnsm(0xfbUL);
+ flags = __arch_local_irq_stnsm(0xf8UL);
+ irqs_disabled = arch_irqs_disabled_flags(flags);
+ if (!irqs_disabled)
+ trace_hardirqs_off();
rc = __memcpy_real(dest, src, count);
- local_irq_restore(flags);
+ if (!irqs_disabled)
+ trace_hardirqs_on();
+ __arch_local_irq_ssm(flags);
return rc;
}
diff --git a/arch/s390/oprofile/backtrace.c b/arch/s390/oprofile/backtrace.c
index fe0bfe3..1884e17 100644
--- a/arch/s390/oprofile/backtrace.c
+++ b/arch/s390/oprofile/backtrace.c
@@ -54,12 +54,13 @@
void s390_backtrace(struct pt_regs * const regs, unsigned int depth)
{
- unsigned long head;
+ unsigned long head, frame_size;
struct stack_frame* head_sf;
if (user_mode(regs))
return;
+ frame_size = STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
head = regs->gprs[15];
head_sf = (struct stack_frame*)head;
@@ -68,8 +69,9 @@
head = head_sf->back_chain;
- head = __show_trace(&depth, head, S390_lowcore.async_stack - ASYNC_SIZE,
- S390_lowcore.async_stack);
+ head = __show_trace(&depth, head,
+ S390_lowcore.async_stack + frame_size - ASYNC_SIZE,
+ S390_lowcore.async_stack + frame_size);
__show_trace(&depth, head, S390_lowcore.thread_info,
S390_lowcore.thread_info + THREAD_SIZE);
diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
index c690c8e..b489e97 100644
--- a/arch/sparc/kernel/sys_sparc_64.c
+++ b/arch/sparc/kernel/sys_sparc_64.c
@@ -264,7 +264,7 @@
unsigned long rnd = 0UL;
if (current->flags & PF_RANDOMIZE) {
- unsigned long val = get_random_int();
+ unsigned long val = get_random_long();
if (test_thread_flag(TIF_32BIT))
rnd = (val % (1UL << (23UL-PAGE_SHIFT)));
else
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 9af2e63..c46662f 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -475,6 +475,7 @@
depends on X86_64
depends on X86_EXTENDED_PLATFORM
depends on NUMA
+ depends on EFI
depends on X86_X2APIC
depends on PCI
---help---
@@ -777,8 +778,8 @@
HPET is the next generation timer replacing legacy 8254s.
The HPET provides a stable time base on SMP
systems, unlike the TSC, but it is more expensive to access,
- as it is off-chip. You can find the HPET spec at
- <http://www.intel.com/hardwaredesign/hpetspec_1.pdf>.
+ as it is off-chip. The interface used is documented
+ in the HPET spec, revision 1.
You can safely choose Y here. However, HPET will only be
activated if the platform and the BIOS support this feature.
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
index 77d8c51..bb3e376 100644
--- a/arch/x86/entry/entry_32.S
+++ b/arch/x86/entry/entry_32.S
@@ -294,6 +294,7 @@
pushl $__USER_DS /* pt_regs->ss */
pushl %ebp /* pt_regs->sp (stashed in bp) */
pushfl /* pt_regs->flags (except IF = 0) */
+ ASM_CLAC /* Clear AC after saving FLAGS */
orl $X86_EFLAGS_IF, (%esp) /* Fix IF */
pushl $__USER_CS /* pt_regs->cs */
pushl $0 /* pt_regs->ip = 0 (placeholder) */
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
index ff1c6d6..3c990ee 100644
--- a/arch/x86/entry/entry_64_compat.S
+++ b/arch/x86/entry/entry_64_compat.S
@@ -261,6 +261,7 @@
* Interrupts are off on entry.
*/
PARAVIRT_ADJUST_EXCEPTION_FRAME
+ ASM_CLAC /* Do this early to minimize exposure */
SWAPGS
/*
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 7b54599..01c8b50 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -32,6 +32,7 @@
#include <asm/mtrr.h>
#include <asm/msr-index.h>
#include <asm/asm.h>
+#include <asm/kvm_page_track.h>
#define KVM_MAX_VCPUS 255
#define KVM_SOFT_MAX_VCPUS 160
@@ -214,6 +215,14 @@
void *objects[KVM_NR_MEM_OBJS];
};
+/*
+ * The pages used as guest page tables on the soft mmu are tracked by
+ * kvm_memory_slot.arch.gfn_track, which is 16 bits, so the role bits used
+ * by an indirect shadow page cannot be more than 15 bits.
+ *
+ * Currently we use 14 bits, namely @level, @cr4_pae, @quadrant, @access,
+ * @nxe, @cr0_wp, @smep_andnot_wp and @smap_andnot_wp.
+ */
union kvm_mmu_page_role {
unsigned word;
struct {
@@ -276,7 +285,7 @@
#endif
/* Number of writes since the last time traversal visited this page. */
- int write_flooding_count;
+ atomic_t write_flooding_count;
};
struct kvm_pio_request {
@@ -338,12 +347,8 @@
struct rsvd_bits_validate guest_rsvd_check;
- /*
- * Bitmap: bit set = last pte in walk
- * index[0:1]: level (zero-based)
- * index[2]: pte.ps
- */
- u8 last_pte_bitmap;
+ /* Can have large pages at levels 2..last_nonleaf_level-1. */
+ u8 last_nonleaf_level;
bool nx;
@@ -498,7 +503,6 @@
struct kvm_mmu_memory_cache mmu_page_header_cache;
struct fpu guest_fpu;
- bool eager_fpu;
u64 xcr0;
u64 guest_supported_xcr0;
u32 guest_xstate_size;
@@ -644,12 +648,13 @@
};
struct kvm_lpage_info {
- int write_count;
+ int disallow_lpage;
};
struct kvm_arch_memory_slot {
struct kvm_rmap_head *rmap[KVM_NR_PAGE_SIZES];
struct kvm_lpage_info *lpage_info[KVM_NR_PAGE_SIZES - 1];
+ unsigned short *gfn_track[KVM_PAGE_TRACK_MAX];
};
/*
@@ -694,6 +699,8 @@
*/
struct list_head active_mmu_pages;
struct list_head zapped_obsolete_pages;
+ struct kvm_page_track_notifier_node mmu_sp_tracker;
+ struct kvm_page_track_notifier_head track_notifier_head;
struct list_head assigned_dev_head;
struct iommu_domain *iommu_domain;
@@ -990,6 +997,8 @@
void kvm_mmu_destroy(struct kvm_vcpu *vcpu);
int kvm_mmu_create(struct kvm_vcpu *vcpu);
void kvm_mmu_setup(struct kvm_vcpu *vcpu);
+void kvm_mmu_init_vm(struct kvm *kvm);
+void kvm_mmu_uninit_vm(struct kvm *kvm);
void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
u64 dirty_mask, u64 nx_mask, u64 x_mask);
@@ -1129,8 +1138,6 @@
void kvm_inject_nmi(struct kvm_vcpu *vcpu);
-void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *new, int bytes);
int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/include/asm/kvm_page_track.h b/arch/x86/include/asm/kvm_page_track.h
new file mode 100644
index 0000000..c2b8d24
--- /dev/null
+++ b/arch/x86/include/asm/kvm_page_track.h
@@ -0,0 +1,61 @@
+#ifndef _ASM_X86_KVM_PAGE_TRACK_H
+#define _ASM_X86_KVM_PAGE_TRACK_H
+
+enum kvm_page_track_mode {
+ KVM_PAGE_TRACK_WRITE,
+ KVM_PAGE_TRACK_MAX,
+};
+
+/*
+ * The notifier represented by @kvm_page_track_notifier_node is linked into
+ * the head, which is notified when the guest triggers a tracked event.
+ *
+ * Write access on the head is protected by kvm->mmu_lock, read access
+ * is protected by track_srcu.
+ */
+struct kvm_page_track_notifier_head {
+ struct srcu_struct track_srcu;
+ struct hlist_head track_notifier_list;
+};
+
+struct kvm_page_track_notifier_node {
+ struct hlist_node node;
+
+ /*
+ * It is called when the guest writes to a write-tracked page,
+ * after write emulation for that access has finished.
+ *
+ * @vcpu: the vcpu where the write access happened.
+ * @gpa: the physical address written by the guest.
+ * @new: the data written to the address.
+ * @bytes: the written length.
+ */
+ void (*track_write)(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+ int bytes);
+};
+
+void kvm_page_track_init(struct kvm *kvm);
+
+void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont);
+int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
+ unsigned long npages);
+
+void kvm_slot_page_track_add_page(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ enum kvm_page_track_mode mode);
+void kvm_slot_page_track_remove_page(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ enum kvm_page_track_mode mode);
+bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
+ enum kvm_page_track_mode mode);
+
+void
+kvm_page_track_register_notifier(struct kvm *kvm,
+ struct kvm_page_track_notifier_node *n);
+void
+kvm_page_track_unregister_notifier(struct kvm *kvm,
+ struct kvm_page_track_notifier_node *n);
+void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+ int bytes);
+#endif
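A consumer of this header registers a notifier node and is then called back after every emulated write to a gfn it tracks; the shadow MMU becomes such a consumer elsewhere in this series. A minimal usage sketch (my_track_write, my_node and my_init are illustrative; the API is the one declared above):

#include <asm/kvm_page_track.h>

static void my_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
			   const u8 *new, int bytes)
{
	/* runs after write emulation finishes on a tracked gfn */
}

static struct kvm_page_track_notifier_node my_node = {
	.track_write = my_track_write,
};

static void my_init(struct kvm *kvm, struct kvm_memory_slot *slot, gfn_t gfn)
{
	kvm_page_track_register_notifier(kvm, &my_node);
	/* begin write-protecting one gfn (assumes kvm->mmu_lock is held) */
	kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
}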
diff --git a/arch/x86/include/asm/livepatch.h b/arch/x86/include/asm/livepatch.h
index 19c099a..e795f52 100644
--- a/arch/x86/include/asm/livepatch.h
+++ b/arch/x86/include/asm/livepatch.h
@@ -41,7 +41,7 @@
regs->ip = ip;
}
#else
-#error Live patching support is disabled; check CONFIG_LIVEPATCH
+#error Include linux/livepatch.h, not asm/livepatch.h
#endif
#endif /* _ASM_X86_LIVEPATCH_H */
diff --git a/arch/x86/include/asm/pci_x86.h b/arch/x86/include/asm/pci_x86.h
index 46873fb..d08eacd2 100644
--- a/arch/x86/include/asm/pci_x86.h
+++ b/arch/x86/include/asm/pci_x86.h
@@ -93,6 +93,8 @@
extern int (*pcibios_enable_irq)(struct pci_dev *dev);
extern void (*pcibios_disable_irq)(struct pci_dev *dev);
+extern bool mp_should_keep_irq(struct device *dev);
+
struct pci_raw_ops {
int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn,
int reg, int len, u32 *val);
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 2d5a50c..20c11d1 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -766,7 +766,7 @@
* Return saved PC of a blocked thread.
* What is this good for? It will always be the scheduler or ret_from_fork.
*/
-#define thread_saved_pc(t) (*(unsigned long *)((t)->thread.sp - 8))
+#define thread_saved_pc(t) READ_ONCE_NOCHECK(*(unsigned long *)((t)->thread.sp - 8))
#define task_pt_regs(tsk) ((struct pt_regs *)(tsk)->thread.sp0 - 1)
extern unsigned long KSTK_ESP(struct task_struct *task);
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index f5dcb52..3fe0eac 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -48,20 +48,28 @@
switch (n) {
case 1:
+ __uaccess_begin();
__put_user_size(*(u8 *)from, (u8 __user *)to,
1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin();
__put_user_size(*(u16 *)from, (u16 __user *)to,
2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin();
__put_user_size(*(u32 *)from, (u32 __user *)to,
4, ret, 4);
+ __uaccess_end();
return ret;
case 8:
+ __uaccess_begin();
__put_user_size(*(u64 *)from, (u64 __user *)to,
8, ret, 8);
+ __uaccess_end();
return ret;
}
}
@@ -103,13 +111,19 @@
switch (n) {
case 1:
+ __uaccess_begin();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
@@ -148,13 +162,19 @@
switch (n) {
case 1:
+ __uaccess_begin();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
@@ -170,13 +190,19 @@
switch (n) {
case 1:
+ __uaccess_begin();
__get_user_size(*(u8 *)to, from, 1, ret, 1);
+ __uaccess_end();
return ret;
case 2:
+ __uaccess_begin();
__get_user_size(*(u16 *)to, from, 2, ret, 2);
+ __uaccess_end();
return ret;
case 4:
+ __uaccess_begin();
__get_user_size(*(u32 *)to, from, 4, ret, 4);
+ __uaccess_end();
return ret;
}
}
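Each of the fixed paths above now brackets its single inlined access with __uaccess_begin()/__uaccess_end(), so that with SMAP enabled the access is actually permitted while the window stays open no longer than the access itself. The shape in isolation (a sketch; the 4-byte case is just an example):

	__uaccess_begin();		/* STAC: open the user-access window */
	__get_user_size(*(u32 *)to, from, 4, ret, 4);
	__uaccess_end();		/* CLAC: close it again at once */
	return ret;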
diff --git a/arch/x86/include/asm/xen/pci.h b/arch/x86/include/asm/xen/pci.h
index 968d57d..f320ee3 100644
--- a/arch/x86/include/asm/xen/pci.h
+++ b/arch/x86/include/asm/xen/pci.h
@@ -57,7 +57,7 @@
{
if (xen_pci_frontend && xen_pci_frontend->enable_msi)
return xen_pci_frontend->enable_msi(dev, vectors);
- return -ENODEV;
+ return -ENOSYS;
}
static inline void xen_pci_frontend_disable_msi(struct pci_dev *dev)
{
@@ -69,7 +69,7 @@
{
if (xen_pci_frontend && xen_pci_frontend->enable_msix)
return xen_pci_frontend->enable_msix(dev, vectors, nvec);
- return -ENODEV;
+ return -ENOSYS;
}
static inline void xen_pci_frontend_disable_msix(struct pci_dev *dev)
{
diff --git a/arch/x86/include/uapi/asm/hyperv.h b/arch/x86/include/uapi/asm/hyperv.h
index 7956412..9b1a918 100644
--- a/arch/x86/include/uapi/asm/hyperv.h
+++ b/arch/x86/include/uapi/asm/hyperv.h
@@ -226,7 +226,9 @@
(~((1ull << HV_X64_MSR_HYPERCALL_PAGE_ADDRESS_SHIFT) - 1))
/* Declare the various hypercall operations. */
-#define HV_X64_HV_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_NOTIFY_LONG_SPIN_WAIT 0x0008
+#define HVCALL_POST_MESSAGE 0x005c
+#define HVCALL_SIGNAL_EVENT 0x005d
#define HV_X64_MSR_APIC_ASSIST_PAGE_ENABLE 0x00000001
#define HV_X64_MSR_APIC_ASSIST_PAGE_ADDRESS_SHIFT 12
diff --git a/arch/x86/kernel/cpu/perf_event_amd_uncore.c b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
index 4974274..8836fc9 100644
--- a/arch/x86/kernel/cpu/perf_event_amd_uncore.c
+++ b/arch/x86/kernel/cpu/perf_event_amd_uncore.c
@@ -323,6 +323,8 @@
return 0;
fail:
+ if (amd_uncore_nb)
+ *per_cpu_ptr(amd_uncore_nb, cpu) = NULL;
kfree(uncore_nb);
return -ENOMEM;
}
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index a1ff508..464fa47 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -13,9 +13,10 @@
kvm-y += x86.o mmu.o emulate.o i8259.o irq.o lapic.o \
i8254.o ioapic.o irq_comm.o cpuid.o pmu.o mtrr.o \
- hyperv.o
+ hyperv.o page_track.o
kvm-$(CONFIG_KVM_DEVICE_ASSIGNMENT) += assigned-dev.o iommu.o
+
kvm-intel-y += vmx.o pmu_intel.o
kvm-amd-y += svm.o pmu_amd.o
diff --git a/arch/x86/kvm/assigned-dev.c b/arch/x86/kvm/assigned-dev.c
index 9dc091a..308b859 100644
--- a/arch/x86/kvm/assigned-dev.c
+++ b/arch/x86/kvm/assigned-dev.c
@@ -51,11 +51,9 @@
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
int assigned_dev_id)
{
- struct list_head *ptr;
struct kvm_assigned_dev_kernel *match;
- list_for_each(ptr, head) {
- match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
+ list_for_each_entry(match, head, list) {
if (match->assigned_dev_id == assigned_dev_id)
return match;
}
@@ -373,14 +371,10 @@
void kvm_free_all_assigned_devices(struct kvm *kvm)
{
- struct list_head *ptr, *ptr2;
- struct kvm_assigned_dev_kernel *assigned_dev;
+ struct kvm_assigned_dev_kernel *assigned_dev, *tmp;
- list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
- assigned_dev = list_entry(ptr,
- struct kvm_assigned_dev_kernel,
- list);
-
+ list_for_each_entry_safe(assigned_dev, tmp,
+ &kvm->arch.assigned_dev_head, list) {
kvm_free_assigned_device(kvm, assigned_dev);
}
}
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 6525e92..0029644 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -46,11 +46,18 @@
return ret;
}
+bool kvm_mpx_supported(void)
+{
+ return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
+ && kvm_x86_ops->mpx_supported());
+}
+EXPORT_SYMBOL_GPL(kvm_mpx_supported);
+
u64 kvm_supported_xcr0(void)
{
u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
- if (!kvm_x86_ops->mpx_supported())
+ if (!kvm_mpx_supported())
xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
return xcr0;
@@ -97,8 +104,7 @@
if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu);
- if (vcpu->arch.eager_fpu)
+ if (use_eager_fpu())
kvm_x86_ops->fpu_activate(vcpu);
/*
@@ -295,7 +301,7 @@
#endif
unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
- unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
+ unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
/* cpuid 1.edx */
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index c8eda14..66a6581 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -5,6 +5,7 @@
#include <asm/cpu.h>
int kvm_update_cpuid(struct kvm_vcpu *vcpu);
+bool kvm_mpx_supported(void);
struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
u32 function, u32 index);
int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
@@ -135,14 +136,6 @@
return best && (best->ebx & bit(X86_FEATURE_RTM));
}
-static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
-{
- struct kvm_cpuid_entry2 *best;
-
- best = kvm_find_cpuid_entry(vcpu, 7, 0);
- return best && (best->ebx & bit(X86_FEATURE_MPX));
-}
-
static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
{
struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 1505587..b9b09fe 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -650,10 +650,10 @@
u16 sel;
la = seg_base(ctxt, addr.seg) + addr.ea;
- *linear = la;
*max_size = 0;
switch (mode) {
case X86EMUL_MODE_PROT64:
+ *linear = la;
if (is_noncanonical_address(la))
goto bad;
@@ -662,6 +662,7 @@
goto bad;
break;
default:
+ *linear = la = (u32)la;
usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
addr.seg);
if (!usable)
@@ -689,7 +690,6 @@
if (size > *max_size)
goto bad;
}
- la &= (u32)-1;
break;
}
if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
diff --git a/arch/x86/kvm/hyperv.c b/arch/x86/kvm/hyperv.c
index c58ba67..5ff3485 100644
--- a/arch/x86/kvm/hyperv.c
+++ b/arch/x86/kvm/hyperv.c
@@ -1043,6 +1043,27 @@
return kvm->arch.hyperv.hv_hypercall & HV_X64_MSR_HYPERCALL_ENABLE;
}
+static void kvm_hv_hypercall_set_result(struct kvm_vcpu *vcpu, u64 result)
+{
+ bool longmode;
+
+ longmode = is_64_bit_mode(vcpu);
+ if (longmode)
+ kvm_register_write(vcpu, VCPU_REGS_RAX, result);
+ else {
+ kvm_register_write(vcpu, VCPU_REGS_RDX, result >> 32);
+ kvm_register_write(vcpu, VCPU_REGS_RAX, result & 0xffffffff);
+ }
+}
+
+static int kvm_hv_hypercall_complete_userspace(struct kvm_vcpu *vcpu)
+{
+ struct kvm_run *run = vcpu->run;
+
+ kvm_hv_hypercall_set_result(vcpu, run->hyperv.u.hcall.result);
+ return 1;
+}
+
int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
{
u64 param, ingpa, outgpa, ret;
@@ -1055,7 +1076,7 @@
*/
if (kvm_x86_ops->get_cpl(vcpu) != 0 || !is_protmode(vcpu)) {
kvm_queue_exception(vcpu, UD_VECTOR);
- return 0;
+ return 1;
}
longmode = is_64_bit_mode(vcpu);
@@ -1083,22 +1104,33 @@
trace_kvm_hv_hypercall(code, fast, rep_cnt, rep_idx, ingpa, outgpa);
+ /* Hypercall continuation is not supported yet */
+ if (rep_cnt || rep_idx) {
+ res = HV_STATUS_INVALID_HYPERCALL_CODE;
+ goto set_result;
+ }
+
switch (code) {
- case HV_X64_HV_NOTIFY_LONG_SPIN_WAIT:
+ case HVCALL_NOTIFY_LONG_SPIN_WAIT:
kvm_vcpu_on_spin(vcpu);
break;
+ case HVCALL_POST_MESSAGE:
+ case HVCALL_SIGNAL_EVENT:
+ vcpu->run->exit_reason = KVM_EXIT_HYPERV;
+ vcpu->run->hyperv.type = KVM_EXIT_HYPERV_HCALL;
+ vcpu->run->hyperv.u.hcall.input = param;
+ vcpu->run->hyperv.u.hcall.params[0] = ingpa;
+ vcpu->run->hyperv.u.hcall.params[1] = outgpa;
+ vcpu->arch.complete_userspace_io =
+ kvm_hv_hypercall_complete_userspace;
+ return 0;
default:
res = HV_STATUS_INVALID_HYPERCALL_CODE;
break;
}
+set_result:
ret = res | (((u64)rep_done & 0xfff) << 32);
- if (longmode) {
- kvm_register_write(vcpu, VCPU_REGS_RAX, ret);
- } else {
- kvm_register_write(vcpu, VCPU_REGS_RDX, ret >> 32);
- kvm_register_write(vcpu, VCPU_REGS_RAX, ret & 0xffffffff);
- }
-
+ kvm_hv_hypercall_set_result(vcpu, ret);
return 1;
}
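For HVCALL_POST_MESSAGE and HVCALL_SIGNAL_EVENT the hypercall now exits to userspace; the VMM stores a Hyper-V status in hcall.result and, on the next KVM_RUN, kvm_hv_hypercall_complete_userspace() pushes it into RAX (or RDX:RAX in 32-bit mode). A hypothetical VMM-side fragment (handle_exit is illustrative; the kvm_run fields are the ones set above):

#include <linux/kvm.h>

static void handle_exit(struct kvm_run *run)
{
	switch (run->exit_reason) {
	case KVM_EXIT_HYPERV:
		if (run->hyperv.type == KVM_EXIT_HYPERV_HCALL) {
			/* params[0]/params[1] carry ingpa/outgpa */
			run->hyperv.u.hcall.result = 0;	/* HV_STATUS_SUCCESS */
		}
		break;
	}
}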
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index b0ea42b..a4bf5b4 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -51,32 +51,9 @@
#define RW_STATE_WORD0 3
#define RW_STATE_WORD1 4
-/* Compute with 96 bit intermediate result: (a*b)/c */
-static u64 muldiv64(u64 a, u32 b, u32 c)
+static void pit_set_gate(struct kvm_pit *pit, int channel, u32 val)
{
- union {
- u64 ll;
- struct {
- u32 low, high;
- } l;
- } u, res;
- u64 rl, rh;
-
- u.ll = a;
- rl = (u64)u.l.low * (u64)b;
- rh = (u64)u.l.high * (u64)b;
- rh += (rl >> 32);
- res.l.high = div64_u64(rh, c);
- res.l.low = div64_u64(((mod_64(rh, c) << 32) + (rl & 0xffffffff)), c);
- return res.ll;
-}
-
-static void pit_set_gate(struct kvm *kvm, int channel, u32 val)
-{
- struct kvm_kpit_channel_state *c =
- &kvm->arch.vpit->pit_state.channels[channel];
-
- WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+ struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
switch (c->mode) {
default:
@@ -97,18 +74,16 @@
c->gate = val;
}
-static int pit_get_gate(struct kvm *kvm, int channel)
+static int pit_get_gate(struct kvm_pit *pit, int channel)
{
- WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
- return kvm->arch.vpit->pit_state.channels[channel].gate;
+ return pit->pit_state.channels[channel].gate;
}
-static s64 __kpit_elapsed(struct kvm *kvm)
+static s64 __kpit_elapsed(struct kvm_pit *pit)
{
s64 elapsed;
ktime_t remaining;
- struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+ struct kvm_kpit_state *ps = &pit->pit_state;
if (!ps->period)
return 0;
@@ -128,26 +103,23 @@
return elapsed;
}
-static s64 kpit_elapsed(struct kvm *kvm, struct kvm_kpit_channel_state *c,
+static s64 kpit_elapsed(struct kvm_pit *pit, struct kvm_kpit_channel_state *c,
int channel)
{
if (channel == 0)
- return __kpit_elapsed(kvm);
+ return __kpit_elapsed(pit);
return ktime_to_ns(ktime_sub(ktime_get(), c->count_load_time));
}
-static int pit_get_count(struct kvm *kvm, int channel)
+static int pit_get_count(struct kvm_pit *pit, int channel)
{
- struct kvm_kpit_channel_state *c =
- &kvm->arch.vpit->pit_state.channels[channel];
+ struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
s64 d, t;
int counter;
- WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
- t = kpit_elapsed(kvm, c, channel);
- d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
+ t = kpit_elapsed(pit, c, channel);
+ d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);
switch (c->mode) {
case 0:
@@ -167,17 +139,14 @@
return counter;
}
-static int pit_get_out(struct kvm *kvm, int channel)
+static int pit_get_out(struct kvm_pit *pit, int channel)
{
- struct kvm_kpit_channel_state *c =
- &kvm->arch.vpit->pit_state.channels[channel];
+ struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
s64 d, t;
int out;
- WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
-
- t = kpit_elapsed(kvm, c, channel);
- d = muldiv64(t, KVM_PIT_FREQ, NSEC_PER_SEC);
+ t = kpit_elapsed(pit, c, channel);
+ d = mul_u64_u32_div(t, KVM_PIT_FREQ, NSEC_PER_SEC);
switch (c->mode) {
default:
@@ -202,29 +171,23 @@
return out;
}
-static void pit_latch_count(struct kvm *kvm, int channel)
+static void pit_latch_count(struct kvm_pit *pit, int channel)
{
- struct kvm_kpit_channel_state *c =
- &kvm->arch.vpit->pit_state.channels[channel];
-
- WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+ struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
if (!c->count_latched) {
- c->latched_count = pit_get_count(kvm, channel);
+ c->latched_count = pit_get_count(pit, channel);
c->count_latched = c->rw_mode;
}
}
-static void pit_latch_status(struct kvm *kvm, int channel)
+static void pit_latch_status(struct kvm_pit *pit, int channel)
{
- struct kvm_kpit_channel_state *c =
- &kvm->arch.vpit->pit_state.channels[channel];
-
- WARN_ON(!mutex_is_locked(&kvm->arch.vpit->pit_state.lock));
+ struct kvm_kpit_channel_state *c = &pit->pit_state.channels[channel];
if (!c->status_latched) {
/* TODO: Return NULL COUNT (bit 6). */
- c->status = ((pit_get_out(kvm, channel) << 7) |
+ c->status = ((pit_get_out(pit, channel) << 7) |
(c->rw_mode << 4) |
(c->mode << 1) |
c->bcd);
@@ -232,26 +195,24 @@
}
}
+static inline struct kvm_pit *pit_state_to_pit(struct kvm_kpit_state *ps)
+{
+ return container_of(ps, struct kvm_pit, pit_state);
+}
+
static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
{
struct kvm_kpit_state *ps = container_of(kian, struct kvm_kpit_state,
irq_ack_notifier);
- int value;
+ struct kvm_pit *pit = pit_state_to_pit(ps);
- spin_lock(&ps->inject_lock);
- value = atomic_dec_return(&ps->pending);
- if (value < 0)
- /* spurious acks can be generated if, for example, the
- * PIC is being reset. Handle it gracefully here
- */
- atomic_inc(&ps->pending);
- else if (value > 0)
- /* in this case, we had multiple outstanding pit interrupts
- * that we needed to inject. Reinject
- */
- queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
- ps->irq_ack = 1;
- spin_unlock(&ps->inject_lock);
+ atomic_set(&ps->irq_ack, 1);
+ /* irq_ack should be set before pending is read. Order accesses with
+ * inc(pending) in pit_timer_fn and xchg(irq_ack, 0) in pit_do_work.
+ */
+ smp_mb();
+ if (atomic_dec_if_positive(&ps->pending) > 0)
+ queue_kthread_work(&pit->worker, &pit->expired);
}
void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
@@ -282,45 +243,36 @@
struct kvm_vcpu *vcpu;
int i;
struct kvm_kpit_state *ps = &pit->pit_state;
- int inject = 0;
- /* Try to inject pending interrupts when
- * last one has been acked.
+ if (atomic_read(&ps->reinject) && !atomic_xchg(&ps->irq_ack, 0))
+ return;
+
+ kvm_set_irq(kvm, pit->irq_source_id, 0, 1, false);
+ kvm_set_irq(kvm, pit->irq_source_id, 0, 0, false);
+
+ /*
+ * Provides NMI watchdog support via Virtual Wire mode.
+ * The route is: PIT -> LVT0 in NMI mode.
+ *
+ * Note: Our Virtual Wire implementation does not follow
+ * the MP specification. We propagate a PIT interrupt to all
+ * VCPUs and only when LVT0 is in NMI mode. The interrupt can
+ * also be simultaneously delivered through PIC and IOAPIC.
*/
- spin_lock(&ps->inject_lock);
- if (ps->irq_ack) {
- ps->irq_ack = 0;
- inject = 1;
- }
- spin_unlock(&ps->inject_lock);
- if (inject) {
- kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 1, false);
- kvm_set_irq(kvm, kvm->arch.vpit->irq_source_id, 0, 0, false);
-
- /*
- * Provides NMI watchdog support via Virtual Wire mode.
- * The route is: PIT -> PIC -> LVT0 in NMI mode.
- *
- * Note: Our Virtual Wire implementation is simplified, only
- * propagating PIT interrupts to all VCPUs when they have set
- * LVT0 to NMI delivery. Other PIC interrupts are just sent to
- * VCPU0, and only if its LVT0 is in EXTINT mode.
- */
- if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
- kvm_for_each_vcpu(i, vcpu, kvm)
- kvm_apic_nmi_wd_deliver(vcpu);
- }
+ if (atomic_read(&kvm->arch.vapics_in_nmi_mode) > 0)
+ kvm_for_each_vcpu(i, vcpu, kvm)
+ kvm_apic_nmi_wd_deliver(vcpu);
}
static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
{
struct kvm_kpit_state *ps = container_of(data, struct kvm_kpit_state, timer);
- struct kvm_pit *pt = ps->kvm->arch.vpit;
+ struct kvm_pit *pt = pit_state_to_pit(ps);
- if (ps->reinject || !atomic_read(&ps->pending)) {
+ if (atomic_read(&ps->reinject))
atomic_inc(&ps->pending);
- queue_kthread_work(&pt->worker, &pt->expired);
- }
+
+ queue_kthread_work(&pt->worker, &pt->expired);
if (ps->is_periodic) {
hrtimer_add_expires_ns(&ps->timer, ps->period);
@@ -329,30 +281,54 @@
return HRTIMER_NORESTART;
}
-static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
+static inline void kvm_pit_reset_reinject(struct kvm_pit *pit)
{
- struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
+ atomic_set(&pit->pit_state.pending, 0);
+ atomic_set(&pit->pit_state.irq_ack, 1);
+}
+
+void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject)
+{
+ struct kvm_kpit_state *ps = &pit->pit_state;
+ struct kvm *kvm = pit->kvm;
+
+ if (atomic_read(&ps->reinject) == reinject)
+ return;
+
+ if (reinject) {
+ /* The initial state is preserved while ps->reinject == 0. */
+ kvm_pit_reset_reinject(pit);
+ kvm_register_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
+ kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+ } else {
+ kvm_unregister_irq_ack_notifier(kvm, &ps->irq_ack_notifier);
+ kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+ }
+
+ atomic_set(&ps->reinject, reinject);
+}
+
+static void create_pit_timer(struct kvm_pit *pit, u32 val, int is_period)
+{
+ struct kvm_kpit_state *ps = &pit->pit_state;
+ struct kvm *kvm = pit->kvm;
s64 interval;
if (!ioapic_in_kernel(kvm) ||
ps->flags & KVM_PIT_FLAGS_HPET_LEGACY)
return;
- interval = muldiv64(val, NSEC_PER_SEC, KVM_PIT_FREQ);
+ interval = mul_u64_u32_div(val, NSEC_PER_SEC, KVM_PIT_FREQ);
pr_debug("create pit timer, interval is %llu nsec\n", interval);
/* TODO: the new value only takes effect after the timer is retriggered */
hrtimer_cancel(&ps->timer);
- flush_kthread_work(&ps->pit->expired);
+ flush_kthread_work(&pit->expired);
ps->period = interval;
ps->is_periodic = is_period;
- ps->timer.function = pit_timer_fn;
- ps->kvm = ps->pit->kvm;
-
- atomic_set(&ps->pending, 0);
- ps->irq_ack = 1;
+ kvm_pit_reset_reinject(pit);
/*
* Do not allow the guest to program periodic timers with small
@@ -375,11 +351,9 @@
HRTIMER_MODE_ABS);
}
-static void pit_load_count(struct kvm *kvm, int channel, u32 val)
+static void pit_load_count(struct kvm_pit *pit, int channel, u32 val)
{
- struct kvm_kpit_state *ps = &kvm->arch.vpit->pit_state;
-
- WARN_ON(!mutex_is_locked(&ps->lock));
+ struct kvm_kpit_state *ps = &pit->pit_state;
pr_debug("load_count val is %d, channel is %d\n", val, channel);
@@ -404,29 +378,33 @@
case 1:
/* FIXME: enhance mode 4 precision */
case 4:
- create_pit_timer(kvm, val, 0);
+ create_pit_timer(pit, val, 0);
break;
case 2:
case 3:
- create_pit_timer(kvm, val, 1);
+ create_pit_timer(pit, val, 1);
break;
default:
- destroy_pit_timer(kvm->arch.vpit);
+ destroy_pit_timer(pit);
}
}
-void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start)
+void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
+ int hpet_legacy_start)
{
u8 saved_mode;
+
+ WARN_ON_ONCE(!mutex_is_locked(&pit->pit_state.lock));
+
if (hpet_legacy_start) {
/* save existing mode for later reenablement */
WARN_ON(channel != 0);
- saved_mode = kvm->arch.vpit->pit_state.channels[0].mode;
- kvm->arch.vpit->pit_state.channels[0].mode = 0xff; /* disable timer */
- pit_load_count(kvm, channel, val);
- kvm->arch.vpit->pit_state.channels[0].mode = saved_mode;
+ saved_mode = pit->pit_state.channels[0].mode;
+ pit->pit_state.channels[0].mode = 0xff; /* disable timer */
+ pit_load_count(pit, channel, val);
+ pit->pit_state.channels[0].mode = saved_mode;
} else {
- pit_load_count(kvm, channel, val);
+ pit_load_count(pit, channel, val);
}
}
@@ -452,7 +430,6 @@
{
struct kvm_pit *pit = dev_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
- struct kvm *kvm = pit->kvm;
int channel, access;
struct kvm_kpit_channel_state *s;
u32 val = *(u32 *) data;
@@ -476,9 +453,9 @@
s = &pit_state->channels[channel];
if (val & (2 << channel)) {
if (!(val & 0x20))
- pit_latch_count(kvm, channel);
+ pit_latch_count(pit, channel);
if (!(val & 0x10))
- pit_latch_status(kvm, channel);
+ pit_latch_status(pit, channel);
}
}
} else {
@@ -486,7 +463,7 @@
s = &pit_state->channels[channel];
access = (val >> 4) & KVM_PIT_CHANNEL_MASK;
if (access == 0) {
- pit_latch_count(kvm, channel);
+ pit_latch_count(pit, channel);
} else {
s->rw_mode = access;
s->read_state = access;
@@ -503,17 +480,17 @@
switch (s->write_state) {
default:
case RW_STATE_LSB:
- pit_load_count(kvm, addr, val);
+ pit_load_count(pit, addr, val);
break;
case RW_STATE_MSB:
- pit_load_count(kvm, addr, val << 8);
+ pit_load_count(pit, addr, val << 8);
break;
case RW_STATE_WORD0:
s->write_latch = val;
s->write_state = RW_STATE_WORD1;
break;
case RW_STATE_WORD1:
- pit_load_count(kvm, addr, s->write_latch | (val << 8));
+ pit_load_count(pit, addr, s->write_latch | (val << 8));
s->write_state = RW_STATE_WORD0;
break;
}
@@ -529,7 +506,6 @@
{
struct kvm_pit *pit = dev_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
- struct kvm *kvm = pit->kvm;
int ret, count;
struct kvm_kpit_channel_state *s;
if (!pit_in_range(addr))
@@ -566,20 +542,20 @@
switch (s->read_state) {
default:
case RW_STATE_LSB:
- count = pit_get_count(kvm, addr);
+ count = pit_get_count(pit, addr);
ret = count & 0xff;
break;
case RW_STATE_MSB:
- count = pit_get_count(kvm, addr);
+ count = pit_get_count(pit, addr);
ret = (count >> 8) & 0xff;
break;
case RW_STATE_WORD0:
- count = pit_get_count(kvm, addr);
+ count = pit_get_count(pit, addr);
ret = count & 0xff;
s->read_state = RW_STATE_WORD1;
break;
case RW_STATE_WORD1:
- count = pit_get_count(kvm, addr);
+ count = pit_get_count(pit, addr);
ret = (count >> 8) & 0xff;
s->read_state = RW_STATE_WORD0;
break;
@@ -600,14 +576,13 @@
{
struct kvm_pit *pit = speaker_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
- struct kvm *kvm = pit->kvm;
u32 val = *(u32 *) data;
if (addr != KVM_SPEAKER_BASE_ADDRESS)
return -EOPNOTSUPP;
mutex_lock(&pit_state->lock);
pit_state->speaker_data_on = (val >> 1) & 1;
- pit_set_gate(kvm, 2, val & 1);
+ pit_set_gate(pit, 2, val & 1);
mutex_unlock(&pit_state->lock);
return 0;
}
@@ -618,7 +593,6 @@
{
struct kvm_pit *pit = speaker_to_pit(this);
struct kvm_kpit_state *pit_state = &pit->pit_state;
- struct kvm *kvm = pit->kvm;
unsigned int refresh_clock;
int ret;
if (addr != KVM_SPEAKER_BASE_ADDRESS)
@@ -628,8 +602,8 @@
refresh_clock = ((unsigned int)ktime_to_ns(ktime_get()) >> 14) & 1;
mutex_lock(&pit_state->lock);
- ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(kvm, 2) |
- (pit_get_out(kvm, 2) << 5) | (refresh_clock << 4));
+ ret = ((pit_state->speaker_data_on << 1) | pit_get_gate(pit, 2) |
+ (pit_get_out(pit, 2) << 5) | (refresh_clock << 4));
if (len > sizeof(ret))
len = sizeof(ret);
memcpy(data, (char *)&ret, len);
@@ -637,33 +611,28 @@
return 0;
}
-void kvm_pit_reset(struct kvm_pit *pit)
+static void kvm_pit_reset(struct kvm_pit *pit)
{
int i;
struct kvm_kpit_channel_state *c;
- mutex_lock(&pit->pit_state.lock);
pit->pit_state.flags = 0;
for (i = 0; i < 3; i++) {
c = &pit->pit_state.channels[i];
c->mode = 0xff;
c->gate = (i != 2);
- pit_load_count(pit->kvm, i, 0);
+ pit_load_count(pit, i, 0);
}
- mutex_unlock(&pit->pit_state.lock);
- atomic_set(&pit->pit_state.pending, 0);
- pit->pit_state.irq_ack = 1;
+ kvm_pit_reset_reinject(pit);
}
static void pit_mask_notifer(struct kvm_irq_mask_notifier *kimn, bool mask)
{
struct kvm_pit *pit = container_of(kimn, struct kvm_pit, mask_notifier);
- if (!mask) {
- atomic_set(&pit->pit_state.pending, 0);
- pit->pit_state.irq_ack = 1;
- }
+ if (!mask)
+ kvm_pit_reset_reinject(pit);
}
static const struct kvm_io_device_ops pit_dev_ops = {
@@ -690,14 +659,10 @@
return NULL;
pit->irq_source_id = kvm_request_irq_source_id(kvm);
- if (pit->irq_source_id < 0) {
- kfree(pit);
- return NULL;
- }
+ if (pit->irq_source_id < 0)
+ goto fail_request;
mutex_init(&pit->pit_state.lock);
- mutex_lock(&pit->pit_state.lock);
- spin_lock_init(&pit->pit_state.inject_lock);
pid = get_pid(task_tgid(current));
pid_nr = pid_vnr(pid);
@@ -706,36 +671,30 @@
init_kthread_worker(&pit->worker);
pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
"kvm-pit/%d", pid_nr);
- if (IS_ERR(pit->worker_task)) {
- mutex_unlock(&pit->pit_state.lock);
- kvm_free_irq_source_id(kvm, pit->irq_source_id);
- kfree(pit);
- return NULL;
- }
+ if (IS_ERR(pit->worker_task))
+ goto fail_kthread;
+
init_kthread_work(&pit->expired, pit_do_work);
- kvm->arch.vpit = pit;
pit->kvm = kvm;
pit_state = &pit->pit_state;
- pit_state->pit = pit;
hrtimer_init(&pit_state->timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS);
+ pit_state->timer.function = pit_timer_fn;
+
pit_state->irq_ack_notifier.gsi = 0;
pit_state->irq_ack_notifier.irq_acked = kvm_pit_ack_irq;
- kvm_register_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
- pit_state->reinject = true;
- mutex_unlock(&pit->pit_state.lock);
+ pit->mask_notifier.func = pit_mask_notifer;
kvm_pit_reset(pit);
- pit->mask_notifier.func = pit_mask_notifer;
- kvm_register_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
+ kvm_pit_set_reinject(pit, true);
kvm_iodevice_init(&pit->dev, &pit_dev_ops);
ret = kvm_io_bus_register_dev(kvm, KVM_PIO_BUS, KVM_PIT_BASE_ADDRESS,
KVM_PIT_MEM_LENGTH, &pit->dev);
if (ret < 0)
- goto fail;
+ goto fail_register_pit;
if (flags & KVM_PIT_SPEAKER_DUMMY) {
kvm_iodevice_init(&pit->speaker_dev, &speaker_dev_ops);
@@ -743,42 +702,35 @@
KVM_SPEAKER_BASE_ADDRESS, 4,
&pit->speaker_dev);
if (ret < 0)
- goto fail_unregister;
+ goto fail_register_speaker;
}
return pit;
-fail_unregister:
+fail_register_speaker:
kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
-
-fail:
- kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
- kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
- kvm_free_irq_source_id(kvm, pit->irq_source_id);
+fail_register_pit:
+ kvm_pit_set_reinject(pit, false);
kthread_stop(pit->worker_task);
+fail_kthread:
+ kvm_free_irq_source_id(kvm, pit->irq_source_id);
+fail_request:
kfree(pit);
return NULL;
}
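
The creation path above now follows the kernel's standard goto-unwind idiom: each failure label releases exactly what was acquired before the jump, in reverse order, so no branch duplicates cleanup code. A minimal standalone sketch of the same pattern, with hypothetical resource names:

    #include <stdio.h>
    #include <stdlib.h>

    struct ctx { void *a; void *b; FILE *log; };

    static struct ctx *ctx_create(void)
    {
        struct ctx *c = calloc(1, sizeof(*c));
        if (!c)
            return NULL;

        c->a = malloc(64);
        if (!c->a)
            goto fail_a;        /* nothing else to undo yet */

        c->b = malloc(64);
        if (!c->b)
            goto fail_b;        /* undo 'a' below */

        c->log = fopen("/tmp/ctx.log", "w");
        if (!c->log)
            goto fail_log;      /* undo 'b', then 'a' */

        return c;

    fail_log:
        free(c->b);
    fail_b:
        free(c->a);
    fail_a:
        free(c);
        return NULL;
    }

The labels fall through into one another, which is what keeps each unwind step written exactly once.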
void kvm_free_pit(struct kvm *kvm)
{
- struct hrtimer *timer;
+ struct kvm_pit *pit = kvm->arch.vpit;
- if (kvm->arch.vpit) {
- kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &kvm->arch.vpit->dev);
- kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS,
- &kvm->arch.vpit->speaker_dev);
- kvm_unregister_irq_mask_notifier(kvm, 0,
- &kvm->arch.vpit->mask_notifier);
- kvm_unregister_irq_ack_notifier(kvm,
- &kvm->arch.vpit->pit_state.irq_ack_notifier);
- mutex_lock(&kvm->arch.vpit->pit_state.lock);
- timer = &kvm->arch.vpit->pit_state.timer;
- hrtimer_cancel(timer);
- flush_kthread_work(&kvm->arch.vpit->expired);
- kthread_stop(kvm->arch.vpit->worker_task);
- kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
- mutex_unlock(&kvm->arch.vpit->pit_state.lock);
- kfree(kvm->arch.vpit);
+ if (pit) {
+ kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->dev);
+ kvm_io_bus_unregister_dev(kvm, KVM_PIO_BUS, &pit->speaker_dev);
+ kvm_pit_set_reinject(pit, false);
+ hrtimer_cancel(&pit->pit_state.timer);
+ flush_kthread_work(&pit->expired);
+ kthread_stop(pit->worker_task);
+ kvm_free_irq_source_id(kvm, pit->irq_source_id);
+ kfree(pit);
}
}
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index c84990b..2f5af07 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -22,19 +22,18 @@
};
struct kvm_kpit_state {
+ /* All members before "struct mutex lock" are protected by the lock. */
struct kvm_kpit_channel_state channels[3];
u32 flags;
bool is_periodic;
s64 period; /* unit: ns */
struct hrtimer timer;
- atomic_t pending; /* accumulated triggered timers */
- bool reinject;
- struct kvm *kvm;
u32 speaker_data_on;
+
struct mutex lock;
- struct kvm_pit *pit;
- spinlock_t inject_lock;
- unsigned long irq_ack;
+ atomic_t reinject;
+ atomic_t pending; /* accumulated triggered timers */
+ atomic_t irq_ack;
struct kvm_irq_ack_notifier irq_ack_notifier;
};
@@ -57,9 +56,11 @@
#define KVM_MAX_PIT_INTR_INTERVAL HZ / 100
#define KVM_PIT_CHANNEL_MASK 0x3
-void kvm_pit_load_count(struct kvm *kvm, int channel, u32 val, int hpet_legacy_start);
struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags);
void kvm_free_pit(struct kvm *kvm);
-void kvm_pit_reset(struct kvm_pit *pit);
+
+void kvm_pit_load_count(struct kvm_pit *pit, int channel, u32 val,
+ int hpet_legacy_start);
+void kvm_pit_set_reinject(struct kvm_pit *pit, bool reinject);
#endif
diff --git a/arch/x86/kvm/ioapic.c b/arch/x86/kvm/ioapic.c
index 1facfd6..9db47090 100644
--- a/arch/x86/kvm/ioapic.c
+++ b/arch/x86/kvm/ioapic.c
@@ -94,7 +94,7 @@
static void rtc_irq_eoi_tracking_reset(struct kvm_ioapic *ioapic)
{
ioapic->rtc_status.pending_eoi = 0;
- bitmap_zero(ioapic->rtc_status.dest_map, KVM_MAX_VCPUS);
+ bitmap_zero(ioapic->rtc_status.dest_map.map, KVM_MAX_VCPUS);
}
static void kvm_rtc_eoi_tracking_restore_all(struct kvm_ioapic *ioapic);
@@ -117,16 +117,16 @@
return;
new_val = kvm_apic_pending_eoi(vcpu, e->fields.vector);
- old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+ old_val = test_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
if (new_val == old_val)
return;
if (new_val) {
- __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+ __set_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
ioapic->rtc_status.pending_eoi++;
} else {
- __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map);
+ __clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map.map);
ioapic->rtc_status.pending_eoi--;
rtc_status_pending_eoi_check_valid(ioapic);
}
@@ -156,7 +156,8 @@
static void rtc_irq_eoi(struct kvm_ioapic *ioapic, struct kvm_vcpu *vcpu)
{
- if (test_and_clear_bit(vcpu->vcpu_id, ioapic->rtc_status.dest_map)) {
+ if (test_and_clear_bit(vcpu->vcpu_id,
+ ioapic->rtc_status.dest_map.map)) {
--ioapic->rtc_status.pending_eoi;
rtc_status_pending_eoi_check_valid(ioapic);
}
@@ -236,10 +237,17 @@
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu, ulong *ioapic_handled_vectors)
{
struct kvm_ioapic *ioapic = vcpu->kvm->arch.vioapic;
+ struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
union kvm_ioapic_redirect_entry *e;
int index;
spin_lock(&ioapic->lock);
+
+ /* Make sure we see any missing RTC EOI */
+ if (test_bit(vcpu->vcpu_id, dest_map->map))
+ __set_bit(dest_map->vectors[vcpu->vcpu_id],
+ ioapic_handled_vectors);
+
for (index = 0; index < IOAPIC_NUM_PINS; index++) {
e = &ioapic->redirtbl[index];
if (e->fields.trig_mode == IOAPIC_LEVEL_TRIG ||
@@ -346,7 +354,7 @@
*/
BUG_ON(ioapic->rtc_status.pending_eoi != 0);
ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe,
- ioapic->rtc_status.dest_map);
+ &ioapic->rtc_status.dest_map);
ioapic->rtc_status.pending_eoi = (ret < 0 ? 0 : ret);
} else
ret = kvm_irq_delivery_to_apic(ioapic->kvm, NULL, &irqe, NULL);
@@ -407,8 +415,14 @@
static void __kvm_ioapic_update_eoi(struct kvm_vcpu *vcpu,
struct kvm_ioapic *ioapic, int vector, int trigger_mode)
{
- int i;
+ struct dest_map *dest_map = &ioapic->rtc_status.dest_map;
struct kvm_lapic *apic = vcpu->arch.apic;
+ int i;
+
+ /* RTC special handling */
+ if (test_bit(vcpu->vcpu_id, dest_map->map) &&
+ vector == dest_map->vectors[vcpu->vcpu_id])
+ rtc_irq_eoi(ioapic, vcpu);
for (i = 0; i < IOAPIC_NUM_PINS; i++) {
union kvm_ioapic_redirect_entry *ent = &ioapic->redirtbl[i];
@@ -416,8 +430,6 @@
if (ent->fields.vector != vector)
continue;
- if (i == RTC_GSI)
- rtc_irq_eoi(ioapic, vcpu);
/*
* We are dropping lock while calling ack notifiers because ack
* notifier callbacks for assigned devices call into IOAPIC
diff --git a/arch/x86/kvm/ioapic.h b/arch/x86/kvm/ioapic.h
index 2d16dc2..7d2692a 100644
--- a/arch/x86/kvm/ioapic.h
+++ b/arch/x86/kvm/ioapic.h
@@ -40,9 +40,21 @@
#define RTC_GSI -1U
#endif
+struct dest_map {
+ /* vcpu bitmap where IRQ has been sent */
+ DECLARE_BITMAP(map, KVM_MAX_VCPUS);
+
+ /*
+ * Vector sent to a given vcpu, only valid when
+ * the vcpu's bit in map is set
+ */
+ u8 vectors[KVM_MAX_VCPUS];
+};
+
+
struct rtc_status {
int pending_eoi;
- DECLARE_BITMAP(dest_map, KVM_MAX_VCPUS);
+ struct dest_map dest_map;
};
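
The dest_map change is the core of the ioapic rework: the bare destination bitmap gains a per-vcpu record of the vector that was actually delivered, so the EOI path can match on the vector itself instead of on a pin number. A simplified userspace model of the record/match protocol; the names here are illustrative, not the kernel's:

    #include <assert.h>

    #define MAX_VCPUS 64                    /* stands in for KVM_MAX_VCPUS */

    struct dest_map_model {
        unsigned long map;                  /* one bit per vcpu (<= 64 here) */
        unsigned char vectors[MAX_VCPUS];   /* valid only where map bit set */
    };

    /* delivery side: remember which vector went to this vcpu */
    static void record_delivery(struct dest_map_model *dm, int vcpu,
                                unsigned char vec)
    {
        dm->map |= 1UL << vcpu;
        dm->vectors[vcpu] = vec;
    }

    /* EOI side: only a hit if both the vcpu bit and the vector match */
    static int matches_on_eoi(struct dest_map_model *dm, int vcpu,
                              unsigned char vec)
    {
        return (dm->map & (1UL << vcpu)) && dm->vectors[vcpu] == vec;
    }

    int main(void)
    {
        struct dest_map_model dm = { 0 };

        record_delivery(&dm, 3, 0x28);
        assert(matches_on_eoi(&dm, 3, 0x28));   /* right vcpu, right vector */
        assert(!matches_on_eoi(&dm, 3, 0x30));  /* same vcpu, other vector */
        assert(!matches_on_eoi(&dm, 1, 0x28));  /* vcpu never targeted */
        return 0;
    }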
union kvm_ioapic_redirect_entry {
@@ -118,7 +130,8 @@
int level, bool line_status);
void kvm_ioapic_clear_all(struct kvm_ioapic *ioapic, int irq_source_id);
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
- struct kvm_lapic_irq *irq, unsigned long *dest_map);
+ struct kvm_lapic_irq *irq,
+ struct dest_map *dest_map);
int kvm_get_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
int kvm_set_ioapic(struct kvm *kvm, struct kvm_ioapic_state *state);
void kvm_ioapic_scan_entry(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/irq_comm.c b/arch/x86/kvm/irq_comm.c
index 3721736..54ead79 100644
--- a/arch/x86/kvm/irq_comm.c
+++ b/arch/x86/kvm/irq_comm.c
@@ -54,7 +54,7 @@
}
int kvm_irq_delivery_to_apic(struct kvm *kvm, struct kvm_lapic *src,
- struct kvm_lapic_irq *irq, unsigned long *dest_map)
+ struct kvm_lapic_irq *irq, struct dest_map *dest_map)
{
int i, r = -1;
struct kvm_vcpu *vcpu, *lowest = NULL;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 1482a58..d9ae1ce 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -485,10 +485,10 @@
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode,
- unsigned long *dest_map);
+ struct dest_map *dest_map);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
- unsigned long *dest_map)
+ struct dest_map *dest_map)
{
struct kvm_lapic *apic = vcpu->arch.apic;
@@ -685,8 +685,17 @@
return idx;
}
+static void kvm_apic_disabled_lapic_found(struct kvm *kvm)
+{
+ if (!kvm->arch.disabled_lapic_found) {
+ kvm->arch.disabled_lapic_found = true;
+ printk(KERN_INFO
+ "Disabled LAPIC found during irq injection\n");
+ }
+}
+
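
The helper factored out above is the usual print-once-per-VM pattern: a sticky flag guards the message so a misbehaving guest cannot flood the host log. The same pattern in standalone form; the struct and field names are hypothetical:

    #include <stdbool.h>
    #include <stdio.h>

    struct vm_model { bool disabled_lapic_found; };

    /* warn at most once per VM; a racy duplicate is harmless, as in KVM */
    static void warn_disabled_lapic(struct vm_model *vm)
    {
        if (!vm->disabled_lapic_found) {
            vm->disabled_lapic_found = true;
            fprintf(stderr, "Disabled LAPIC found during irq injection\n");
        }
    }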
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
- struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map)
+ struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map)
{
struct kvm_apic_map *map;
unsigned long bitmap = 1;
@@ -763,15 +772,8 @@
idx = kvm_vector_to_index(irq->vector,
dest_vcpus, &bitmap, 16);
- /*
- * We may find a hardware disabled LAPIC here, if that
- * is the case, print out a error message once for each
- * guest and return.
- */
- if (!dst[idx] && !kvm->arch.disabled_lapic_found) {
- kvm->arch.disabled_lapic_found = true;
- printk(KERN_INFO
- "Disabled LAPIC found during irq injection\n");
+ if (!dst[idx]) {
+ kvm_apic_disabled_lapic_found(kvm);
goto out;
}
@@ -859,16 +861,9 @@
idx = kvm_vector_to_index(irq->vector, dest_vcpus,
&bitmap, 16);
- /*
- * We may find a hardware disabled LAPIC here, if that
- * is the case, print out a error message once for each
- * guest and return
- */
dst = map->logical_map[cid][idx];
- if (!dst && !kvm->arch.disabled_lapic_found) {
- kvm->arch.disabled_lapic_found = true;
- printk(KERN_INFO
- "Disabled LAPIC found during irq injection\n");
+ if (!dst) {
+ kvm_apic_disabled_lapic_found(kvm);
goto out;
}
@@ -899,7 +894,7 @@
*/
static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
int vector, int level, int trig_mode,
- unsigned long *dest_map)
+ struct dest_map *dest_map)
{
int result = 0;
struct kvm_vcpu *vcpu = apic->vcpu;
@@ -919,8 +914,10 @@
result = 1;
- if (dest_map)
- __set_bit(vcpu->vcpu_id, dest_map);
+ if (dest_map) {
+ __set_bit(vcpu->vcpu_id, dest_map->map);
+ dest_map->vectors[vcpu->vcpu_id] = vector;
+ }
if (apic_test_vector(vector, apic->regs + APIC_TMR) != !!trig_mode) {
if (trig_mode)
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 5961009..f71183e 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -42,6 +42,9 @@
unsigned long pending_events;
unsigned int sipi_vector;
};
+
+struct dest_map;
+
int kvm_create_lapic(struct kvm_vcpu *vcpu);
void kvm_free_lapic(struct kvm_vcpu *vcpu);
@@ -60,11 +63,11 @@
void __kvm_apic_update_irr(u32 *pir, void *regs);
void kvm_apic_update_irr(struct kvm_vcpu *vcpu, u32 *pir);
int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq,
- unsigned long *dest_map);
+ struct dest_map *dest_map);
int kvm_apic_local_deliver(struct kvm_lapic *apic, int lvt_type);
bool kvm_irq_delivery_to_apic_fast(struct kvm *kvm, struct kvm_lapic *src,
- struct kvm_lapic_irq *irq, int *r, unsigned long *dest_map);
+ struct kvm_lapic_irq *irq, int *r, struct dest_map *dest_map);
u64 kvm_get_apic_base(struct kvm_vcpu *vcpu);
int kvm_set_apic_base(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 95a955d..2463de0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -41,6 +41,7 @@
#include <asm/cmpxchg.h>
#include <asm/io.h>
#include <asm/vmx.h>
+#include <asm/kvm_page_track.h>
/*
* When setting this variable to true it enables Two-Dimensional-Paging
@@ -776,62 +777,85 @@
return &slot->arch.lpage_info[level - 2][idx];
}
+static void update_gfn_disallow_lpage_count(struct kvm_memory_slot *slot,
+ gfn_t gfn, int count)
+{
+ struct kvm_lpage_info *linfo;
+ int i;
+
+ for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
+ linfo = lpage_info_slot(gfn, slot, i);
+ linfo->disallow_lpage += count;
+ WARN_ON(linfo->disallow_lpage < 0);
+ }
+}
+
+void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+ update_gfn_disallow_lpage_count(slot, gfn, 1);
+}
+
+void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn)
+{
+ update_gfn_disallow_lpage_count(slot, gfn, -1);
+}
+
static void account_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
- struct kvm_lpage_info *linfo;
gfn_t gfn;
- int i;
+ kvm->arch.indirect_shadow_pages++;
gfn = sp->gfn;
slots = kvm_memslots_for_spte_role(kvm, sp->role);
slot = __gfn_to_memslot(slots, gfn);
- for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
- linfo = lpage_info_slot(gfn, slot, i);
- linfo->write_count += 1;
- }
- kvm->arch.indirect_shadow_pages++;
+
+ /* the non-leaf shadow pages are kept read-only. */
+ if (sp->role.level > PT_PAGE_TABLE_LEVEL)
+ return kvm_slot_page_track_add_page(kvm, slot, gfn,
+ KVM_PAGE_TRACK_WRITE);
+
+ kvm_mmu_gfn_disallow_lpage(slot, gfn);
}
static void unaccount_shadowed(struct kvm *kvm, struct kvm_mmu_page *sp)
{
struct kvm_memslots *slots;
struct kvm_memory_slot *slot;
- struct kvm_lpage_info *linfo;
gfn_t gfn;
- int i;
+ kvm->arch.indirect_shadow_pages--;
gfn = sp->gfn;
slots = kvm_memslots_for_spte_role(kvm, sp->role);
slot = __gfn_to_memslot(slots, gfn);
- for (i = PT_DIRECTORY_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
- linfo = lpage_info_slot(gfn, slot, i);
- linfo->write_count -= 1;
- WARN_ON(linfo->write_count < 0);
- }
- kvm->arch.indirect_shadow_pages--;
+ if (sp->role.level > PT_PAGE_TABLE_LEVEL)
+ return kvm_slot_page_track_remove_page(kvm, slot, gfn,
+ KVM_PAGE_TRACK_WRITE);
+
+ kvm_mmu_gfn_allow_lpage(slot, gfn);
}
-static int __has_wrprotected_page(gfn_t gfn, int level,
- struct kvm_memory_slot *slot)
+static bool __mmu_gfn_lpage_is_disallowed(gfn_t gfn, int level,
+ struct kvm_memory_slot *slot)
{
struct kvm_lpage_info *linfo;
if (slot) {
linfo = lpage_info_slot(gfn, slot, level);
- return linfo->write_count;
+ return !!linfo->disallow_lpage;
}
- return 1;
+ return true;
}
-static int has_wrprotected_page(struct kvm_vcpu *vcpu, gfn_t gfn, int level)
+static bool mmu_gfn_lpage_is_disallowed(struct kvm_vcpu *vcpu, gfn_t gfn,
+ int level)
{
struct kvm_memory_slot *slot;
slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
- return __has_wrprotected_page(gfn, level, slot);
+ return __mmu_gfn_lpage_is_disallowed(gfn, level, slot);
}
static int host_mapping_level(struct kvm *kvm, gfn_t gfn)
@@ -897,7 +921,7 @@
max_level = min(kvm_x86_ops->get_lpage_level(), host_level);
for (level = PT_DIRECTORY_LEVEL; level <= max_level; ++level)
- if (__has_wrprotected_page(large_gfn, level, slot))
+ if (__mmu_gfn_lpage_is_disallowed(large_gfn, level, slot))
break;
return level - 1;
@@ -1323,23 +1347,29 @@
kvm_mmu_write_protect_pt_masked(kvm, slot, gfn_offset, mask);
}
-static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
+ struct kvm_memory_slot *slot, u64 gfn)
{
- struct kvm_memory_slot *slot;
struct kvm_rmap_head *rmap_head;
int i;
bool write_protected = false;
- slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
-
for (i = PT_PAGE_TABLE_LEVEL; i <= PT_MAX_HUGEPAGE_LEVEL; ++i) {
rmap_head = __gfn_to_rmap(gfn, i, slot);
- write_protected |= __rmap_write_protect(vcpu->kvm, rmap_head, true);
+ write_protected |= __rmap_write_protect(kvm, rmap_head, true);
}
return write_protected;
}
+static bool rmap_write_protect(struct kvm_vcpu *vcpu, u64 gfn)
+{
+ struct kvm_memory_slot *slot;
+
+ slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ return kvm_mmu_slot_gfn_write_protect(vcpu->kvm, slot, gfn);
+}
+
static bool kvm_zap_rmapp(struct kvm *kvm, struct kvm_rmap_head *rmap_head)
{
u64 *sptep;
@@ -1754,7 +1784,7 @@
static int nonpaging_sync_page(struct kvm_vcpu *vcpu,
struct kvm_mmu_page *sp)
{
- return 1;
+ return 0;
}
static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
@@ -1840,13 +1870,16 @@
return nr_unsync_leaf;
}
+#define INVALID_INDEX (-1)
+
static int mmu_unsync_walk(struct kvm_mmu_page *sp,
struct kvm_mmu_pages *pvec)
{
+ pvec->nr = 0;
if (!sp->unsync_children)
return 0;
- mmu_pages_add(pvec, sp, 0);
+ mmu_pages_add(pvec, sp, INVALID_INDEX);
return __mmu_unsync_walk(sp, pvec);
}
@@ -1883,37 +1916,35 @@
if ((_sp)->role.direct || (_sp)->role.invalid) {} else
/* @sp->gfn should be write-protected at the call site */
-static int __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
- struct list_head *invalid_list, bool clear_unsync)
+static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+ struct list_head *invalid_list)
{
if (sp->role.cr4_pae != !!is_pae(vcpu)) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
- return 1;
+ return false;
}
- if (clear_unsync)
- kvm_unlink_unsync_page(vcpu->kvm, sp);
-
- if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
+ if (vcpu->arch.mmu.sync_page(vcpu, sp) == 0) {
kvm_mmu_prepare_zap_page(vcpu->kvm, sp, invalid_list);
- return 1;
+ return false;
}
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
- return 0;
+ return true;
}
-static int kvm_sync_page_transient(struct kvm_vcpu *vcpu,
- struct kvm_mmu_page *sp)
+static void kvm_mmu_flush_or_zap(struct kvm_vcpu *vcpu,
+ struct list_head *invalid_list,
+ bool remote_flush, bool local_flush)
{
- LIST_HEAD(invalid_list);
- int ret;
+ if (!list_empty(invalid_list)) {
+ kvm_mmu_commit_zap_page(vcpu->kvm, invalid_list);
+ return;
+ }
- ret = __kvm_sync_page(vcpu, sp, &invalid_list, false);
- if (ret)
- kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-
- return ret;
+ if (remote_flush)
+ kvm_flush_remote_tlbs(vcpu->kvm);
+ else if (local_flush)
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
}
#ifdef CONFIG_KVM_MMU_AUDIT
@@ -1923,46 +1954,38 @@
static void mmu_audit_disable(void) { }
#endif
-static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
+static bool kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
struct list_head *invalid_list)
{
- return __kvm_sync_page(vcpu, sp, invalid_list, true);
+ kvm_unlink_unsync_page(vcpu->kvm, sp);
+ return __kvm_sync_page(vcpu, sp, invalid_list);
}
/* @gfn should be write-protected at the call site */
-static void kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
+static bool kvm_sync_pages(struct kvm_vcpu *vcpu, gfn_t gfn,
+ struct list_head *invalid_list)
{
struct kvm_mmu_page *s;
- LIST_HEAD(invalid_list);
- bool flush = false;
+ bool ret = false;
for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
if (!s->unsync)
continue;
WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
- kvm_unlink_unsync_page(vcpu->kvm, s);
- if ((s->role.cr4_pae != !!is_pae(vcpu)) ||
- (vcpu->arch.mmu.sync_page(vcpu, s))) {
- kvm_mmu_prepare_zap_page(vcpu->kvm, s, &invalid_list);
- continue;
- }
- flush = true;
+ ret |= kvm_sync_page(vcpu, s, invalid_list);
}
- kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
- if (flush)
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ return ret;
}
struct mmu_page_path {
- struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
- unsigned int idx[PT64_ROOT_LEVEL-1];
+ struct kvm_mmu_page *parent[PT64_ROOT_LEVEL];
+ unsigned int idx[PT64_ROOT_LEVEL];
};
#define for_each_sp(pvec, sp, parents, i) \
- for (i = mmu_pages_next(&pvec, &parents, -1), \
- sp = pvec.page[i].sp; \
+ for (i = mmu_pages_first(&pvec, &parents); \
i < pvec.nr && ({ sp = pvec.page[i].sp; 1;}); \
i = mmu_pages_next(&pvec, &parents, i))
@@ -1974,19 +1997,43 @@
for (n = i+1; n < pvec->nr; n++) {
struct kvm_mmu_page *sp = pvec->page[n].sp;
+ unsigned idx = pvec->page[n].idx;
+ int level = sp->role.level;
- if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
- parents->idx[0] = pvec->page[n].idx;
- return n;
- }
+ parents->idx[level-1] = idx;
+ if (level == PT_PAGE_TABLE_LEVEL)
+ break;
- parents->parent[sp->role.level-2] = sp;
- parents->idx[sp->role.level-1] = pvec->page[n].idx;
+ parents->parent[level-2] = sp;
}
return n;
}
+static int mmu_pages_first(struct kvm_mmu_pages *pvec,
+ struct mmu_page_path *parents)
+{
+ struct kvm_mmu_page *sp;
+ int level;
+
+ if (pvec->nr == 0)
+ return 0;
+
+ WARN_ON(pvec->page[0].idx != INVALID_INDEX);
+
+ sp = pvec->page[0].sp;
+ level = sp->role.level;
+ WARN_ON(level == PT_PAGE_TABLE_LEVEL);
+
+ parents->parent[level-2] = sp;
+
+ /* Also set up a sentinel. Further entries in pvec are all
+ * children of sp, so this element is never overwritten.
+ */
+ parents->parent[level-1] = NULL;
+ return mmu_pages_next(pvec, parents, 0);
+}
+
static void mmu_pages_clear_parents(struct mmu_page_path *parents)
{
struct kvm_mmu_page *sp;
@@ -1994,22 +2041,14 @@
do {
unsigned int idx = parents->idx[level];
-
sp = parents->parent[level];
if (!sp)
return;
+ WARN_ON(idx == INVALID_INDEX);
clear_unsync_child_bit(sp, idx);
level++;
- } while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
-}
-
-static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
- struct mmu_page_path *parents,
- struct kvm_mmu_pages *pvec)
-{
- parents->parent[parent->role.level-1] = NULL;
- pvec->nr = 0;
+ } while (!sp->unsync_children);
}
static void mmu_sync_children(struct kvm_vcpu *vcpu,
@@ -2020,30 +2059,36 @@
struct mmu_page_path parents;
struct kvm_mmu_pages pages;
LIST_HEAD(invalid_list);
+ bool flush = false;
- kvm_mmu_pages_init(parent, &parents, &pages);
while (mmu_unsync_walk(parent, &pages)) {
bool protected = false;
for_each_sp(pages, sp, parents, i)
protected |= rmap_write_protect(vcpu, sp->gfn);
- if (protected)
+ if (protected) {
kvm_flush_remote_tlbs(vcpu->kvm);
+ flush = false;
+ }
for_each_sp(pages, sp, parents, i) {
- kvm_sync_page(vcpu, sp, &invalid_list);
+ flush |= kvm_sync_page(vcpu, sp, &invalid_list);
mmu_pages_clear_parents(&parents);
}
- kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
- cond_resched_lock(&vcpu->kvm->mmu_lock);
- kvm_mmu_pages_init(parent, &parents, &pages);
+ if (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock)) {
+ kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+ cond_resched_lock(&vcpu->kvm->mmu_lock);
+ flush = false;
+ }
}
+
+ kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
}
static void __clear_sp_write_flooding_count(struct kvm_mmu_page *sp)
{
- sp->write_flooding_count = 0;
+ atomic_set(&sp->write_flooding_count, 0);
}
static void clear_sp_write_flooding_count(u64 *spte)
@@ -2069,6 +2114,8 @@
unsigned quadrant;
struct kvm_mmu_page *sp;
bool need_sync = false;
+ bool flush = false;
+ LIST_HEAD(invalid_list);
role = vcpu->arch.mmu.base_role;
role.level = level;
@@ -2092,8 +2139,16 @@
if (sp->role.word != role.word)
continue;
- if (sp->unsync && kvm_sync_page_transient(vcpu, sp))
- break;
+ if (sp->unsync) {
+ /* The page is good, but __kvm_sync_page might still end
+ * up zapping it. If so, break in order to rebuild it.
+ */
+ if (!__kvm_sync_page(vcpu, sp, &invalid_list))
+ break;
+
+ WARN_ON(!list_empty(&invalid_list));
+ kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
+ }
if (sp->unsync_children)
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
@@ -2112,16 +2167,24 @@
hlist_add_head(&sp->hash_link,
&vcpu->kvm->arch.mmu_page_hash[kvm_page_table_hashfn(gfn)]);
if (!direct) {
- if (rmap_write_protect(vcpu, gfn))
- kvm_flush_remote_tlbs(vcpu->kvm);
- if (level > PT_PAGE_TABLE_LEVEL && need_sync)
- kvm_sync_pages(vcpu, gfn);
-
+ /*
+ * We should do write protection before syncing pages;
+ * otherwise the content of a synced shadow page may be
+ * inconsistent with the guest page table.
+ */
account_shadowed(vcpu->kvm, sp);
+ if (level == PT_PAGE_TABLE_LEVEL &&
+ rmap_write_protect(vcpu, gfn))
+ kvm_flush_remote_tlbs(vcpu->kvm);
+
+ if (level > PT_PAGE_TABLE_LEVEL && need_sync)
+ flush |= kvm_sync_pages(vcpu, gfn, &invalid_list);
}
sp->mmu_valid_gen = vcpu->kvm->arch.mmu_valid_gen;
clear_page(sp->spt);
trace_kvm_mmu_get_page(sp, true);
+
+ kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
return sp;
}
@@ -2269,7 +2332,6 @@
if (parent->role.level == PT_PAGE_TABLE_LEVEL)
return 0;
- kvm_mmu_pages_init(parent, &parents, &pages);
while (mmu_unsync_walk(parent, &pages)) {
struct kvm_mmu_page *sp;
@@ -2278,7 +2340,6 @@
mmu_pages_clear_parents(&parents);
zapped++;
}
- kvm_mmu_pages_init(parent, &parents, &pages);
}
return zapped;
@@ -2354,8 +2415,8 @@
if (list_empty(&kvm->arch.active_mmu_pages))
return false;
- sp = list_entry(kvm->arch.active_mmu_pages.prev,
- struct kvm_mmu_page, link);
+ sp = list_last_entry(&kvm->arch.active_mmu_pages,
+ struct kvm_mmu_page, link);
kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
return true;
@@ -2408,7 +2469,7 @@
}
EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page);
-static void __kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+static void kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
{
trace_kvm_mmu_unsync_page(sp);
++vcpu->kvm->stat.mmu_unsync;
@@ -2417,37 +2478,26 @@
kvm_mmu_mark_parents_unsync(sp);
}
-static void kvm_unsync_pages(struct kvm_vcpu *vcpu, gfn_t gfn)
+static bool mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
+ bool can_unsync)
{
- struct kvm_mmu_page *s;
+ struct kvm_mmu_page *sp;
- for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
- if (s->unsync)
- continue;
- WARN_ON(s->role.level != PT_PAGE_TABLE_LEVEL);
- __kvm_unsync_page(vcpu, s);
- }
-}
+ if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+ return true;
-static int mmu_need_write_protect(struct kvm_vcpu *vcpu, gfn_t gfn,
- bool can_unsync)
-{
- struct kvm_mmu_page *s;
- bool need_unsync = false;
-
- for_each_gfn_indirect_valid_sp(vcpu->kvm, s, gfn) {
+ for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
if (!can_unsync)
- return 1;
+ return true;
- if (s->role.level != PT_PAGE_TABLE_LEVEL)
- return 1;
+ if (sp->unsync)
+ continue;
- if (!s->unsync)
- need_unsync = true;
+ WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
+ kvm_unsync_page(vcpu, sp);
}
- if (need_unsync)
- kvm_unsync_pages(vcpu, gfn);
- return 0;
+
+ return false;
}
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
@@ -2503,7 +2553,7 @@
* be fixed if guest refault.
*/
if (level > PT_PAGE_TABLE_LEVEL &&
- has_wrprotected_page(vcpu, gfn, level))
+ mmu_gfn_lpage_is_disallowed(vcpu, gfn, level))
goto done;
spte |= PT_WRITABLE_MASK | SPTE_MMU_WRITEABLE;
@@ -2768,7 +2818,7 @@
if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
level == PT_PAGE_TABLE_LEVEL &&
PageTransCompound(pfn_to_page(pfn)) &&
- !has_wrprotected_page(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
+ !mmu_gfn_lpage_is_disallowed(vcpu, gfn, PT_DIRECTORY_LEVEL)) {
unsigned long mask;
/*
* mmu_notifier_retry was successful and we hold the
@@ -2796,20 +2846,16 @@
static bool handle_abnormal_pfn(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn,
kvm_pfn_t pfn, unsigned access, int *ret_val)
{
- bool ret = true;
-
/* The pfn is invalid, report the error! */
if (unlikely(is_error_pfn(pfn))) {
*ret_val = kvm_handle_bad_page(vcpu, gfn, pfn);
- goto exit;
+ return true;
}
if (unlikely(is_noslot_pfn(pfn)))
vcpu_cache_mmio_info(vcpu, gva, gfn, access);
- ret = false;
-exit:
- return ret;
+ return false;
}
static bool page_fault_can_be_fast(u32 error_code)
@@ -3273,7 +3319,7 @@
return __is_rsvd_bits_set(&mmu->shadow_zero_check, spte, level);
}
-static bool quickly_check_mmio_pf(struct kvm_vcpu *vcpu, u64 addr, bool direct)
+static bool mmio_info_in_cache(struct kvm_vcpu *vcpu, u64 addr, bool direct)
{
if (direct)
return vcpu_match_mmio_gpa(vcpu, addr);
@@ -3332,7 +3378,7 @@
u64 spte;
bool reserved;
- if (quickly_check_mmio_pf(vcpu, addr, direct))
+ if (mmio_info_in_cache(vcpu, addr, direct))
return RET_MMIO_PF_EMULATE;
reserved = walk_shadow_page_get_mmio_spte(vcpu, addr, &spte);
@@ -3362,20 +3408,53 @@
}
EXPORT_SYMBOL_GPL(handle_mmio_page_fault);
+static bool page_fault_handle_page_track(struct kvm_vcpu *vcpu,
+ u32 error_code, gfn_t gfn)
+{
+ if (unlikely(error_code & PFERR_RSVD_MASK))
+ return false;
+
+ if (!(error_code & PFERR_PRESENT_MASK) ||
+ !(error_code & PFERR_WRITE_MASK))
+ return false;
+
+ /*
+ * The guest is writing a page that is write-tracked; such a
+ * fault cannot be fixed by the page fault handler.
+ */
+ if (kvm_page_track_is_active(vcpu, gfn, KVM_PAGE_TRACK_WRITE))
+ return true;
+
+ return false;
+}
+
+static void shadow_page_table_clear_flood(struct kvm_vcpu *vcpu, gva_t addr)
+{
+ struct kvm_shadow_walk_iterator iterator;
+ u64 spte;
+
+ if (!VALID_PAGE(vcpu->arch.mmu.root_hpa))
+ return;
+
+ walk_shadow_page_lockless_begin(vcpu);
+ for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
+ clear_sp_write_flooding_count(iterator.sptep);
+ if (!is_shadow_present_pte(spte))
+ break;
+ }
+ walk_shadow_page_lockless_end(vcpu);
+}
+
static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
u32 error_code, bool prefault)
{
- gfn_t gfn;
+ gfn_t gfn = gva >> PAGE_SHIFT;
int r;
pgprintk("%s: gva %lx error %x\n", __func__, gva, error_code);
- if (unlikely(error_code & PFERR_RSVD_MASK)) {
- r = handle_mmio_page_fault(vcpu, gva, true);
-
- if (likely(r != RET_MMIO_PF_INVALID))
- return r;
- }
+ if (page_fault_handle_page_track(vcpu, error_code, gfn))
+ return 1;
r = mmu_topup_memory_caches(vcpu);
if (r)
@@ -3383,7 +3462,6 @@
MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
- gfn = gva >> PAGE_SHIFT;
return nonpaging_map(vcpu, gva & PAGE_MASK,
error_code, gfn, prefault);
@@ -3460,12 +3538,8 @@
MMU_WARN_ON(!VALID_PAGE(vcpu->arch.mmu.root_hpa));
- if (unlikely(error_code & PFERR_RSVD_MASK)) {
- r = handle_mmio_page_fault(vcpu, gpa, true);
-
- if (likely(r != RET_MMIO_PF_INVALID))
- return r;
- }
+ if (page_fault_handle_page_track(vcpu, error_code, gfn))
+ return 1;
r = mmu_topup_memory_caches(vcpu);
if (r)
@@ -3558,13 +3632,24 @@
return false;
}
-static inline bool is_last_gpte(struct kvm_mmu *mmu, unsigned level, unsigned gpte)
+static inline bool is_last_gpte(struct kvm_mmu *mmu,
+ unsigned level, unsigned gpte)
{
- unsigned index;
+ /*
+ * PT_PAGE_TABLE_LEVEL always terminates. The RHS has bit 7 set
+ * iff level <= PT_PAGE_TABLE_LEVEL, which for our purpose means
+ * level == PT_PAGE_TABLE_LEVEL; set PT_PAGE_SIZE_MASK in gpte then.
+ */
+ gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
- index = level - 1;
- index |= (gpte & PT_PAGE_SIZE_MASK) >> (PT_PAGE_SIZE_SHIFT - 2);
- return mmu->last_pte_bitmap & (1 << index);
+ /*
+ * The RHS has bit 7 set iff level < mmu->last_nonleaf_level.
+ * If it is clear, there are no large pages at this level, so clear
+ * PT_PAGE_SIZE_MASK in gpte if that is the case.
+ */
+ gpte &= level - mmu->last_nonleaf_level;
+
+ return gpte & PT_PAGE_SIZE_MASK;
}
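
The rewritten is_last_gpte() trades the old last_pte_bitmap lookup for branch-free arithmetic on bit 7, the PS bit. The trick rests on unsigned wraparound: subtracting past zero yields an all-ones value whose bit 7 is set. A standalone check that it matches the straightforward formulation for every combination of level and last-nonleaf level used on x86 (constants inlined for the demo):

    #include <assert.h>

    #define PT_PAGE_TABLE_LEVEL 1
    #define PT_PAGE_SIZE_MASK   (1u << 7)

    static unsigned trick(unsigned last_nonleaf, unsigned level, unsigned gpte)
    {
        /* level == 1: the RHS wraps to all-ones, forcing bit 7 on */
        gpte |= level - PT_PAGE_TABLE_LEVEL - 1;
        /* level >= last_nonleaf: small non-negative RHS with bit 7 clear,
         * so the PS bit is masked away; otherwise bit 7 survives */
        gpte &= level - last_nonleaf;
        return gpte & PT_PAGE_SIZE_MASK;
    }

    static unsigned obvious(unsigned last_nonleaf, unsigned level, unsigned gpte)
    {
        if (level == PT_PAGE_TABLE_LEVEL)
            return PT_PAGE_SIZE_MASK;       /* always a leaf */
        if (level >= last_nonleaf)
            return 0;                       /* no large pages at this level */
        return gpte & PT_PAGE_SIZE_MASK;    /* honour the PS bit */
    }

    int main(void)
    {
        for (unsigned lnl = 2; lnl <= 5; lnl++)
            for (unsigned level = 1; level <= 4; level++)
                for (unsigned ps = 0; ps <= 1; ps++)
                    assert(trick(lnl, level, ps << 7) ==
                           obvious(lnl, level, ps << 7));
        return 0;
    }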
#define PTTYPE_EPT 18 /* arbitrary */
@@ -3836,22 +3921,13 @@
}
}
-static void update_last_pte_bitmap(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
+static void update_last_nonleaf_level(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu)
{
- u8 map;
- unsigned level, root_level = mmu->root_level;
- const unsigned ps_set_index = 1 << 2; /* bit 2 of index: ps */
+ unsigned root_level = mmu->root_level;
- if (root_level == PT32E_ROOT_LEVEL)
- --root_level;
- /* PT_PAGE_TABLE_LEVEL always terminates */
- map = 1 | (1 << ps_set_index);
- for (level = PT_DIRECTORY_LEVEL; level <= root_level; ++level) {
- if (level <= PT_PDPE_LEVEL
- && (mmu->root_level >= PT32E_ROOT_LEVEL || is_pse(vcpu)))
- map |= 1 << (ps_set_index | (level - 1));
- }
- mmu->last_pte_bitmap = map;
+ mmu->last_nonleaf_level = root_level;
+ if (root_level == PT32_ROOT_LEVEL && is_pse(vcpu))
+ mmu->last_nonleaf_level++;
}
static void paging64_init_context_common(struct kvm_vcpu *vcpu,
@@ -3863,7 +3939,7 @@
reset_rsvds_bits_mask(vcpu, context);
update_permission_bitmask(vcpu, context, false);
- update_last_pte_bitmap(vcpu, context);
+ update_last_nonleaf_level(vcpu, context);
MMU_WARN_ON(!is_pae(vcpu));
context->page_fault = paging64_page_fault;
@@ -3890,7 +3966,7 @@
reset_rsvds_bits_mask(vcpu, context);
update_permission_bitmask(vcpu, context, false);
- update_last_pte_bitmap(vcpu, context);
+ update_last_nonleaf_level(vcpu, context);
context->page_fault = paging32_page_fault;
context->gva_to_gpa = paging32_gva_to_gpa;
@@ -3948,7 +4024,7 @@
}
update_permission_bitmask(vcpu, context, false);
- update_last_pte_bitmap(vcpu, context);
+ update_last_nonleaf_level(vcpu, context);
reset_tdp_shadow_zero_bits_mask(vcpu, context);
}
@@ -4054,7 +4130,7 @@
}
update_permission_bitmask(vcpu, g_context, false);
- update_last_pte_bitmap(vcpu, g_context);
+ update_last_nonleaf_level(vcpu, g_context);
}
static void init_kvm_mmu(struct kvm_vcpu *vcpu)
@@ -4125,18 +4201,6 @@
return (old & ~new & PT64_PERM_MASK) != 0;
}
-static void mmu_pte_write_flush_tlb(struct kvm_vcpu *vcpu, bool zap_page,
- bool remote_flush, bool local_flush)
-{
- if (zap_page)
- return;
-
- if (remote_flush)
- kvm_flush_remote_tlbs(vcpu->kvm);
- else if (local_flush)
- kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
-}
-
static u64 mmu_pte_write_fetch_gpte(struct kvm_vcpu *vcpu, gpa_t *gpa,
const u8 *new, int *bytes)
{
@@ -4186,7 +4250,8 @@
if (sp->role.level == PT_PAGE_TABLE_LEVEL)
return false;
- return ++sp->write_flooding_count >= 3;
+ atomic_inc(&sp->write_flooding_count);
+ return atomic_read(&sp->write_flooding_count) >= 3;
}
/*
@@ -4248,15 +4313,15 @@
return spte;
}
-void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
- const u8 *new, int bytes)
+static void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
+ const u8 *new, int bytes)
{
gfn_t gfn = gpa >> PAGE_SHIFT;
struct kvm_mmu_page *sp;
LIST_HEAD(invalid_list);
u64 entry, gentry, *spte;
int npte;
- bool remote_flush, local_flush, zap_page;
+ bool remote_flush, local_flush;
union kvm_mmu_page_role mask = { };
mask.cr0_wp = 1;
@@ -4273,7 +4338,7 @@
if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
return;
- zap_page = remote_flush = local_flush = false;
+ remote_flush = local_flush = false;
pgprintk("%s: gpa %llx bytes %d\n", __func__, gpa, bytes);
@@ -4293,8 +4358,7 @@
for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
if (detect_write_misaligned(sp, gpa, bytes) ||
detect_write_flooding(sp)) {
- zap_page |= !!kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
- &invalid_list);
+ kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
++vcpu->kvm->stat.mmu_flooded;
continue;
}
@@ -4316,8 +4380,7 @@
++spte;
}
}
- mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
- kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
+ kvm_mmu_flush_or_zap(vcpu, &invalid_list, remote_flush, local_flush);
kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
spin_unlock(&vcpu->kvm->mmu_lock);
}
@@ -4354,32 +4417,34 @@
kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
}
-static bool is_mmio_page_fault(struct kvm_vcpu *vcpu, gva_t addr)
-{
- if (vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu))
- return vcpu_match_mmio_gpa(vcpu, addr);
-
- return vcpu_match_mmio_gva(vcpu, addr);
-}
-
int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code,
void *insn, int insn_len)
{
int r, emulation_type = EMULTYPE_RETRY;
enum emulation_result er;
+ bool direct = vcpu->arch.mmu.direct_map || mmu_is_nested(vcpu);
+
+ if (unlikely(error_code & PFERR_RSVD_MASK)) {
+ r = handle_mmio_page_fault(vcpu, cr2, direct);
+ if (r == RET_MMIO_PF_EMULATE) {
+ emulation_type = 0;
+ goto emulate;
+ }
+ if (r == RET_MMIO_PF_RETRY)
+ return 1;
+ if (r < 0)
+ return r;
+ }
r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
if (r < 0)
- goto out;
+ return r;
+ if (!r)
+ return 1;
- if (!r) {
- r = 1;
- goto out;
- }
-
- if (is_mmio_page_fault(vcpu, cr2))
+ if (mmio_info_in_cache(vcpu, cr2, direct))
emulation_type = 0;
-
+emulate:
er = x86_emulate_instruction(vcpu, cr2, emulation_type, insn, insn_len);
switch (er) {
@@ -4393,8 +4458,6 @@
default:
BUG();
}
-out:
- return r;
}
EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
@@ -4463,6 +4526,21 @@
init_kvm_mmu(vcpu);
}
+void kvm_mmu_init_vm(struct kvm *kvm)
+{
+ struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+
+ node->track_write = kvm_mmu_pte_write;
+ kvm_page_track_register_notifier(kvm, node);
+}
+
+void kvm_mmu_uninit_vm(struct kvm *kvm)
+{
+ struct kvm_page_track_notifier_node *node = &kvm->arch.mmu_sp_tracker;
+
+ kvm_page_track_unregister_notifier(kvm, node);
+}
+
/* The return value indicates if tlb flush on all vcpus is needed. */
typedef bool (*slot_level_handler) (struct kvm *kvm, struct kvm_rmap_head *rmap_head);
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 55ffb7b..58fe98a 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -174,4 +174,9 @@
void kvm_mmu_invalidate_zap_all_pages(struct kvm *kvm);
void kvm_zap_gfn_range(struct kvm *kvm, gfn_t gfn_start, gfn_t gfn_end);
+
+void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
+void kvm_mmu_gfn_allow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
+bool kvm_mmu_slot_gfn_write_protect(struct kvm *kvm,
+ struct kvm_memory_slot *slot, u64 gfn);
#endif
diff --git a/arch/x86/kvm/page_track.c b/arch/x86/kvm/page_track.c
new file mode 100644
index 0000000..11f7643
--- /dev/null
+++ b/arch/x86/kvm/page_track.c
@@ -0,0 +1,222 @@
+/*
+ * Support KVM guest page tracking
+ *
+ * This feature allows us to track page accesses in the guest. Currently,
+ * only write access is tracked.
+ *
+ * Copyright(C) 2015 Intel Corporation.
+ *
+ * Author:
+ * Xiao Guangrong <guangrong.xiao@linux.intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/kvm_host.h>
+#include <asm/kvm_page_track.h>
+
+#include "mmu.h"
+
+void kvm_page_track_free_memslot(struct kvm_memory_slot *free,
+ struct kvm_memory_slot *dont)
+{
+ int i;
+
+ for (i = 0; i < KVM_PAGE_TRACK_MAX; i++)
+ if (!dont || free->arch.gfn_track[i] !=
+ dont->arch.gfn_track[i]) {
+ kvfree(free->arch.gfn_track[i]);
+ free->arch.gfn_track[i] = NULL;
+ }
+}
+
+int kvm_page_track_create_memslot(struct kvm_memory_slot *slot,
+ unsigned long npages)
+{
+ int i;
+
+ for (i = 0; i < KVM_PAGE_TRACK_MAX; i++) {
+ slot->arch.gfn_track[i] = kvm_kvzalloc(npages *
+ sizeof(*slot->arch.gfn_track[i]));
+ if (!slot->arch.gfn_track[i])
+ goto track_free;
+ }
+
+ return 0;
+
+track_free:
+ kvm_page_track_free_memslot(slot, NULL);
+ return -ENOMEM;
+}
+
+static inline bool page_track_mode_is_valid(enum kvm_page_track_mode mode)
+{
+ if (mode < 0 || mode >= KVM_PAGE_TRACK_MAX)
+ return false;
+
+ return true;
+}
+
+static void update_gfn_track(struct kvm_memory_slot *slot, gfn_t gfn,
+ enum kvm_page_track_mode mode, short count)
+{
+ int index, val;
+
+ index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
+
+ val = slot->arch.gfn_track[mode][index];
+
+ if (WARN_ON(val + count < 0 || val + count > USHRT_MAX))
+ return;
+
+ slot->arch.gfn_track[mode][index] += count;
+}
+
+/*
+ * Add a guest page to the tracking pool so that corresponding accesses to
+ * that page will be intercepted.
+ *
+ * It should be called under the protection of both mmu-lock and kvm->srcu
+ * or kvm->slots_lock.
+ *
+ * @kvm: the guest instance we are interested in.
+ * @slot: the memory slot to which @gfn belongs.
+ * @gfn: the guest page.
+ * @mode: tracking mode, currently only write track is supported.
+ */
+void kvm_slot_page_track_add_page(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ enum kvm_page_track_mode mode)
+{
+
+ if (WARN_ON(!page_track_mode_is_valid(mode)))
+ return;
+
+ update_gfn_track(slot, gfn, mode, 1);
+
+ /*
+ * A new tracker disables large-page mapping for the
+ * tracked page.
+ */
+ kvm_mmu_gfn_disallow_lpage(slot, gfn);
+
+ if (mode == KVM_PAGE_TRACK_WRITE)
+ if (kvm_mmu_slot_gfn_write_protect(kvm, slot, gfn))
+ kvm_flush_remote_tlbs(kvm);
+}
+
+/*
+ * Remove a guest page from the tracking pool, which stops the interception
+ * of corresponding accesses to that page. It is the opposite operation of
+ * kvm_slot_page_track_add_page().
+ *
+ * It should be called under the protection of both mmu-lock and kvm->srcu
+ * or kvm->slots_lock.
+ *
+ * @kvm: the guest instance we are interested in.
+ * @slot: the memory slot to which @gfn belongs.
+ * @gfn: the guest page.
+ * @mode: tracking mode, currently only write track is supported.
+ */
+void kvm_slot_page_track_remove_page(struct kvm *kvm,
+ struct kvm_memory_slot *slot, gfn_t gfn,
+ enum kvm_page_track_mode mode)
+{
+ if (WARN_ON(!page_track_mode_is_valid(mode)))
+ return;
+
+ update_gfn_track(slot, gfn, mode, -1);
+
+ /*
+ * Allow large-page mapping for the tracked page
+ * again once the last tracker is gone.
+ */
+ kvm_mmu_gfn_allow_lpage(slot, gfn);
+}
+
+/*
+ * Check whether the corresponding access on the specified guest page is tracked.
+ */
+bool kvm_page_track_is_active(struct kvm_vcpu *vcpu, gfn_t gfn,
+ enum kvm_page_track_mode mode)
+{
+ struct kvm_memory_slot *slot = kvm_vcpu_gfn_to_memslot(vcpu, gfn);
+ int index = gfn_to_index(gfn, slot->base_gfn, PT_PAGE_TABLE_LEVEL);
+
+ if (WARN_ON(!page_track_mode_is_valid(mode)))
+ return false;
+
+ return !!ACCESS_ONCE(slot->arch.gfn_track[mode][index]);
+}
+
+void kvm_page_track_init(struct kvm *kvm)
+{
+ struct kvm_page_track_notifier_head *head;
+
+ head = &kvm->arch.track_notifier_head;
+ init_srcu_struct(&head->track_srcu);
+ INIT_HLIST_HEAD(&head->track_notifier_list);
+}
+
+/*
+ * Register the notifier so that the events intercepted on the tracked
+ * guest pages can be received.
+ */
+void
+kvm_page_track_register_notifier(struct kvm *kvm,
+ struct kvm_page_track_notifier_node *n)
+{
+ struct kvm_page_track_notifier_head *head;
+
+ head = &kvm->arch.track_notifier_head;
+
+ spin_lock(&kvm->mmu_lock);
+ hlist_add_head_rcu(&n->node, &head->track_notifier_list);
+ spin_unlock(&kvm->mmu_lock);
+}
+
+/*
+ * Stop receiving intercepted events. It is the opposite operation of
+ * kvm_page_track_register_notifier().
+ */
+void
+kvm_page_track_unregister_notifier(struct kvm *kvm,
+ struct kvm_page_track_notifier_node *n)
+{
+ struct kvm_page_track_notifier_head *head;
+
+ head = &kvm->arch.track_notifier_head;
+
+ spin_lock(&kvm->mmu_lock);
+ hlist_del_rcu(&n->node);
+ spin_unlock(&kvm->mmu_lock);
+ synchronize_srcu(&head->track_srcu);
+}
+
+/*
+ * Notify the nodes that a write access has been intercepted and that
+ * write emulation has finished.
+ *
+ * Each node must figure out by itself whether the written page is one
+ * it is interested in.
+ */
+void kvm_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa, const u8 *new,
+ int bytes)
+{
+ struct kvm_page_track_notifier_head *head;
+ struct kvm_page_track_notifier_node *n;
+ int idx;
+
+ head = &vcpu->kvm->arch.track_notifier_head;
+
+ if (hlist_empty(&head->track_notifier_list))
+ return;
+
+ idx = srcu_read_lock(&head->track_srcu);
+ hlist_for_each_entry_rcu(n, &head->track_notifier_list, node)
+ if (n->track_write)
+ n->track_write(vcpu, gpa, new, bytes);
+ srcu_read_unlock(&head->track_srcu, idx);
+}
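
To make the new interface concrete, here is a hedged sketch of a hypothetical in-kernel consumer. Only kvm_page_track_notifier_node, kvm_page_track_register_notifier() and kvm_slot_page_track_add_page() come from this patch; the demo_* names are invented, and a real caller registers its notifier once per VM the way kvm_mmu_init_vm() does later in this series.

    #include <linux/kvm_host.h>
    #include <asm/kvm_page_track.h>

    /* hypothetical consumer: log every emulated write to pages we track */
    static void demo_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                                 const u8 *new, int bytes)
    {
        /* called after write emulation finishes; we must filter for our
         * own pages, as the notifier comment above requires */
        pr_info("tracked write: gpa=0x%llx bytes=%d\n", gpa, bytes);
    }

    static struct kvm_page_track_notifier_node demo_node = {
        .track_write = demo_track_write,
    };

    static void demo_start_tracking(struct kvm *kvm,
                                    struct kvm_memory_slot *slot, gfn_t gfn)
    {
        kvm_page_track_register_notifier(kvm, &demo_node);

        /* per the comments above: hold mmu_lock plus srcu or slots_lock */
        spin_lock(&kvm->mmu_lock);
        kvm_slot_page_track_add_page(kvm, slot, gfn, KVM_PAGE_TRACK_WRITE);
        spin_unlock(&kvm->mmu_lock);
    }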
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 6c9fed9..e159a81 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -189,8 +189,11 @@
((gpte & VMX_EPT_EXECUTABLE_MASK) ? ACC_EXEC_MASK : 0) |
ACC_USER_MASK;
#else
- access = (gpte & (PT_WRITABLE_MASK | PT_USER_MASK)) | ACC_EXEC_MASK;
- access &= ~(gpte >> PT64_NX_SHIFT);
+ BUILD_BUG_ON(ACC_EXEC_MASK != PT_PRESENT_MASK);
+ BUILD_BUG_ON(ACC_EXEC_MASK != 1);
+ access = gpte & (PT_WRITABLE_MASK | PT_USER_MASK | PT_PRESENT_MASK);
+ /* Combine NX with P (which is set here) to get ACC_EXEC_MASK. */
+ access ^= (gpte >> PT64_NX_SHIFT);
#endif
return access;
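
The two BUILD_BUG_ONs pin ACC_EXEC_MASK to bit 0, the same bit as PT_PRESENT_MASK, and that is what lets a single XOR with the shifted NX bit compute "executable = present and not NX" with no branch. A small userspace check of the identity (constants inlined):

    #include <assert.h>
    #include <stdint.h>

    #define PT_PRESENT_MASK 1ULL
    #define PT64_NX_SHIFT   63

    /* exec permission = P ^ NX: 1 only for a present, non-NX entry */
    static unsigned exec_bit(uint64_t gpte)
    {
        uint64_t access = gpte & PT_PRESENT_MASK;
        access ^= gpte >> PT64_NX_SHIFT;
        return (unsigned)(access & 1);
    }

    int main(void)
    {
        assert(exec_bit(PT_PRESENT_MASK) == 1);                /* P=1, NX=0 */
        assert(exec_bit(PT_PRESENT_MASK | 1ULL << 63) == 0);   /* P=1, NX=1 */
        assert(exec_bit(0) == 0);                              /* not present */
        return 0;
    }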
@@ -249,7 +252,7 @@
return ret;
kvm_vcpu_mark_page_dirty(vcpu, table_gfn);
- walker->ptes[level] = pte;
+ walker->ptes[level - 1] = pte;
}
return 0;
}
@@ -702,24 +705,17 @@
pgprintk("%s: addr %lx err %x\n", __func__, addr, error_code);
- if (unlikely(error_code & PFERR_RSVD_MASK)) {
- r = handle_mmio_page_fault(vcpu, addr, mmu_is_nested(vcpu));
- if (likely(r != RET_MMIO_PF_INVALID))
- return r;
-
- /*
- * page fault with PFEC.RSVD = 1 is caused by shadow
- * page fault, should not be used to walk guest page
- * table.
- */
- error_code &= ~PFERR_RSVD_MASK;
- };
-
r = mmu_topup_memory_caches(vcpu);
if (r)
return r;
/*
+ * If PFEC.RSVD is set, this is a shadow page fault.
+ * The bit needs to be cleared before walking guest page tables.
+ */
+ error_code &= ~PFERR_RSVD_MASK;
+
+ /*
* Look up the guest pte for the faulting address.
*/
r = FNAME(walk_addr)(&walker, vcpu, addr, error_code);
@@ -735,6 +731,11 @@
return 0;
}
+ if (page_fault_handle_page_track(vcpu, error_code, walker.gfn)) {
+ shadow_page_table_clear_flood(vcpu, addr);
+ return 1;
+ }
+
vcpu->arch.write_fault_to_shadow_pgtable = false;
is_self_change_mapping = FNAME(is_self_change_mapping)(vcpu,
@@ -945,7 +946,7 @@
if (kvm_vcpu_read_guest_atomic(vcpu, pte_gpa, &gpte,
sizeof(pt_element_t)))
- return -EINVAL;
+ return 0;
if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
vcpu->kvm->tlbs_dirty++;
@@ -977,7 +978,7 @@
host_writable);
}
- return !nr_present;
+ return nr_present;
}
#undef pt_element_t
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index c13a64b..9507038 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1858,8 +1858,7 @@
static int vmmcall_interception(struct vcpu_svm *svm)
{
svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
- kvm_emulate_hypercall(&svm->vcpu);
- return 1;
+ return kvm_emulate_hypercall(&svm->vcpu);
}
static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 164eb9e..e512aa7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -861,7 +861,6 @@
static u64 construct_eptp(unsigned long root_hpa);
static void kvm_cpu_vmxon(u64 addr);
static void kvm_cpu_vmxoff(void);
-static bool vmx_mpx_supported(void);
static bool vmx_xsaves_supported(void);
static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
static void vmx_set_segment(struct kvm_vcpu *vcpu,
@@ -961,25 +960,36 @@
MSR_EFER, MSR_TSC_AUX, MSR_STAR,
};
-static inline bool is_page_fault(u32 intr_info)
+static inline bool is_exception_n(u32 intr_info, u8 vector)
{
return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
INTR_INFO_VALID_MASK)) ==
- (INTR_TYPE_HARD_EXCEPTION | PF_VECTOR | INTR_INFO_VALID_MASK);
+ (INTR_TYPE_HARD_EXCEPTION | vector | INTR_INFO_VALID_MASK);
+}
+
+static inline bool is_debug(u32 intr_info)
+{
+ return is_exception_n(intr_info, DB_VECTOR);
+}
+
+static inline bool is_breakpoint(u32 intr_info)
+{
+ return is_exception_n(intr_info, BP_VECTOR);
+}
+
+static inline bool is_page_fault(u32 intr_info)
+{
+ return is_exception_n(intr_info, PF_VECTOR);
}
static inline bool is_no_device(u32 intr_info)
{
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
- INTR_INFO_VALID_MASK)) ==
- (INTR_TYPE_HARD_EXCEPTION | NM_VECTOR | INTR_INFO_VALID_MASK);
+ return is_exception_n(intr_info, NM_VECTOR);
}
static inline bool is_invalid_opcode(u32 intr_info)
{
- return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
- INTR_INFO_VALID_MASK)) ==
- (INTR_TYPE_HARD_EXCEPTION | UD_VECTOR | INTR_INFO_VALID_MASK);
+ return is_exception_n(intr_info, UD_VECTOR);
}
static inline bool is_external_interrupt(u32 intr_info)
@@ -2584,7 +2594,7 @@
VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
- if (vmx_mpx_supported())
+ if (kvm_mpx_supported())
vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
/* We support free control of debug control saving. */
@@ -2605,7 +2615,7 @@
VM_ENTRY_LOAD_IA32_PAT;
vmx->nested.nested_vmx_entry_ctls_high |=
(VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
- if (vmx_mpx_supported())
+ if (kvm_mpx_supported())
vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
/* We support free control of debug control loading. */
@@ -2849,7 +2859,7 @@
msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
break;
case MSR_IA32_BNDCFGS:
- if (!vmx_mpx_supported())
+ if (!kvm_mpx_supported())
return 1;
msr_info->data = vmcs_read64(GUEST_BNDCFGS);
break;
@@ -2926,7 +2936,7 @@
vmcs_writel(GUEST_SYSENTER_ESP, data);
break;
case MSR_IA32_BNDCFGS:
- if (!vmx_mpx_supported())
+ if (!kvm_mpx_supported())
return 1;
vmcs_write64(GUEST_BNDCFGS, data);
break;
@@ -3399,7 +3409,7 @@
for (i = j = 0; i < max_shadow_read_write_fields; i++) {
switch (shadow_read_write_fields[i]) {
case GUEST_BNDCFGS:
- if (!vmx_mpx_supported())
+ if (!kvm_mpx_supported())
continue;
break;
default:
@@ -5608,11 +5618,8 @@
}
if (vcpu->guest_debug == 0) {
- u32 cpu_based_vm_exec_control;
-
- cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
- cpu_based_vm_exec_control &= ~CPU_BASED_MOV_DR_EXITING;
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+ vmcs_clear_bits(CPU_BASED_VM_EXEC_CONTROL,
+ CPU_BASED_MOV_DR_EXITING);
/*
* No more DR vmexits; force a reload of the debug registers
@@ -5649,8 +5656,6 @@
static void vmx_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
- u32 cpu_based_vm_exec_control;
-
get_debugreg(vcpu->arch.db[0], 0);
get_debugreg(vcpu->arch.db[1], 1);
get_debugreg(vcpu->arch.db[2], 2);
@@ -5659,10 +5664,7 @@
vcpu->arch.dr7 = vmcs_readl(GUEST_DR7);
vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
-
- cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL);
- cpu_based_vm_exec_control |= CPU_BASED_MOV_DR_EXITING;
- vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
+ vmcs_set_bits(CPU_BASED_VM_EXEC_CONTROL, CPU_BASED_MOV_DR_EXITING);
}
static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val)
@@ -5747,8 +5749,7 @@
static int handle_vmcall(struct kvm_vcpu *vcpu)
{
- kvm_emulate_hypercall(vcpu);
- return 1;
+ return kvm_emulate_hypercall(vcpu);
}
static int handle_invd(struct kvm_vcpu *vcpu)
@@ -6435,8 +6436,8 @@
if (vmx->nested.vmcs02_num >= max(VMCS02_POOL_SIZE, 1)) {
/* Recycle the least recently used VMCS. */
- item = list_entry(vmx->nested.vmcs02_pool.prev,
- struct vmcs02_list, list);
+ item = list_last_entry(&vmx->nested.vmcs02_pool,
+ struct vmcs02_list, list);
item->vmptr = vmx->nested.current_vmptr;
list_move(&item->list, &vmx->nested.vmcs02_pool);
return &item->vmcs02;
@@ -7752,6 +7753,13 @@
else if (is_no_device(intr_info) &&
!(vmcs12->guest_cr0 & X86_CR0_TS))
return false;
+ else if (is_debug(intr_info) &&
+ vcpu->guest_debug &
+ (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+ return false;
+ else if (is_breakpoint(intr_info) &&
+ vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+ return false;
return vmcs12->exception_bitmap &
(1u << (intr_info & INTR_INFO_VECTOR_MASK));
case EXIT_REASON_EXTERNAL_INTERRUPT:
@@ -10256,7 +10264,7 @@
vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
- if (vmx_mpx_supported())
+ if (kvm_mpx_supported())
vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
if (nested_cpu_has_xsaves(vmcs12))
vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ee3e990..bcbce0f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1203,7 +1203,7 @@
return dividend;
}
-static void kvm_get_time_scale(uint32_t scaled_khz, uint32_t base_khz,
+static void kvm_get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
s8 *pshift, u32 *pmultiplier)
{
uint64_t scaled64;
@@ -1211,8 +1211,8 @@
uint64_t tps64;
uint32_t tps32;
- tps64 = base_khz * 1000LL;
- scaled64 = scaled_khz * 1000LL;
+ tps64 = base_hz;
+ scaled64 = scaled_hz;
while (tps64 > scaled64*2 || tps64 & 0xffffffff00000000ULL) {
tps64 >>= 1;
shift--;
@@ -1230,8 +1230,8 @@
*pshift = shift;
*pmultiplier = div_frac(scaled64, tps32);
- pr_debug("%s: base_khz %u => %u, shift %d, mul %u\n",
- __func__, base_khz, scaled_khz, shift, *pmultiplier);
+ pr_debug("%s: base_hz %llu => %llu, shift %d, mul %u\n",
+ __func__, base_hz, scaled_hz, shift, *pmultiplier);
}
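
kvm_get_time_scale() now takes frequencies in Hz and produces a (shift, 32-bit multiplier) pair used in the pvclock style: scaled ~= ((ticks << shift) * mul) >> 32, with a negative shift meaning a right shift. A userspace re-derivation under that assumption; the second normalization loop is reconstructed from the kvmclock convention rather than quoted from this hunk, so treat it as a sketch:

    #include <assert.h>
    #include <stdint.h>

    /* 2^32 * dividend / divisor; the loops below keep dividend < divisor */
    static uint32_t div_frac(uint32_t dividend, uint32_t divisor)
    {
        return (uint32_t)(((uint64_t)dividend << 32) / divisor);
    }

    static void get_time_scale(uint64_t scaled_hz, uint64_t base_hz,
                               int *pshift, uint32_t *pmul)
    {
        uint64_t tps64 = base_hz, scaled64 = scaled_hz;
        uint32_t tps32;
        int shift = 0;

        /* halve the base rate until it fits in 32 bits and is within
         * a factor of two of the target, tracking the shift */
        while (tps64 > scaled64 * 2 || tps64 & 0xffffffff00000000ULL) {
            tps64 >>= 1;
            shift--;
        }
        tps32 = (uint32_t)tps64;
        /* then normalize until base > target, so the multiplier < 2^32 */
        while (tps32 <= (uint32_t)scaled64 ||
               scaled64 & 0xffffffff00000000ULL) {
            if (scaled64 & 0xffffffff00000000ULL || tps32 & 0x80000000)
                scaled64 >>= 1;
            else
                tps32 <<= 1;
            shift++;
        }
        *pshift = shift;
        *pmul = div_frac((uint32_t)scaled64, tps32);
    }

    int main(void)
    {
        int shift;
        uint32_t mul;
        uint64_t ticks = 2500000000ULL;     /* one second at 2.5 GHz */

        get_time_scale(1000000000ULL, 2500000000ULL, &shift, &mul);

        /* pvclock-style conversion: shift first, then 32.32 multiply */
        uint64_t d = shift < 0 ? ticks >> -shift : ticks << shift;
        uint64_t ns = (d * mul) >> 32;

        assert(ns > 999999000ULL && ns < 1000001000ULL);    /* ~1e9 ns */
        return 0;
    }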
#ifdef CONFIG_X86_64
@@ -1290,23 +1290,23 @@
return 0;
}
-static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
+static int kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
{
u32 thresh_lo, thresh_hi;
int use_scaling = 0;
/* tsc_khz can be zero if TSC calibration fails */
- if (this_tsc_khz == 0) {
+ if (user_tsc_khz == 0) {
/* set tsc_scaling_ratio to a safe value */
vcpu->arch.tsc_scaling_ratio = kvm_default_tsc_scaling_ratio;
return -1;
}
/* Compute a scale to convert nanoseconds in TSC cycles */
- kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
+ kvm_get_time_scale(user_tsc_khz * 1000LL, NSEC_PER_SEC,
&vcpu->arch.virtual_tsc_shift,
&vcpu->arch.virtual_tsc_mult);
- vcpu->arch.virtual_tsc_khz = this_tsc_khz;
+ vcpu->arch.virtual_tsc_khz = user_tsc_khz;
/*
* Compute the variation in TSC rate which is acceptable
@@ -1316,11 +1316,11 @@
*/
thresh_lo = adjust_tsc_khz(tsc_khz, -tsc_tolerance_ppm);
thresh_hi = adjust_tsc_khz(tsc_khz, tsc_tolerance_ppm);
- if (this_tsc_khz < thresh_lo || this_tsc_khz > thresh_hi) {
- pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", this_tsc_khz, thresh_lo, thresh_hi);
+ if (user_tsc_khz < thresh_lo || user_tsc_khz > thresh_hi) {
+ pr_debug("kvm: requested TSC rate %u falls outside tolerance [%u,%u]\n", user_tsc_khz, thresh_lo, thresh_hi);
use_scaling = 1;
}
- return set_tsc_khz(vcpu, this_tsc_khz, use_scaling);
+ return set_tsc_khz(vcpu, user_tsc_khz, use_scaling);
}
static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
@@ -1713,7 +1713,7 @@
static int kvm_guest_time_update(struct kvm_vcpu *v)
{
- unsigned long flags, this_tsc_khz, tgt_tsc_khz;
+ unsigned long flags, tgt_tsc_khz;
struct kvm_vcpu_arch *vcpu = &v->arch;
struct kvm_arch *ka = &v->kvm->arch;
s64 kernel_ns;
@@ -1739,8 +1739,8 @@
/* Keep irq disabled to prevent changes to the clock */
local_irq_save(flags);
- this_tsc_khz = __this_cpu_read(cpu_tsc_khz);
- if (unlikely(this_tsc_khz == 0)) {
+ tgt_tsc_khz = __this_cpu_read(cpu_tsc_khz);
+ if (unlikely(tgt_tsc_khz == 0)) {
local_irq_restore(flags);
kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
return 1;
@@ -1775,13 +1775,14 @@
if (!vcpu->pv_time_enabled)
return 0;
- if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
- tgt_tsc_khz = kvm_has_tsc_control ?
- vcpu->virtual_tsc_khz : this_tsc_khz;
- kvm_get_time_scale(NSEC_PER_SEC / 1000, tgt_tsc_khz,
+ if (kvm_has_tsc_control)
+ tgt_tsc_khz = kvm_scale_tsc(v, tgt_tsc_khz);
+
+ if (unlikely(vcpu->hw_tsc_khz != tgt_tsc_khz)) {
+ kvm_get_time_scale(NSEC_PER_SEC, tgt_tsc_khz * 1000LL,
&vcpu->hv_clock.tsc_shift,
&vcpu->hv_clock.tsc_to_system_mul);
- vcpu->hw_tsc_khz = this_tsc_khz;
+ vcpu->hw_tsc_khz = tgt_tsc_khz;
}
/* With all the info we got, fill in the values */
@@ -2749,6 +2750,7 @@
}
kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu);
+ vcpu->arch.switch_db_regs |= KVM_DEBUGREG_RELOAD;
}
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -3602,20 +3604,26 @@
static int kvm_vm_ioctl_get_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
- mutex_lock(&kvm->arch.vpit->pit_state.lock);
- memcpy(ps, &kvm->arch.vpit->pit_state, sizeof(struct kvm_pit_state));
- mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ struct kvm_kpit_state *kps = &kvm->arch.vpit->pit_state;
+
+ BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
+
+ mutex_lock(&kps->lock);
+ memcpy(ps, &kps->channels, sizeof(*ps));
+ mutex_unlock(&kps->lock);
return 0;
}
static int kvm_vm_ioctl_set_pit(struct kvm *kvm, struct kvm_pit_state *ps)
{
int i;
- mutex_lock(&kvm->arch.vpit->pit_state.lock);
- memcpy(&kvm->arch.vpit->pit_state, ps, sizeof(struct kvm_pit_state));
+ struct kvm_pit *pit = kvm->arch.vpit;
+
+ mutex_lock(&pit->pit_state.lock);
+ memcpy(&pit->pit_state.channels, ps, sizeof(*ps));
for (i = 0; i < 3; i++)
- kvm_pit_load_count(kvm, i, ps->channels[i].count, 0);
- mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ kvm_pit_load_count(pit, i, ps->channels[i].count, 0);
+ mutex_unlock(&pit->pit_state.lock);
return 0;
}
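
The BUILD_BUG_ON() above pins the ioctl ABI: struct kvm_pit_state must stay exactly the size of the channel array, since only the channels are copied in and out now, never the kernel-internal flags or lock. A minimal userspace sketch of the same compile-time guard -- the struct layouts below are invented for illustration:

    #include <string.h>
    #include <stdio.h>

    #define BUILD_BUG_ON(cond) ((void)sizeof(char[1 - 2 * !!(cond)]))

    struct channel { unsigned int count; unsigned char mode; };
    struct pit_state_abi { struct channel channels[3]; };  /* what userspace sees */
    struct pit_state {
            struct channel channels[3];
            int flags;                       /* kernel-only state, + lock, ... */
    };

    static void get_state(struct pit_state_abi *ps, const struct pit_state *kps)
    {
            /* fails to compile if the ABI struct and channel array diverge */
            BUILD_BUG_ON(sizeof(*ps) != sizeof(kps->channels));
            memcpy(ps, kps->channels, sizeof(*ps));  /* never the flags/lock */
    }

    int main(void)
    {
            struct pit_state kps = { .channels = { { 1234, 2 } } };
            struct pit_state_abi ps;

            get_state(&ps, &kps);
            printf("count=%u\n", ps.channels[0].count);
            return 0;
    }
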
@@ -3635,29 +3643,39 @@
int start = 0;
int i;
u32 prev_legacy, cur_legacy;
- mutex_lock(&kvm->arch.vpit->pit_state.lock);
- prev_legacy = kvm->arch.vpit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
+ struct kvm_pit *pit = kvm->arch.vpit;
+
+ mutex_lock(&pit->pit_state.lock);
+ prev_legacy = pit->pit_state.flags & KVM_PIT_FLAGS_HPET_LEGACY;
cur_legacy = ps->flags & KVM_PIT_FLAGS_HPET_LEGACY;
if (!prev_legacy && cur_legacy)
start = 1;
- memcpy(&kvm->arch.vpit->pit_state.channels, &ps->channels,
- sizeof(kvm->arch.vpit->pit_state.channels));
- kvm->arch.vpit->pit_state.flags = ps->flags;
+ memcpy(&pit->pit_state.channels, &ps->channels,
+ sizeof(pit->pit_state.channels));
+ pit->pit_state.flags = ps->flags;
for (i = 0; i < 3; i++)
- kvm_pit_load_count(kvm, i, kvm->arch.vpit->pit_state.channels[i].count,
+ kvm_pit_load_count(pit, i, pit->pit_state.channels[i].count,
start && i == 0);
- mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+ mutex_unlock(&pit->pit_state.lock);
return 0;
}
static int kvm_vm_ioctl_reinject(struct kvm *kvm,
struct kvm_reinject_control *control)
{
- if (!kvm->arch.vpit)
+ struct kvm_pit *pit = kvm->arch.vpit;
+
+ if (!pit)
return -ENXIO;
- mutex_lock(&kvm->arch.vpit->pit_state.lock);
- kvm->arch.vpit->pit_state.reinject = control->pit_reinject;
- mutex_unlock(&kvm->arch.vpit->pit_state.lock);
+
+ /* pit->pit_state.lock was overloaded to prevent userspace from getting
+ * an inconsistent state after running multiple KVM_REINJECT_CONTROL
+ * ioctls in parallel. Use a separate lock if that ioctl isn't rare.
+ */
+ mutex_lock(&pit->pit_state.lock);
+ kvm_pit_set_reinject(pit, control->pit_reinject);
+ mutex_unlock(&pit->pit_state.lock);
+
return 0;
}
@@ -4343,7 +4361,7 @@
ret = kvm_vcpu_write_guest(vcpu, gpa, val, bytes);
if (ret < 0)
return 0;
- kvm_mmu_pte_write(vcpu, gpa, val, bytes);
+ kvm_page_track_write(vcpu, gpa, val, bytes);
return 1;
}
@@ -4601,7 +4619,7 @@
return X86EMUL_CMPXCHG_FAILED;
kvm_vcpu_mark_page_dirty(vcpu, gpa >> PAGE_SHIFT);
- kvm_mmu_pte_write(vcpu, gpa, new, bytes);
+ kvm_page_track_write(vcpu, gpa, new, bytes);
return X86EMUL_CONTINUE;
@@ -7311,7 +7329,7 @@
* Every 255 times fpu_counter rolls over to 0; a guest that uses
* the FPU in bursts will revert to loading it on demand.
*/
- if (!vcpu->arch.eager_fpu) {
+ if (!use_eager_fpu()) {
if (++vcpu->fpu_counter < 5)
kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
}
@@ -7722,6 +7740,9 @@
INIT_DELAYED_WORK(&kvm->arch.kvmclock_update_work, kvmclock_update_fn);
INIT_DELAYED_WORK(&kvm->arch.kvmclock_sync_work, kvmclock_sync_fn);
+ kvm_page_track_init(kvm);
+ kvm_mmu_init_vm(kvm);
+
return 0;
}
@@ -7848,6 +7869,7 @@
kfree(kvm->arch.vioapic);
kvm_free_vcpus(kvm);
kfree(rcu_dereference_check(kvm->arch.apic_map, 1));
+ kvm_mmu_uninit_vm(kvm);
}
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
@@ -7869,6 +7891,8 @@
free->arch.lpage_info[i - 1] = NULL;
}
}
+
+ kvm_page_track_free_memslot(free, dont);
}
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
@@ -7877,6 +7901,7 @@
int i;
for (i = 0; i < KVM_NR_PAGE_SIZES; ++i) {
+ struct kvm_lpage_info *linfo;
unsigned long ugfn;
int lpages;
int level = i + 1;
@@ -7891,15 +7916,16 @@
if (i == 0)
continue;
- slot->arch.lpage_info[i - 1] = kvm_kvzalloc(lpages *
- sizeof(*slot->arch.lpage_info[i - 1]));
- if (!slot->arch.lpage_info[i - 1])
+ linfo = kvm_kvzalloc(lpages * sizeof(*linfo));
+ if (!linfo)
goto out_free;
+ slot->arch.lpage_info[i - 1] = linfo;
+
if (slot->base_gfn & (KVM_PAGES_PER_HPAGE(level) - 1))
- slot->arch.lpage_info[i - 1][0].write_count = 1;
+ linfo[0].disallow_lpage = 1;
if ((slot->base_gfn + npages) & (KVM_PAGES_PER_HPAGE(level) - 1))
- slot->arch.lpage_info[i - 1][lpages - 1].write_count = 1;
+ linfo[lpages - 1].disallow_lpage = 1;
ugfn = slot->userspace_addr >> PAGE_SHIFT;
/*
* If the gfn and userspace address are not aligned wrt each
@@ -7911,10 +7937,13 @@
unsigned long j;
for (j = 0; j < lpages; ++j)
- slot->arch.lpage_info[i - 1][j].write_count = 1;
+ linfo[j].disallow_lpage = 1;
}
}
+ if (kvm_page_track_create_memslot(slot, npages))
+ goto out_free;
+
return 0;
out_free:
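
The rename from write_count to disallow_lpage reflects that the field is now a flag with several independent reasons to be set (slot misalignment, and soon page tracking). A rough userspace sketch of the alignment rule applied above, with made-up types:

    #include <stdio.h>

    #define PAGES_PER_HPAGE(level) (1UL << (((level) - 1) * 9))  /* 4K/2M/1G, x86-style */

    struct lpage_info { int disallow_lpage; };

    static void mark_misaligned(struct lpage_info *linfo, unsigned long nr,
                                unsigned long base_gfn, unsigned long npages,
                                int level)
    {
            unsigned long mask = PAGES_PER_HPAGE(level) - 1;

            if (base_gfn & mask)             /* slot starts mid huge page */
                    linfo[0].disallow_lpage = 1;
            if ((base_gfn + npages) & mask)  /* slot ends mid huge page */
                    linfo[nr - 1].disallow_lpage = 1;
    }

    int main(void)
    {
            struct lpage_info linfo[4] = { 0 };

            /* base gfn 0x1ff is not 512-aligned: the first 2M chunk is out */
            mark_misaligned(linfo, 4, 0x1ff, 4 * 512, 2);
            printf("first=%d last=%d\n",
                   linfo[0].disallow_lpage, linfo[3].disallow_lpage);
            return 0;
    }
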
diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
index 982ce34..27f89c7 100644
--- a/arch/x86/lib/copy_user_64.S
+++ b/arch/x86/lib/copy_user_64.S
@@ -232,17 +232,31 @@
/*
* copy_user_nocache - Uncached memory copy with exception handling
- * This will force destination/source out of cache for more performance.
+ * This will force destination out of cache for more performance.
+ *
+ * Note: A cached memory copy is used when the destination or size is
+ * not naturally aligned. That is:
+ * - 8-byte alignment is required when the size is 8 bytes or larger.
+ * - 4-byte alignment is required when the size is 4 bytes.
*/
ENTRY(__copy_user_nocache)
ASM_STAC
+
+ /* If size is less than 8 bytes, go to 4-byte copy */
cmpl $8,%edx
- jb 20f /* less then 8 bytes, go to byte copy loop */
+ jb .L_4b_nocache_copy_entry
+
+ /* If destination is not 8-byte aligned, "cache" copy to align it */
ALIGN_DESTINATION
+
+ /* Set 4x8-byte copy count and remainder */
movl %edx,%ecx
andl $63,%edx
shrl $6,%ecx
- jz 17f
+ jz .L_8b_nocache_copy_entry /* jump if count is 0 */
+
+ /* Perform 4x8-byte nocache loop-copy */
+.L_4x8b_nocache_copy_loop:
1: movq (%rsi),%r8
2: movq 1*8(%rsi),%r9
3: movq 2*8(%rsi),%r10
@@ -262,60 +276,106 @@
leaq 64(%rsi),%rsi
leaq 64(%rdi),%rdi
decl %ecx
- jnz 1b
-17: movl %edx,%ecx
+ jnz .L_4x8b_nocache_copy_loop
+
+ /* Set 8-byte copy count and remainder */
+.L_8b_nocache_copy_entry:
+ movl %edx,%ecx
andl $7,%edx
shrl $3,%ecx
- jz 20f
-18: movq (%rsi),%r8
-19: movnti %r8,(%rdi)
+ jz .L_4b_nocache_copy_entry /* jump if count is 0 */
+
+ /* Perform 8-byte nocache loop-copy */
+.L_8b_nocache_copy_loop:
+20: movq (%rsi),%r8
+21: movnti %r8,(%rdi)
leaq 8(%rsi),%rsi
leaq 8(%rdi),%rdi
decl %ecx
- jnz 18b
-20: andl %edx,%edx
- jz 23f
+ jnz .L_8b_nocache_copy_loop
+
+ /* If no bytes left, we're done */
+.L_4b_nocache_copy_entry:
+ andl %edx,%edx
+ jz .L_finish_copy
+
+ /* If destination is not 4-byte aligned, go to byte copy: */
+ movl %edi,%ecx
+ andl $3,%ecx
+ jnz .L_1b_cache_copy_entry
+
+ /* Set 4-byte copy count (1 or 0) and remainder */
movl %edx,%ecx
-21: movb (%rsi),%al
-22: movb %al,(%rdi)
+ andl $3,%edx
+ shrl $2,%ecx
+ jz .L_1b_cache_copy_entry /* jump if count is 0 */
+
+ /* Perform 4-byte nocache copy: */
+30: movl (%rsi),%r8d
+31: movnti %r8d,(%rdi)
+ leaq 4(%rsi),%rsi
+ leaq 4(%rdi),%rdi
+
+ /* If no bytes left, we're done: */
+ andl %edx,%edx
+ jz .L_finish_copy
+
+ /* Perform byte "cache" loop-copy for the remainder */
+.L_1b_cache_copy_entry:
+ movl %edx,%ecx
+.L_1b_cache_copy_loop:
+40: movb (%rsi),%al
+41: movb %al,(%rdi)
incq %rsi
incq %rdi
decl %ecx
- jnz 21b
-23: xorl %eax,%eax
+ jnz .L_1b_cache_copy_loop
+
+ /* Finished copying; fence the prior stores */
+.L_finish_copy:
+ xorl %eax,%eax
ASM_CLAC
sfence
ret
.section .fixup,"ax"
-30: shll $6,%ecx
+.L_fixup_4x8b_copy:
+ shll $6,%ecx
addl %ecx,%edx
- jmp 60f
-40: lea (%rdx,%rcx,8),%rdx
- jmp 60f
-50: movl %ecx,%edx
-60: sfence
+ jmp .L_fixup_handle_tail
+.L_fixup_8b_copy:
+ lea (%rdx,%rcx,8),%rdx
+ jmp .L_fixup_handle_tail
+.L_fixup_4b_copy:
+ lea (%rdx,%rcx,4),%rdx
+ jmp .L_fixup_handle_tail
+.L_fixup_1b_copy:
+ movl %ecx,%edx
+.L_fixup_handle_tail:
+ sfence
jmp copy_user_handle_tail
.previous
- _ASM_EXTABLE(1b,30b)
- _ASM_EXTABLE(2b,30b)
- _ASM_EXTABLE(3b,30b)
- _ASM_EXTABLE(4b,30b)
- _ASM_EXTABLE(5b,30b)
- _ASM_EXTABLE(6b,30b)
- _ASM_EXTABLE(7b,30b)
- _ASM_EXTABLE(8b,30b)
- _ASM_EXTABLE(9b,30b)
- _ASM_EXTABLE(10b,30b)
- _ASM_EXTABLE(11b,30b)
- _ASM_EXTABLE(12b,30b)
- _ASM_EXTABLE(13b,30b)
- _ASM_EXTABLE(14b,30b)
- _ASM_EXTABLE(15b,30b)
- _ASM_EXTABLE(16b,30b)
- _ASM_EXTABLE(18b,40b)
- _ASM_EXTABLE(19b,40b)
- _ASM_EXTABLE(21b,50b)
- _ASM_EXTABLE(22b,50b)
+ _ASM_EXTABLE(1b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(2b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(3b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(4b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(5b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(6b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(7b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(8b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(9b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(10b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(11b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(12b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(13b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(14b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(15b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(16b,.L_fixup_4x8b_copy)
+ _ASM_EXTABLE(20b,.L_fixup_8b_copy)
+ _ASM_EXTABLE(21b,.L_fixup_8b_copy)
+ _ASM_EXTABLE(30b,.L_fixup_4b_copy)
+ _ASM_EXTABLE(31b,.L_fixup_4b_copy)
+ _ASM_EXTABLE(40b,.L_fixup_1b_copy)
+ _ASM_EXTABLE(41b,.L_fixup_1b_copy)
ENDPROC(__copy_user_nocache)
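
Stripped of the exception tables, the new control flow of __copy_user_nocache is easier to follow in C. Below is a hedged, userspace-only model of the staging logic -- plain stores stand in for the non-temporal movnti instructions, and there is no fault handling or trailing sfence:

    #include <stddef.h>
    #include <stdint.h>
    #include <string.h>

    static void copy_nocache_model(char *dst, const char *src, size_t len)
    {
            size_t n;

            if (len >= 8) {
                    /* ALIGN_DESTINATION: byte-copy until dst is 8-byte aligned */
                    while (((uintptr_t)dst & 7) && len)
                            *dst++ = *src++, len--;

                    for (n = len >> 6; n; n--, src += 64, dst += 64)  /* 4x8B loop */
                            memcpy(dst, src, 64);
                    len &= 63;

                    for (n = len >> 3; n; n--, src += 8, dst += 8)    /* 8B loop */
                            memcpy(dst, src, 8);
                    len &= 7;
            }

            /* at most one 4-byte store, only if dst is 4-byte aligned */
            if (len >= 4 && !((uintptr_t)dst & 3)) {
                    memcpy(dst, src, 4);
                    src += 4, dst += 4, len -= 4;
            }

            while (len--)                                             /* byte tail */
                    *dst++ = *src++;
    }

    int main(void)
    {
            char src[100], dst[100];

            for (int i = 0; i < 100; i++)
                    src[i] = (char)i;
            copy_nocache_model(dst, src, sizeof(src));
            return memcmp(dst, src, sizeof(src)) != 0;
    }

The 4-byte and 1-byte stages are exactly what the old code lacked: it fell straight from 8-byte nocache copies to cached byte copies, so a 4-byte-sized, 4-byte-aligned access never got a non-temporal store.
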
diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index eef44d9..e830c71 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -287,6 +287,9 @@
if (!pmd_k)
return -1;
+ if (pmd_huge(*pmd_k))
+ return 0;
+
pte_k = pte_offset_kernel(pmd_k, address);
if (!pte_present(*pte_k))
return -1;
@@ -360,8 +363,6 @@
* 64-bit:
*
* Handle a fault on the vmalloc area
- *
- * This assumes no large pages in there.
*/
static noinline int vmalloc_fault(unsigned long address)
{
@@ -403,17 +404,23 @@
if (pud_none(*pud_ref))
return -1;
- if (pud_none(*pud) || pud_page_vaddr(*pud) != pud_page_vaddr(*pud_ref))
+ if (pud_none(*pud) || pud_pfn(*pud) != pud_pfn(*pud_ref))
BUG();
+ if (pud_huge(*pud))
+ return 0;
+
pmd = pmd_offset(pud, address);
pmd_ref = pmd_offset(pud_ref, address);
if (pmd_none(*pmd_ref))
return -1;
- if (pmd_none(*pmd) || pmd_page(*pmd) != pmd_page(*pmd_ref))
+ if (pmd_none(*pmd) || pmd_pfn(*pmd) != pmd_pfn(*pmd_ref))
BUG();
+ if (pmd_huge(*pmd))
+ return 0;
+
pte_ref = pte_offset_kernel(pmd_ref, address);
if (!pte_present(*pte_ref))
return -1;
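
The new pud_huge()/pmd_huge() early returns capture a simple invariant: a huge entry terminates the walk at that level, so there is no lower-level table to descend into, and dereferencing one would fault. A toy userspace walk, with invented structures, that mirrors the shape of the fix:

    #include <stdbool.h>
    #include <stdio.h>

    struct entry {
            bool present;
            bool huge;            /* leaf mapping at this level */
            struct entry *next;   /* next-level table; NULL when huge */
    };

    /* Return 0 if the address is mapped, -1 otherwise (pgd->pud->pmd->pte). */
    static int walk(struct entry *e, int levels)
    {
            for (int l = 0; l < levels; l++, e = e->next) {
                    if (!e || !e->present)
                            return -1;
                    if (e->huge)  /* complete here; never touch ->next */
                            return 0;
            }
            return 0;
    }

    int main(void)
    {
            struct entry pmd = { .present = true, .huge = true };  /* 2M page */
            struct entry pud = { .present = true, .next = &pmd };
            struct entry pgd = { .present = true, .next = &pud };

            printf("%d\n", walk(&pgd, 4));  /* 0: resolved at the huge PMD */
            return 0;
    }

The switch from pud_page_vaddr()/pmd_page() comparisons to pud_pfn()/pmd_pfn() serves the same goal: pfn comparisons stay meaningful for huge entries, where the "page table vaddr" of a leaf is not a table at all.
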
diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
index 6d5eb59..d8a798d 100644
--- a/arch/x86/mm/gup.c
+++ b/arch/x86/mm/gup.c
@@ -102,7 +102,6 @@
return 0;
}
- page = pte_page(pte);
if (pte_devmap(pte)) {
pgmap = get_dev_pagemap(pte_pfn(pte), pgmap);
if (unlikely(!pgmap)) {
@@ -115,6 +114,7 @@
return 0;
}
VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
+ page = pte_page(pte);
get_page(page);
put_dev_pagemap(pgmap);
SetPageReferenced(page);
diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
index 96bd1e2..72bb52f 100644
--- a/arch/x86/mm/mmap.c
+++ b/arch/x86/mm/mmap.c
@@ -71,12 +71,12 @@
if (mmap_is_ia32())
#ifdef CONFIG_COMPAT
- rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_compat_bits) - 1);
+ rnd = get_random_long() & ((1UL << mmap_rnd_compat_bits) - 1);
#else
- rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1);
+ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
#endif
else
- rnd = (unsigned long)get_random_int() & ((1 << mmap_rnd_bits) - 1);
+ rnd = get_random_long() & ((1UL << mmap_rnd_bits) - 1);
return rnd << PAGE_SHIFT;
}
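
Two fixes ride together in the mmap hunk: get_random_long() provides a full word of entropy where get_random_int() gave only 32 bits, and the 1UL literal keeps the mask computation in unsigned long, since (1 << 32) overflows a 32-bit int once mmap_rnd_bits can reach 32. A rough model -- the xorshift generator below is an illustrative stand-in, not the kernel's RNG:

    #include <stdio.h>

    /* stand-in for get_random_long(); NOT cryptographically secure */
    static unsigned long fake_random_long(void)
    {
            static unsigned long s = 88172645463325252UL;

            s ^= s << 13; s ^= s >> 7; s ^= s << 17;
            return s;
    }

    static unsigned long arch_mmap_rnd(unsigned int rnd_bits)
    {
            /* 1UL, not 1: with rnd_bits == 32, (1 << 32) is undefined
             * behavior on a 32-bit int */
            unsigned long rnd = fake_random_long() & ((1UL << rnd_bits) - 1);

            return rnd << 12;  /* PAGE_SHIFT */
    }

    int main(void)
    {
            printf("%#lx\n", arch_mmap_rnd(28));
            return 0;
    }
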
diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
index b2fd67d..ef05755 100644
--- a/arch/x86/mm/mpx.c
+++ b/arch/x86/mm/mpx.c
@@ -123,7 +123,7 @@
break;
}
- if (regno > nr_registers) {
+ if (regno >= nr_registers) {
WARN_ONCE(1, "decoded an instruction with an invalid register");
return -EINVAL;
}
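
The mpx change is a classic off-by-one: with `>`, the check passes when regno equals nr_registers, indexing one element past the end of the table. A short demonstration with a stand-in register table:

    #include <stdio.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    static const char *regs[] = { "rax", "rcx", "rdx", "rbx" };

    static const char *lookup(unsigned int regno)
    {
            unsigned int nr_registers = ARRAY_SIZE(regs);

            /* 'regno > nr_registers' would let regno == 4 through */
            if (regno >= nr_registers)
                    return NULL;
            return regs[regno];
    }

    int main(void)
    {
            printf("%s\n", lookup(3) ? lookup(3) : "invalid");  /* rbx */
            printf("%s\n", lookup(4) ? lookup(4) : "invalid");  /* invalid */
            return 0;
    }
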
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index c3b3f65..d04f809 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -469,7 +469,7 @@
{
int i, nid;
nodemask_t numa_kernel_nodes = NODE_MASK_NONE;
- unsigned long start, end;
+ phys_addr_t start, end;
struct memblock_region *r;
/*
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 2440814..9cf96d8 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -419,24 +419,30 @@
phys_addr_t slow_virt_to_phys(void *__virt_addr)
{
unsigned long virt_addr = (unsigned long)__virt_addr;
- unsigned long phys_addr, offset;
+ phys_addr_t phys_addr;
+ unsigned long offset;
enum pg_level level;
pte_t *pte;
pte = lookup_address(virt_addr, &level);
BUG_ON(!pte);
+ /*
+ * pXX_pfn() returns unsigned long, which must be cast to phys_addr_t
+ * before being left-shifted PAGE_SHIFT bits -- this cast is what
+ * makes 32-bit PAE kernels work correctly.
+ */
switch (level) {
case PG_LEVEL_1G:
- phys_addr = pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
+ phys_addr = (phys_addr_t)pud_pfn(*(pud_t *)pte) << PAGE_SHIFT;
offset = virt_addr & ~PUD_PAGE_MASK;
break;
case PG_LEVEL_2M:
- phys_addr = pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
+ phys_addr = (phys_addr_t)pmd_pfn(*(pmd_t *)pte) << PAGE_SHIFT;
offset = virt_addr & ~PMD_PAGE_MASK;
break;
default:
- phys_addr = pte_pfn(*pte) << PAGE_SHIFT;
+ phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
offset = virt_addr & ~PAGE_MASK;
}
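
The casts matter on 32-bit PAE, where unsigned long is 32 bits but physical addresses are 64: shifting the pfn first truncates anything at or above 4 GiB. A small demonstration of the widen-before-shift rule, with plain stdint types standing in for the kernel's:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t pfn = 0x180000;                /* page at 6 GiB, PAGE_SHIFT = 12 */
            uint32_t truncated = pfn << 12;         /* wraps: 6 GiB needs 35 bits */
            uint64_t correct = (uint64_t)pfn << 12; /* widen first, then shift */

            printf("truncated=0x%08x correct=0x%llx\n",
                   truncated, (unsigned long long)correct);
            return 0;
    }
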
diff --git a/arch/x86/pci/common.c b/arch/x86/pci/common.c
index 2879efc..d34b511 100644
--- a/arch/x86/pci/common.c
+++ b/arch/x86/pci/common.c
@@ -711,28 +711,22 @@
return 0;
}
-int pcibios_alloc_irq(struct pci_dev *dev)
-{
- /*
- * If the PCI device was already claimed by core code and has
- * MSI enabled, probing of the pcibios IRQ will overwrite
- * dev->irq. So bail out if MSI is already enabled.
- */
- if (pci_dev_msi_enabled(dev))
- return -EBUSY;
-
- return pcibios_enable_irq(dev);
-}
-
-void pcibios_free_irq(struct pci_dev *dev)
-{
- if (pcibios_disable_irq)
- pcibios_disable_irq(dev);
-}
-
int pcibios_enable_device(struct pci_dev *dev, int mask)
{
- return pci_enable_resources(dev, mask);
+ int err;
+
+ if ((err = pci_enable_resources(dev, mask)) < 0)
+ return err;
+
+ if (!pci_dev_msi_enabled(dev))
+ return pcibios_enable_irq(dev);
+ return 0;
+}
+
+void pcibios_disable_device (struct pci_dev *dev)
+{
+ if (!pci_dev_msi_enabled(dev) && pcibios_disable_irq)
+ pcibios_disable_irq(dev);
}
int pci_ext_cfg_avail(void)
diff --git a/arch/x86/pci/intel_mid_pci.c b/arch/x86/pci/intel_mid_pci.c
index 0d24e7c..8b93e63 100644
--- a/arch/x86/pci/intel_mid_pci.c
+++ b/arch/x86/pci/intel_mid_pci.c
@@ -215,7 +215,7 @@
int polarity;
int ret;
- if (pci_has_managed_irq(dev))
+ if (dev->irq_managed && dev->irq > 0)
return 0;
switch (intel_mid_identify_cpu()) {
@@ -256,13 +256,10 @@
static void intel_mid_pci_irq_disable(struct pci_dev *dev)
{
- if (pci_has_managed_irq(dev)) {
+ if (!mp_should_keep_irq(&dev->dev) && dev->irq_managed &&
+ dev->irq > 0) {
mp_unmap_irq(dev->irq);
dev->irq_managed = 0;
- /*
- * Don't reset dev->irq here, otherwise
- * intel_mid_pci_irq_enable() will fail on next call.
- */
}
}
diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
index 32e7034..9bd1154 100644
--- a/arch/x86/pci/irq.c
+++ b/arch/x86/pci/irq.c
@@ -1202,7 +1202,7 @@
struct pci_dev *temp_dev;
int irq;
- if (pci_has_managed_irq(dev))
+ if (dev->irq_managed && dev->irq > 0)
return 0;
irq = IO_APIC_get_PCI_irq_vector(dev->bus->number,
@@ -1230,7 +1230,8 @@
}
dev = temp_dev;
if (irq >= 0) {
- pci_set_managed_irq(dev, irq);
+ dev->irq_managed = 1;
+ dev->irq = irq;
dev_info(&dev->dev, "PCI->APIC IRQ transform: "
"INT %c -> IRQ %d\n", 'A' + pin - 1, irq);
return 0;
@@ -1256,10 +1257,24 @@
return 0;
}
+bool mp_should_keep_irq(struct device *dev)
+{
+ if (dev->power.is_prepared)
+ return true;
+#ifdef CONFIG_PM
+ if (dev->power.runtime_status == RPM_SUSPENDING)
+ return true;
+#endif
+
+ return false;
+}
+
static void pirq_disable_irq(struct pci_dev *dev)
{
- if (io_apic_assign_pci_irqs && pci_has_managed_irq(dev)) {
+ if (io_apic_assign_pci_irqs && !mp_should_keep_irq(&dev->dev) &&
+ dev->irq_managed && dev->irq) {
mp_unmap_irq(dev->irq);
- pci_reset_managed_irq(dev);
+ dev->irq = 0;
+ dev->irq_managed = 0;
}
}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index ff31ab4..beac4df 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -196,7 +196,10 @@
return 0;
error:
- dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+ if (ret == -ENOSYS)
+ dev_err(&dev->dev, "Xen PCI frontend has not registered MSI/MSI-X support!\n");
+ else if (ret)
+ dev_err(&dev->dev, "Xen PCI frontend error: %d!\n", ret);
free:
kfree(v);
return ret;
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
index c61b6c3..bfadcd0 100644
--- a/arch/x86/platform/intel-quark/imr.c
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -592,14 +592,14 @@
end = (unsigned long)__end_rodata - 1;
/*
- * Setup a locked IMR around the physical extent of the kernel
+ * Setup an unlocked IMR around the physical extent of the kernel
 * from the beginning of the .text section to the end of the
* .rodata section as one physically contiguous block.
*
* We don't round up @size since it is already PAGE_SIZE aligned.
* See vmlinux.lds.S for details.
*/
- ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, true);
+ ret = imr_add_range(base, size, IMR_CPU, IMR_CPU, false);
if (ret < 0) {
pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
size / 1024, start, end);
diff --git a/block/Kconfig b/block/Kconfig
index 161491d..0363cd7 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -88,6 +88,19 @@
T10/SCSI Data Integrity Field or the T13/ATA External Path
Protection. If in doubt, say N.
+config BLK_DEV_DAX
+ bool "Block device DAX support"
+ depends on FS_DAX
+ depends on BROKEN
+ help
+ When DAX support is available (CONFIG_FS_DAX), raw block
+ devices can also support direct userspace access to the
+ storage capacity via mmap(2), similar to a file on a
+ DAX-enabled filesystem. However, the DAX I/O path disables
+ some standard I/O statistics, and the mmap(2) path has some
+ operational differences due to bypassing the page
+ cache. If in doubt, say N.
+
config BLK_DEV_THROTTLING
bool "Block layer bio throttling support"
depends on BLK_CGROUP=y
diff --git a/block/bio.c b/block/bio.c
index dbabd48..cf75915 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -874,7 +874,7 @@
bio->bi_private = &ret;
bio->bi_end_io = submit_bio_wait_endio;
submit_bio(rw, bio);
- wait_for_completion(&ret.event);
+ wait_for_completion_io(&ret.event);
return ret.error;
}
@@ -1090,9 +1090,12 @@
if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
/*
* if we're in a workqueue, the request is orphaned, so
- * don't copy into a random user address space, just free.
+ * don't copy into a random user address space, just free
+ * and return -EINTR so user space doesn't expect any data.
*/
- if (current->mm && bio_data_dir(bio) == READ)
+ if (!current->mm)
+ ret = -EINTR;
+ else if (bio_data_dir(bio) == READ)
ret = bio_copy_to_iter(bio, bmd->iter);
if (bmd->is_our_pages)
bio_free_pages(bio);
diff --git a/block/blk-cgroup.c b/block/blk-cgroup.c
index 5a37188..66e6f1aa 100644
--- a/block/blk-cgroup.c
+++ b/block/blk-cgroup.c
@@ -788,6 +788,7 @@
{
struct gendisk *disk;
struct blkcg_gq *blkg;
+ struct module *owner;
unsigned int major, minor;
int key_len, part, ret;
char *body;
@@ -804,7 +805,9 @@
if (!disk)
return -ENODEV;
if (part) {
+ owner = disk->fops->owner;
put_disk(disk);
+ module_put(owner);
return -ENODEV;
}
@@ -820,7 +823,9 @@
ret = PTR_ERR(blkg);
rcu_read_unlock();
spin_unlock_irq(disk->queue->queue_lock);
+ owner = disk->fops->owner;
put_disk(disk);
+ module_put(owner);
/*
* If queue was bypassing, we should retry. Do so after a
* short msleep(). It isn't strictly necessary but queue
@@ -851,9 +856,13 @@
void blkg_conf_finish(struct blkg_conf_ctx *ctx)
__releases(ctx->disk->queue->queue_lock) __releases(rcu)
{
+ struct module *owner;
+
spin_unlock_irq(ctx->disk->queue->queue_lock);
rcu_read_unlock();
+ owner = ctx->disk->fops->owner;
put_disk(ctx->disk);
+ module_put(owner);
}
EXPORT_SYMBOL_GPL(blkg_conf_finish);
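
The fix follows a common refcount-ordering rule: read the owner pointer before put_disk(), because the put may free the disk (and with it the fops the owner pointer lives in), and drop the module reference only afterwards. A toy model of that ordering with invented types:

    #include <stdio.h>
    #include <stdlib.h>

    struct module { int refcount; };

    struct disk {
            int refcount;
            struct module *owner;
    };

    static void module_put(struct module *m) { m->refcount--; }

    static void put_disk(struct disk *d)
    {
            if (--d->refcount == 0) {
                    printf("disk freed\n");
                    free(d);  /* d->owner is unreachable after this */
            }
    }

    static void release(struct disk *d)
    {
            struct module *owner = d->owner;  /* read BEFORE the disk can go */

            put_disk(d);
            module_put(owner);                /* safe: we kept our own pointer */
    }

    int main(void)
    {
            struct module mod = { .refcount = 1 };
            struct disk *d = malloc(sizeof(*d));

            d->refcount = 1;
            d->owner = &mod;
            release(d);
            printf("module refcount=%d\n", mod.refcount);
            return 0;
    }
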
diff --git a/block/blk-core.c b/block/blk-core.c
index ab51685..b83d297 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -2455,14 +2455,16 @@
rq = NULL;
break;
- } else if (ret == BLKPREP_KILL) {
+ } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
+ int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
+
rq->cmd_flags |= REQ_QUIET;
/*
* Mark this request as started so we don't trigger
* any debug logic in the end I/O path.
*/
blk_start_request(rq);
- __blk_end_request_all(rq, -EIO);
+ __blk_end_request_all(rq, err);
} else {
printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
break;
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 4c0622f..56c0a72 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -599,8 +599,10 @@
* If a request wasn't started before the queue was
* marked dying, kill it here or it'll go unnoticed.
*/
- if (unlikely(blk_queue_dying(rq->q)))
- blk_mq_complete_request(rq, -EIO);
+ if (unlikely(blk_queue_dying(rq->q))) {
+ rq->errors = -EIO;
+ blk_mq_end_request(rq, rq->errors);
+ }
return;
}
diff --git a/block/blk-settings.c b/block/blk-settings.c
index dd49735..c7bb666 100644
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -91,8 +91,8 @@
lim->seg_boundary_mask = BLK_SEG_BOUNDARY_MASK;
lim->virt_boundary_mask = 0;
lim->max_segment_size = BLK_MAX_SEGMENT_SIZE;
- lim->max_sectors = lim->max_dev_sectors = lim->max_hw_sectors =
- BLK_SAFE_MAX_SECTORS;
+ lim->max_sectors = lim->max_hw_sectors = BLK_SAFE_MAX_SECTORS;
+ lim->max_dev_sectors = 0;
lim->chunk_sectors = 0;
lim->max_write_same_sectors = 0;
lim->max_discard_sectors = 0;
diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c
index e140cc4..dd937630 100644
--- a/block/blk-sysfs.c
+++ b/block/blk-sysfs.c
@@ -147,10 +147,9 @@
static ssize_t queue_discard_max_hw_show(struct request_queue *q, char *page)
{
- unsigned long long val;
- val = q->limits.max_hw_discard_sectors << 9;
- return sprintf(page, "%llu\n", val);
+ return sprintf(page, "%llu\n",
+ (unsigned long long)q->limits.max_hw_discard_sectors << 9);
}
static ssize_t queue_discard_max_show(struct request_queue *q, char *page)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index a753df2..d0dd788 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -39,7 +39,6 @@
*/
struct request *next_rq[2];
unsigned int batching; /* number of sequential requests made */
- sector_t last_sector; /* head position */
unsigned int starved; /* times reads have starved writes */
/*
@@ -210,8 +209,6 @@
dd->next_rq[WRITE] = NULL;
dd->next_rq[data_dir] = deadline_latter_request(rq);
- dd->last_sector = rq_end_sector(rq);
-
/*
* take it off the sort and fifo list, move
* to dispatch queue
diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c
index 38c1aa8..28556fc 100644
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -65,18 +65,10 @@
struct skcipher_async_rsgl first_sgl;
struct list_head list;
struct scatterlist *tsg;
- char iv[];
+ atomic_t *inflight;
+ struct skcipher_request req;
};
-#define GET_SREQ(areq, ctx) (struct skcipher_async_req *)((char *)areq + \
- crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req)))
-
-#define GET_REQ_SIZE(ctx) \
- crypto_skcipher_reqsize(crypto_skcipher_reqtfm(&ctx->req))
-
-#define GET_IV_SIZE(ctx) \
- crypto_skcipher_ivsize(crypto_skcipher_reqtfm(&ctx->req))
-
#define MAX_SGL_ENTS ((4096 - sizeof(struct skcipher_sg_list)) / \
sizeof(struct scatterlist) - 1)
@@ -102,15 +94,12 @@
static void skcipher_async_cb(struct crypto_async_request *req, int err)
{
- struct sock *sk = req->data;
- struct alg_sock *ask = alg_sk(sk);
- struct skcipher_ctx *ctx = ask->private;
- struct skcipher_async_req *sreq = GET_SREQ(req, ctx);
+ struct skcipher_async_req *sreq = req->data;
struct kiocb *iocb = sreq->iocb;
- atomic_dec(&ctx->inflight);
+ atomic_dec(sreq->inflight);
skcipher_free_async_sgls(sreq);
- kfree(req);
+ kzfree(sreq);
iocb->ki_complete(iocb, err, err);
}
@@ -306,8 +295,11 @@
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
- struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(&ctx->req);
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
unsigned ivsize = crypto_skcipher_ivsize(tfm);
struct skcipher_sg_list *sgl;
struct af_alg_control con = {};
@@ -509,37 +501,43 @@
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
struct skcipher_async_req *sreq;
struct skcipher_request *req;
struct skcipher_async_rsgl *last_rsgl = NULL;
- unsigned int txbufs = 0, len = 0, tx_nents = skcipher_all_sg_nents(ctx);
- unsigned int reqlen = sizeof(struct skcipher_async_req) +
- GET_REQ_SIZE(ctx) + GET_IV_SIZE(ctx);
+ unsigned int txbufs = 0, len = 0, tx_nents;
+ unsigned int reqsize = crypto_skcipher_reqsize(tfm);
+ unsigned int ivsize = crypto_skcipher_ivsize(tfm);
int err = -ENOMEM;
bool mark = false;
+ char *iv;
+
+ sreq = kzalloc(sizeof(*sreq) + reqsize + ivsize, GFP_KERNEL);
+ if (unlikely(!sreq))
+ goto out;
+
+ req = &sreq->req;
+ iv = (char *)(req + 1) + reqsize;
+ sreq->iocb = msg->msg_iocb;
+ INIT_LIST_HEAD(&sreq->list);
+ sreq->inflight = &ctx->inflight;
lock_sock(sk);
- req = kmalloc(reqlen, GFP_KERNEL);
- if (unlikely(!req))
- goto unlock;
-
- sreq = GET_SREQ(req, ctx);
- sreq->iocb = msg->msg_iocb;
- memset(&sreq->first_sgl, '\0', sizeof(struct skcipher_async_rsgl));
- INIT_LIST_HEAD(&sreq->list);
+ tx_nents = skcipher_all_sg_nents(ctx);
sreq->tsg = kcalloc(tx_nents, sizeof(*sg), GFP_KERNEL);
- if (unlikely(!sreq->tsg)) {
- kfree(req);
+ if (unlikely(!sreq->tsg))
goto unlock;
- }
sg_init_table(sreq->tsg, tx_nents);
- memcpy(sreq->iv, ctx->iv, GET_IV_SIZE(ctx));
- skcipher_request_set_tfm(req, crypto_skcipher_reqtfm(&ctx->req));
- skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
- skcipher_async_cb, sk);
+ memcpy(iv, ctx->iv, ivsize);
+ skcipher_request_set_tfm(req, tfm);
+ skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_SLEEP,
+ skcipher_async_cb, sreq);
while (iov_iter_count(&msg->msg_iter)) {
struct skcipher_async_rsgl *rsgl;
@@ -615,20 +613,22 @@
sg_mark_end(sreq->tsg + txbufs - 1);
skcipher_request_set_crypt(req, sreq->tsg, sreq->first_sgl.sgl.sg,
- len, sreq->iv);
+ len, iv);
err = ctx->enc ? crypto_skcipher_encrypt(req) :
crypto_skcipher_decrypt(req);
if (err == -EINPROGRESS) {
atomic_inc(&ctx->inflight);
err = -EIOCBQUEUED;
+ sreq = NULL;
goto unlock;
}
free:
skcipher_free_async_sgls(sreq);
- kfree(req);
unlock:
skcipher_wmem_wakeup(sk);
release_sock(sk);
+ kzfree(sreq);
+out:
return err;
}
@@ -637,9 +637,12 @@
{
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
struct skcipher_ctx *ctx = ask->private;
- unsigned bs = crypto_skcipher_blocksize(crypto_skcipher_reqtfm(
- &ctx->req));
+ struct skcipher_tfm *skc = pask->private;
+ struct crypto_skcipher *tfm = skc->skcipher;
+ unsigned bs = crypto_skcipher_blocksize(tfm);
struct skcipher_sg_list *sgl;
struct scatterlist *sg;
int err = -EAGAIN;
@@ -947,7 +950,8 @@
ask->private = ctx;
skcipher_request_set_tfm(&ctx->req, skcipher);
- skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_BACKLOG,
+ skcipher_request_set_callback(&ctx->req, CRYPTO_TFM_REQ_MAY_SLEEP |
+ CRYPTO_TFM_REQ_MAY_BACKLOG,
af_alg_complete, &ctx->completion);
sk->sk_destruct = skcipher_sock_destruct;
diff --git a/crypto/crypto_user.c b/crypto/crypto_user.c
index 237f379..43fe85f2 100644
--- a/crypto/crypto_user.c
+++ b/crypto/crypto_user.c
@@ -499,6 +499,7 @@
if (link->dump == NULL)
return -EINVAL;
+ down_read(&crypto_alg_sem);
list_for_each_entry(alg, &crypto_alg_list, cra_list)
dump_alloc += CRYPTO_REPORT_MAXSIZE;
@@ -508,8 +509,11 @@
.done = link->done,
.min_dump_alloc = dump_alloc,
};
- return netlink_dump_start(crypto_nlsk, skb, nlh, &c);
+ err = netlink_dump_start(crypto_nlsk, skb, nlh, &c);
}
+ up_read(&crypto_alg_sem);
+
+ return err;
}
err = nlmsg_parse(nlh, crypto_msg_min[type], attrs, CRYPTOCFGA_MAX,
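
The new down_read()/up_read() pair keeps crypto_alg_list stable across the whole sizing-plus-dump-start sequence, so an algorithm unregistered in between cannot invalidate the dump_alloc estimate. A compressed pthread sketch of the reader-side pattern, with an invented counter standing in for walking the list:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t alg_sem = PTHREAD_RWLOCK_INITIALIZER;
    static int alg_count = 3;  /* stands in for walking crypto_alg_list */

    static int start_dump(void)
    {
            int dump_alloc, err;

            pthread_rwlock_rdlock(&alg_sem);
            dump_alloc = alg_count * 4096;  /* estimate under the lock... */
            err = dump_alloc > 0 ? 0 : -1;  /* ...and consume it there too */
            pthread_rwlock_unlock(&alg_sem);
            return err;
    }

    int main(void)
    {
            printf("%d\n", start_dump());
            return 0;
    }
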
diff --git a/drivers/acpi/nfit.c b/drivers/acpi/nfit.c
index ad6d8c6..fb53db1 100644
--- a/drivers/acpi/nfit.c
+++ b/drivers/acpi/nfit.c
@@ -469,37 +469,16 @@
nfit_mem->bdw = NULL;
}
-static int nfit_mem_add(struct acpi_nfit_desc *acpi_desc,
+static void nfit_mem_init_bdw(struct acpi_nfit_desc *acpi_desc,
struct nfit_mem *nfit_mem, struct acpi_nfit_system_address *spa)
{
u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
struct nfit_memdev *nfit_memdev;
struct nfit_flush *nfit_flush;
- struct nfit_dcr *nfit_dcr;
struct nfit_bdw *nfit_bdw;
struct nfit_idt *nfit_idt;
u16 idt_idx, range_index;
- list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
- if (nfit_dcr->dcr->region_index != dcr)
- continue;
- nfit_mem->dcr = nfit_dcr->dcr;
- break;
- }
-
- if (!nfit_mem->dcr) {
- dev_dbg(acpi_desc->dev, "SPA %d missing:%s%s\n",
- spa->range_index, __to_nfit_memdev(nfit_mem)
- ? "" : " MEMDEV", nfit_mem->dcr ? "" : " DCR");
- return -ENODEV;
- }
-
- /*
- * We've found enough to create an nvdimm, optionally
- * find an associated BDW
- */
- list_add(&nfit_mem->list, &acpi_desc->dimms);
-
list_for_each_entry(nfit_bdw, &acpi_desc->bdws, list) {
if (nfit_bdw->bdw->region_index != dcr)
continue;
@@ -508,12 +487,12 @@
}
if (!nfit_mem->bdw)
- return 0;
+ return;
nfit_mem_find_spa_bdw(acpi_desc, nfit_mem);
if (!nfit_mem->spa_bdw)
- return 0;
+ return;
range_index = nfit_mem->spa_bdw->range_index;
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
@@ -538,8 +517,6 @@
}
break;
}
-
- return 0;
}
static int nfit_mem_dcr_init(struct acpi_nfit_desc *acpi_desc,
@@ -548,7 +525,6 @@
struct nfit_mem *nfit_mem, *found;
struct nfit_memdev *nfit_memdev;
int type = nfit_spa_type(spa);
- u16 dcr;
switch (type) {
case NFIT_SPA_DCR:
@@ -559,14 +535,18 @@
}
list_for_each_entry(nfit_memdev, &acpi_desc->memdevs, list) {
- int rc;
+ struct nfit_dcr *nfit_dcr;
+ u32 device_handle;
+ u16 dcr;
if (nfit_memdev->memdev->range_index != spa->range_index)
continue;
found = NULL;
dcr = nfit_memdev->memdev->region_index;
+ device_handle = nfit_memdev->memdev->device_handle;
list_for_each_entry(nfit_mem, &acpi_desc->dimms, list)
- if (__to_nfit_memdev(nfit_mem)->region_index == dcr) {
+ if (__to_nfit_memdev(nfit_mem)->device_handle
+ == device_handle) {
found = nfit_mem;
break;
}
@@ -579,6 +559,31 @@
if (!nfit_mem)
return -ENOMEM;
INIT_LIST_HEAD(&nfit_mem->list);
+ list_add(&nfit_mem->list, &acpi_desc->dimms);
+ }
+
+ list_for_each_entry(nfit_dcr, &acpi_desc->dcrs, list) {
+ if (nfit_dcr->dcr->region_index != dcr)
+ continue;
+ /*
+ * Record the control region for the dimm. For
+ * the ACPI 6.1 case, where there are separate
+ * control regions for the pmem vs blk
+ * interfaces, be sure to record the extended
+ * blk details.
+ */
+ if (!nfit_mem->dcr)
+ nfit_mem->dcr = nfit_dcr->dcr;
+ else if (nfit_mem->dcr->windows == 0
+ && nfit_dcr->dcr->windows)
+ nfit_mem->dcr = nfit_dcr->dcr;
+ break;
+ }
+
+ if (dcr && !nfit_mem->dcr) {
+ dev_err(acpi_desc->dev, "SPA %d missing DCR %d\n",
+ spa->range_index, dcr);
+ return -ENODEV;
}
if (type == NFIT_SPA_DCR) {
@@ -595,6 +600,7 @@
nfit_mem->idt_dcr = nfit_idt->idt;
break;
}
+ nfit_mem_init_bdw(acpi_desc, nfit_mem, spa);
} else {
/*
* A single dimm may belong to multiple SPA-PM
@@ -603,13 +609,6 @@
*/
nfit_mem->memdev_pmem = nfit_memdev->memdev;
}
-
- if (found)
- continue;
-
- rc = nfit_mem_add(acpi_desc, nfit_mem, spa);
- if (rc)
- return rc;
}
return 0;
@@ -1504,9 +1503,7 @@
case 1:
/* ARS unsupported, but we should never get here */
return 0;
- case 2:
- return -EINVAL;
- case 3:
+ case 6:
/* ARS is in progress */
msleep(1000);
break;
@@ -1517,13 +1514,13 @@
}
static int ars_get_status(struct nvdimm_bus_descriptor *nd_desc,
- struct nd_cmd_ars_status *cmd)
+ struct nd_cmd_ars_status *cmd, u32 size)
{
int rc;
while (1) {
rc = nd_desc->ndctl(nd_desc, NULL, ND_CMD_ARS_STATUS, cmd,
- sizeof(*cmd));
+ size);
if (rc || cmd->status & 0xffff)
return -ENXIO;
@@ -1538,6 +1535,8 @@
case 2:
/* No ARS performed for the current boot */
return 0;
+ case 3:
+ /* TODO: error list overflow support */
default:
return -ENXIO;
}
@@ -1581,6 +1580,7 @@
struct nd_cmd_ars_start *ars_start = NULL;
struct nd_cmd_ars_cap *ars_cap = NULL;
u64 start, len, cur, remaining;
+ u32 ars_status_size;
int rc;
ars_cap = kzalloc(sizeof(*ars_cap), GFP_KERNEL);
@@ -1610,14 +1610,14 @@
* Check if a full-range ARS has been run. If so, use those results
* without having to start a new ARS.
*/
- ars_status = kzalloc(ars_cap->max_ars_out + sizeof(*ars_status),
- GFP_KERNEL);
+ ars_status_size = ars_cap->max_ars_out;
+ ars_status = kzalloc(ars_status_size, GFP_KERNEL);
if (!ars_status) {
rc = -ENOMEM;
goto out;
}
- rc = ars_get_status(nd_desc, ars_status);
+ rc = ars_get_status(nd_desc, ars_status, ars_status_size);
if (rc)
goto out;
@@ -1647,7 +1647,7 @@
if (rc)
goto out;
- rc = ars_get_status(nd_desc, ars_status);
+ rc = ars_get_status(nd_desc, ars_status, ars_status_size);
if (rc)
goto out;
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index d30184c..c8e169e 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -406,7 +406,7 @@
return 0;
}
- if (pci_has_managed_irq(dev))
+ if (dev->irq_managed && dev->irq > 0)
return 0;
entry = acpi_pci_irq_lookup(dev, pin);
@@ -451,7 +451,8 @@
kfree(entry);
return rc;
}
- pci_set_managed_irq(dev, rc);
+ dev->irq = rc;
+ dev->irq_managed = 1;
if (link)
snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -474,9 +475,17 @@
u8 pin;
pin = dev->pin;
- if (!pin || !pci_has_managed_irq(dev))
+ if (!pin || !dev->irq_managed || dev->irq <= 0)
return;
+ /* Keep IOAPIC pin configuration when suspending */
+ if (dev->dev.power.is_prepared)
+ return;
+#ifdef CONFIG_PM
+ if (dev->dev.power.runtime_status == RPM_SUSPENDING)
+ return;
+#endif
+
entry = acpi_pci_irq_lookup(dev, pin);
if (!entry)
return;
@@ -496,6 +505,6 @@
dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
if (gsi >= 0) {
acpi_unregister_gsi(gsi);
- pci_reset_managed_irq(dev);
+ dev->irq_managed = 0;
}
}
diff --git a/drivers/acpi/pci_link.c b/drivers/acpi/pci_link.c
index fa28635..ededa90 100644
--- a/drivers/acpi/pci_link.c
+++ b/drivers/acpi/pci_link.c
@@ -4,7 +4,6 @@
* Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
* Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
* Copyright (C) 2002 Dominik Brodowski <devel@brodo.de>
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -438,6 +437,7 @@
* enabled system.
*/
+#define ACPI_MAX_IRQS 256
#define ACPI_MAX_ISA_IRQ 16
#define PIRQ_PENALTY_PCI_AVAILABLE (0)
@@ -447,7 +447,7 @@
#define PIRQ_PENALTY_ISA_USED (16*16*16*16*16)
#define PIRQ_PENALTY_ISA_ALWAYS (16*16*16*16*16*16)
-static int acpi_irq_isa_penalty[ACPI_MAX_ISA_IRQ] = {
+static int acpi_irq_penalty[ACPI_MAX_IRQS] = {
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ0 timer */
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ1 keyboard */
PIRQ_PENALTY_ISA_ALWAYS, /* IRQ2 cascade */
@@ -464,68 +464,9 @@
PIRQ_PENALTY_ISA_USED, /* IRQ13 fpe, sometimes */
PIRQ_PENALTY_ISA_USED, /* IRQ14 ide0 */
PIRQ_PENALTY_ISA_USED, /* IRQ15 ide1 */
+ /* >IRQ15 */
};
-struct irq_penalty_info {
- int irq;
- int penalty;
- struct list_head node;
-};
-
-static LIST_HEAD(acpi_irq_penalty_list);
-
-static int acpi_irq_get_penalty(int irq)
-{
- struct irq_penalty_info *irq_info;
-
- if (irq < ACPI_MAX_ISA_IRQ)
- return acpi_irq_isa_penalty[irq];
-
- list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
- if (irq_info->irq == irq)
- return irq_info->penalty;
- }
-
- return 0;
-}
-
-static int acpi_irq_set_penalty(int irq, int new_penalty)
-{
- struct irq_penalty_info *irq_info;
-
- /* see if this is a ISA IRQ */
- if (irq < ACPI_MAX_ISA_IRQ) {
- acpi_irq_isa_penalty[irq] = new_penalty;
- return 0;
- }
-
- /* next, try to locate from the dynamic list */
- list_for_each_entry(irq_info, &acpi_irq_penalty_list, node) {
- if (irq_info->irq == irq) {
- irq_info->penalty = new_penalty;
- return 0;
- }
- }
-
- /* nope, let's allocate a slot for this IRQ */
- irq_info = kzalloc(sizeof(*irq_info), GFP_KERNEL);
- if (!irq_info)
- return -ENOMEM;
-
- irq_info->irq = irq;
- irq_info->penalty = new_penalty;
- list_add_tail(&irq_info->node, &acpi_irq_penalty_list);
-
- return 0;
-}
-
-static void acpi_irq_add_penalty(int irq, int penalty)
-{
- int curpen = acpi_irq_get_penalty(irq);
-
- acpi_irq_set_penalty(irq, curpen + penalty);
-}
-
int __init acpi_irq_penalty_init(void)
{
struct acpi_pci_link *link;
@@ -546,16 +487,15 @@
link->irq.possible_count;
for (i = 0; i < link->irq.possible_count; i++) {
- if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ) {
- int irqpos = link->irq.possible[i];
-
- acpi_irq_add_penalty(irqpos, penalty);
- }
+ if (link->irq.possible[i] < ACPI_MAX_ISA_IRQ)
+ acpi_irq_penalty[link->irq.
+ possible[i]] +=
+ penalty;
}
} else if (link->irq.active) {
- acpi_irq_add_penalty(link->irq.active,
- PIRQ_PENALTY_PCI_POSSIBLE);
+ acpi_irq_penalty[link->irq.active] +=
+ PIRQ_PENALTY_PCI_POSSIBLE;
}
}
@@ -607,12 +547,12 @@
* the use of IRQs 9, 10, 11, and >15.
*/
for (i = (link->irq.possible_count - 1); i >= 0; i--) {
- if (acpi_irq_get_penalty(irq) >
- acpi_irq_get_penalty(link->irq.possible[i]))
+ if (acpi_irq_penalty[irq] >
+ acpi_irq_penalty[link->irq.possible[i]])
irq = link->irq.possible[i];
}
}
- if (acpi_irq_get_penalty(irq) >= PIRQ_PENALTY_ISA_ALWAYS) {
+ if (acpi_irq_penalty[irq] >= PIRQ_PENALTY_ISA_ALWAYS) {
printk(KERN_ERR PREFIX "No IRQ available for %s [%s]. "
"Try pci=noacpi or acpi=off\n",
acpi_device_name(link->device),
@@ -628,8 +568,7 @@
acpi_device_bid(link->device));
return -ENODEV;
} else {
- acpi_irq_add_penalty(link->irq.active, PIRQ_PENALTY_PCI_USING);
-
+ acpi_irq_penalty[link->irq.active] += PIRQ_PENALTY_PCI_USING;
printk(KERN_WARNING PREFIX "%s [%s] enabled at IRQ %d\n",
acpi_device_name(link->device),
acpi_device_bid(link->device), link->irq.active);
@@ -839,7 +778,7 @@
}
/*
- * modify penalty from cmdline
+ * modify acpi_irq_penalty[] from cmdline
*/
static int __init acpi_irq_penalty_update(char *str, int used)
{
@@ -857,10 +796,13 @@
if (irq < 0)
continue;
+ if (irq >= ARRAY_SIZE(acpi_irq_penalty))
+ continue;
+
if (used)
- acpi_irq_add_penalty(irq, PIRQ_PENALTY_ISA_USED);
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
else
- acpi_irq_set_penalty(irq, PIRQ_PENALTY_PCI_AVAILABLE);
+ acpi_irq_penalty[irq] = PIRQ_PENALTY_PCI_AVAILABLE;
if (retval != 2) /* no next number */
break;
@@ -877,15 +819,18 @@
*/
void acpi_penalize_isa_irq(int irq, int active)
{
- if (irq >= 0)
- acpi_irq_add_penalty(irq, active ?
- PIRQ_PENALTY_ISA_USED : PIRQ_PENALTY_PCI_USING);
+ if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+ if (active)
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_USED;
+ else
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ }
}
bool acpi_isa_irq_available(int irq)
{
- return irq >= 0 &&
- (acpi_irq_get_penalty(irq) < PIRQ_PENALTY_ISA_ALWAYS);
+ return irq >= 0 && (irq >= ARRAY_SIZE(acpi_irq_penalty) ||
+ acpi_irq_penalty[irq] < PIRQ_PENALTY_ISA_ALWAYS);
}
/*
@@ -895,18 +840,13 @@
*/
void acpi_penalize_sci_irq(int irq, int trigger, int polarity)
{
- int penalty;
-
- if (irq < 0)
- return;
-
- if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
- polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
- penalty = PIRQ_PENALTY_ISA_ALWAYS;
- else
- penalty = PIRQ_PENALTY_PCI_USING;
-
- acpi_irq_add_penalty(irq, penalty);
+ if (irq >= 0 && irq < ARRAY_SIZE(acpi_irq_penalty)) {
+ if (trigger != ACPI_MADT_TRIGGER_LEVEL ||
+ polarity != ACPI_MADT_POLARITY_ACTIVE_LOW)
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_ISA_ALWAYS;
+ else
+ acpi_irq_penalty[irq] += PIRQ_PENALTY_PCI_USING;
+ }
}
/*
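
This hunk reverts the dynamic penalty list back to a fixed acpi_irq_penalty[256] array; every access is bounds-checked with ARRAY_SIZE, and IRQs beyond the table are simply treated as penalty-free. A condensed userspace rendering of that pattern:

    #include <stdbool.h>
    #include <stdio.h>

    #define ARRAY_SIZE(a)       (sizeof(a) / sizeof((a)[0]))
    #define PENALTY_ISA_ALWAYS  (16 * 16 * 16 * 16 * 16 * 16)

    static int irq_penalty[256];

    static void penalize(int irq, int penalty)
    {
            if (irq >= 0 && irq < (int)ARRAY_SIZE(irq_penalty))
                    irq_penalty[irq] += penalty;  /* out-of-range IRQs ignored */
    }

    static bool irq_available(int irq)
    {
            /* IRQs beyond the table carry no penalty, so they're available */
            return irq >= 0 && (irq >= (int)ARRAY_SIZE(irq_penalty) ||
                                irq_penalty[irq] < PENALTY_ISA_ALWAYS);
    }

    int main(void)
    {
            penalize(9, 16);
            penalize(4096, 16);  /* silently dropped */
            printf("irq9=%d irq4096=%d\n",
                   irq_available(9), irq_available(4096));
            return 0;
    }
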
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index a39e85f..7d00b7a 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -2074,7 +2074,7 @@
if (get_user(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
- ptr += sizeof(void *);
+ ptr += sizeof(cookie);
list_for_each_entry(w, &proc->delivered_death, entry) {
struct binder_ref_death *tmp_death = container_of(w, struct binder_ref_death, work);
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c
index 594fcab..546a369 100644
--- a/drivers/ata/ahci.c
+++ b/drivers/ata/ahci.c
@@ -264,6 +264,26 @@
{ PCI_VDEVICE(INTEL, 0x3b2b), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2c), board_ahci }, /* PCH RAID */
{ PCI_VDEVICE(INTEL, 0x3b2f), board_ahci }, /* PCH AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b0), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b1), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b2), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b3), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b4), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b5), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b6), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19b7), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bE), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19bF), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c0), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c1), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c2), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c3), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c4), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c5), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c6), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19c7), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cE), board_ahci }, /* DNV AHCI */
+ { PCI_VDEVICE(INTEL, 0x19cF), board_ahci }, /* DNV AHCI */
{ PCI_VDEVICE(INTEL, 0x1c02), board_ahci }, /* CPT AHCI */
{ PCI_VDEVICE(INTEL, 0x1c03), board_ahci }, /* CPT AHCI */
{ PCI_VDEVICE(INTEL, 0x1c04), board_ahci }, /* CPT RAID */
diff --git a/drivers/ata/ahci.h b/drivers/ata/ahci.h
index a4faa43..a44c75d 100644
--- a/drivers/ata/ahci.h
+++ b/drivers/ata/ahci.h
@@ -250,6 +250,7 @@
AHCI_HFLAG_MULTI_MSI = 0,
AHCI_HFLAG_MULTI_MSIX = 0,
#endif
+ AHCI_HFLAG_WAKE_BEFORE_STOP = (1 << 22), /* wake before DMA stop */
/* ap->flags bits */
diff --git a/drivers/ata/ahci_brcmstb.c b/drivers/ata/ahci_brcmstb.c
index b36cae2..e87bcec 100644
--- a/drivers/ata/ahci_brcmstb.c
+++ b/drivers/ata/ahci_brcmstb.c
@@ -317,6 +317,7 @@
if (IS_ERR(hpriv))
return PTR_ERR(hpriv);
hpriv->plat_data = priv;
+ hpriv->flags = AHCI_HFLAG_WAKE_BEFORE_STOP;
brcm_sata_alpm_init(hpriv);
diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
index d61740e..4029679 100644
--- a/drivers/ata/libahci.c
+++ b/drivers/ata/libahci.c
@@ -496,8 +496,8 @@
}
}
- /* fabricate port_map from cap.nr_ports */
- if (!port_map) {
+ /* fabricate port_map from cap.nr_ports for < AHCI 1.3 */
+ if (!port_map && vers < 0x10300) {
port_map = (1 << ahci_nr_ports(cap)) - 1;
dev_warn(dev, "forcing PORTS_IMPL to 0x%x\n", port_map);
@@ -593,8 +593,22 @@
int ahci_stop_engine(struct ata_port *ap)
{
void __iomem *port_mmio = ahci_port_base(ap);
+ struct ahci_host_priv *hpriv = ap->host->private_data;
u32 tmp;
+ /*
+ * On some controllers, stopping a port's DMA engine while the port
+ * is in ALPM state (partial or slumber) results in failures on
+ * subsequent DMA engine starts. For those controllers, put the
+ * port back in active state before stopping its DMA engine.
+ */
+ if ((hpriv->flags & AHCI_HFLAG_WAKE_BEFORE_STOP) &&
+ (ap->link.lpm_policy > ATA_LPM_MAX_POWER) &&
+ ahci_set_lpm(&ap->link, ATA_LPM_MAX_POWER, ATA_LPM_WAKE_ONLY)) {
+ dev_err(ap->host->dev, "Failed to wake up port before engine stop\n");
+ return -EIO;
+ }
+
tmp = readl(port_mmio + PORT_CMD);
/* check if the HBA is idle */
@@ -689,6 +703,9 @@
void __iomem *port_mmio = ahci_port_base(ap);
if (policy != ATA_LPM_MAX_POWER) {
+ /* wakeup flag only applies to the max power policy */
+ hints &= ~ATA_LPM_WAKE_ONLY;
+
/*
* Disable interrupts on Phy Ready. This keeps us from
* getting woken up due to spurious phy ready
@@ -704,7 +721,8 @@
u32 cmd = readl(port_mmio + PORT_CMD);
if (policy == ATA_LPM_MAX_POWER || !(hints & ATA_LPM_HIPM)) {
- cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
+ if (!(hints & ATA_LPM_WAKE_ONLY))
+ cmd &= ~(PORT_CMD_ASP | PORT_CMD_ALPE);
cmd |= PORT_CMD_ICC_ACTIVE;
writel(cmd, port_mmio + PORT_CMD);
@@ -712,6 +730,9 @@
/* wait 10ms to be sure we've come out of LPM state */
ata_msleep(ap, 10);
+
+ if (hints & ATA_LPM_WAKE_ONLY)
+ return 0;
} else {
cmd |= PORT_CMD_ALPE;
if (policy == ATA_LPM_MIN_POWER)
diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
index cbb7471..55e257c 100644
--- a/drivers/ata/libata-core.c
+++ b/drivers/ata/libata-core.c
@@ -4125,6 +4125,7 @@
{ "SAMSUNG CD-ROM SN-124", "N001", ATA_HORKAGE_NODMA },
{ "Seagate STT20000A", NULL, ATA_HORKAGE_NODMA },
{ " 2GB ATA Flash Disk", "ADMA428M", ATA_HORKAGE_NODMA },
+ { "VRFDFC22048UCHC-TE*", NULL, ATA_HORKAGE_NODMA },
/* Odd clown on sil3726/4726 PMPs */
{ "Config Disk", NULL, ATA_HORKAGE_DISABLE },
diff --git a/drivers/ata/libata-sff.c b/drivers/ata/libata-sff.c
index cdf6215..051b615 100644
--- a/drivers/ata/libata-sff.c
+++ b/drivers/ata/libata-sff.c
@@ -997,12 +997,9 @@
static void ata_hsm_qc_complete(struct ata_queued_cmd *qc, int in_wq)
{
struct ata_port *ap = qc->ap;
- unsigned long flags;
if (ap->ops->error_handler) {
if (in_wq) {
- spin_lock_irqsave(ap->lock, flags);
-
/* EH might have kicked in while host lock is
* released.
*/
@@ -1014,8 +1011,6 @@
} else
ata_port_freeze(ap);
}
-
- spin_unlock_irqrestore(ap->lock, flags);
} else {
if (likely(!(qc->err_mask & AC_ERR_HSM)))
ata_qc_complete(qc);
@@ -1024,10 +1019,8 @@
}
} else {
if (in_wq) {
- spin_lock_irqsave(ap->lock, flags);
ata_sff_irq_on(ap);
ata_qc_complete(qc);
- spin_unlock_irqrestore(ap->lock, flags);
} else
ata_qc_complete(qc);
}
@@ -1048,9 +1041,10 @@
{
struct ata_link *link = qc->dev->link;
struct ata_eh_info *ehi = &link->eh_info;
- unsigned long flags = 0;
int poll_next;
+ lockdep_assert_held(ap->lock);
+
WARN_ON_ONCE((qc->flags & ATA_QCFLAG_ACTIVE) == 0);
/* Make sure ata_sff_qc_issue() does not throw things
@@ -1112,14 +1106,6 @@
}
}
- /* Send the CDB (atapi) or the first data block (ata pio out).
- * During the state transition, interrupt handler shouldn't
- * be invoked before the data transfer is complete and
- * hsm_task_state is changed. Hence, the following locking.
- */
- if (in_wq)
- spin_lock_irqsave(ap->lock, flags);
-
if (qc->tf.protocol == ATA_PROT_PIO) {
/* PIO data out protocol.
* send first data block.
@@ -1135,9 +1121,6 @@
/* send CDB */
atapi_send_cdb(ap, qc);
- if (in_wq)
- spin_unlock_irqrestore(ap->lock, flags);
-
/* if polling, ata_sff_pio_task() handles the rest.
* otherwise, interrupt handler takes over from here.
*/
@@ -1296,7 +1279,8 @@
break;
default:
poll_next = 0;
- BUG();
+ WARN(true, "ata%d: SFF host state machine in invalid state %d",
+ ap->print_id, ap->hsm_task_state);
}
return poll_next;
@@ -1361,12 +1345,14 @@
u8 status;
int poll_next;
+ spin_lock_irq(ap->lock);
+
BUG_ON(ap->sff_pio_task_link == NULL);
/* qc can be NULL if timeout occurred */
qc = ata_qc_from_tag(ap, link->active_tag);
if (!qc) {
ap->sff_pio_task_link = NULL;
- return;
+ goto out_unlock;
}
fsm_start:
@@ -1381,11 +1367,14 @@
*/
status = ata_sff_busy_wait(ap, ATA_BUSY, 5);
if (status & ATA_BUSY) {
+ spin_unlock_irq(ap->lock);
ata_msleep(ap, 2);
+ spin_lock_irq(ap->lock);
+
status = ata_sff_busy_wait(ap, ATA_BUSY, 10);
if (status & ATA_BUSY) {
ata_sff_queue_pio_task(link, ATA_SHORT_PAUSE);
- return;
+ goto out_unlock;
}
}
@@ -1402,6 +1391,8 @@
*/
if (poll_next)
goto fsm_start;
+out_unlock:
+ spin_unlock_irq(ap->lock);
}
/**
diff --git a/drivers/base/component.c b/drivers/base/component.c
index 89f5cf68..04a1582 100644
--- a/drivers/base/component.c
+++ b/drivers/base/component.c
@@ -206,6 +206,8 @@
if (mc->release)
mc->release(master, mc->data);
}
+
+ kfree(match->compare);
}
static void devm_component_match_release(struct device *dev, void *res)
@@ -221,14 +223,14 @@
if (match->alloc == num)
return 0;
- new = devm_kmalloc_array(dev, num, sizeof(*new), GFP_KERNEL);
+ new = kmalloc_array(num, sizeof(*new), GFP_KERNEL);
if (!new)
return -ENOMEM;
if (match->compare) {
memcpy(new, match->compare, sizeof(*new) *
min(match->num, num));
- devm_kfree(dev, match->compare);
+ kfree(match->compare);
}
match->compare = new;
match->alloc = num;
@@ -283,6 +285,24 @@
}
EXPORT_SYMBOL(component_match_add_release);
+static void free_master(struct master *master)
+{
+ struct component_match *match = master->match;
+ int i;
+
+ list_del(&master->node);
+
+ if (match) {
+ for (i = 0; i < match->num; i++) {
+ struct component *c = match->compare[i].component;
+ if (c)
+ c->master = NULL;
+ }
+ }
+
+ kfree(master);
+}
+
int component_master_add_with_match(struct device *dev,
const struct component_master_ops *ops,
struct component_match *match)
@@ -309,11 +329,9 @@
ret = try_to_bring_up_master(master, NULL);
- if (ret < 0) {
- /* Delete off the list if we weren't successful */
- list_del(&master->node);
- kfree(master);
- }
+ if (ret < 0)
+ free_master(master);
+
mutex_unlock(&component_mutex);
return ret < 0 ? ret : 0;
@@ -324,25 +342,12 @@
const struct component_master_ops *ops)
{
struct master *master;
- int i;
mutex_lock(&component_mutex);
master = __master_find(dev, ops);
if (master) {
- struct component_match *match = master->match;
-
take_down_master(master);
-
- list_del(&master->node);
-
- if (match) {
- for (i = 0; i < match->num; i++) {
- struct component *c = match->compare[i].component;
- if (c)
- c->master = NULL;
- }
- }
- kfree(master);
+ free_master(master);
}
mutex_unlock(&component_mutex);
}
@@ -486,6 +491,8 @@
ret = try_to_bring_up_masters(component);
if (ret < 0) {
+ if (component->master)
+ remove_component(component->master, component);
list_del(&component->node);
kfree(component);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 9e25120..84708a5 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -866,7 +866,7 @@
}
/* locks the driver */
-static int lock_fdc(int drive, bool interruptible)
+static int lock_fdc(int drive)
{
if (WARN(atomic_read(&usage_count) == 0,
"Trying to lock fdc while usage count=0\n"))
@@ -2173,7 +2173,7 @@
{
int ret;
- if (lock_fdc(drive, true))
+ if (lock_fdc(drive))
return -EINTR;
set_floppy(drive);
@@ -2960,7 +2960,7 @@
{
int ret;
- if (lock_fdc(drive, interruptible))
+ if (lock_fdc(drive))
return -EINTR;
if (arg == FD_RESET_ALWAYS)
@@ -3243,7 +3243,7 @@
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
mutex_lock(&open_lock);
- if (lock_fdc(drive, true)) {
+ if (lock_fdc(drive)) {
mutex_unlock(&open_lock);
return -EINTR;
}
@@ -3263,7 +3263,7 @@
} else {
int oldStretch;
- if (lock_fdc(drive, true))
+ if (lock_fdc(drive))
return -EINTR;
if (cmd != FDDEFPRM) {
/* notice a disk change immediately, else
@@ -3349,7 +3349,7 @@
if (type)
*g = &floppy_type[type];
else {
- if (lock_fdc(drive, false))
+ if (lock_fdc(drive))
return -EINTR;
if (poll_drive(false, 0) == -EINTR)
return -EINTR;
@@ -3433,7 +3433,7 @@
if (UDRS->fd_ref != 1)
/* somebody else has this drive open */
return -EBUSY;
- if (lock_fdc(drive, true))
+ if (lock_fdc(drive))
return -EINTR;
/* do the actual eject. Fails on
@@ -3445,7 +3445,7 @@
process_fd_request();
return ret;
case FDCLRPRM:
- if (lock_fdc(drive, true))
+ if (lock_fdc(drive))
return -EINTR;
current_type[drive] = NULL;
floppy_sizes[drive] = MAX_DISK_SIZE << 1;
@@ -3467,7 +3467,7 @@
UDP->flags &= ~FTD_MSG;
return 0;
case FDFMTBEG:
- if (lock_fdc(drive, true))
+ if (lock_fdc(drive))
return -EINTR;
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
@@ -3484,7 +3484,7 @@
return do_format(drive, &inparam.f);
case FDFMTEND:
case FDFLUSH:
- if (lock_fdc(drive, true))
+ if (lock_fdc(drive))
return -EINTR;
return invalidate_drive(bdev);
case FDSETEMSGTRESH:
@@ -3507,7 +3507,7 @@
outparam = UDP;
break;
case FDPOLLDRVSTAT:
- if (lock_fdc(drive, true))
+ if (lock_fdc(drive))
return -EINTR;
if (poll_drive(true, FD_RAW_NEED_DISK) == -EINTR)
return -EINTR;
@@ -3530,7 +3530,7 @@
case FDRAWCMD:
if (type)
return -EINVAL;
- if (lock_fdc(drive, true))
+ if (lock_fdc(drive))
return -EINTR;
set_floppy(drive);
i = raw_cmd_ioctl(cmd, (void __user *)param);
@@ -3539,7 +3539,7 @@
process_fd_request();
return i;
case FDTWADDLE:
- if (lock_fdc(drive, true))
+ if (lock_fdc(drive))
return -EINTR;
twaddle();
process_fd_request();
@@ -3663,6 +3663,11 @@
opened_bdev[drive] = bdev;
+ if (!(mode & (FMODE_READ|FMODE_WRITE))) {
+ res = -EINVAL;
+ goto out;
+ }
+
res = -ENXIO;
if (!floppy_track_buffer) {
@@ -3706,21 +3711,20 @@
if (UFDCS->rawcmd == 1)
UFDCS->rawcmd = 2;
- if (!(mode & FMODE_NDELAY)) {
- if (mode & (FMODE_READ|FMODE_WRITE)) {
- UDRS->last_checked = 0;
- clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
- check_disk_change(bdev);
- if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
- goto out;
- if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
- goto out;
- }
- res = -EROFS;
- if ((mode & FMODE_WRITE) &&
- !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
- goto out;
- }
+ UDRS->last_checked = 0;
+ clear_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags);
+ check_disk_change(bdev);
+ if (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags))
+ goto out;
+ if (test_bit(FD_OPEN_SHOULD_FAIL_BIT, &UDRS->flags))
+ goto out;
+
+ res = -EROFS;
+
+ if ((mode & FMODE_WRITE) &&
+ !test_bit(FD_DISK_WRITABLE_BIT, &UDRS->flags))
+ goto out;
+
mutex_unlock(&open_lock);
mutex_unlock(&floppy_mutex);
return 0;
@@ -3748,7 +3752,8 @@
return DISK_EVENT_MEDIA_CHANGE;
if (time_after(jiffies, UDRS->last_checked + UDP->checkfreq)) {
- lock_fdc(drive, false);
+ if (lock_fdc(drive))
+ return -EINTR;
poll_drive(false, 0);
process_fd_request();
}
@@ -3847,7 +3852,9 @@
"VFS: revalidate called on non-open device.\n"))
return -EFAULT;
- lock_fdc(drive, false);
+ res = lock_fdc(drive);
+ if (res)
+ return res;
cf = (test_bit(FD_DISK_CHANGED_BIT, &UDRS->flags) ||
test_bit(FD_VERIFY_BIT, &UDRS->flags));
if (!(cf || test_bit(drive, &fake_change) || drive_no_geom(drive))) {
diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c
index 8ba1e97..64a7b59 100644
--- a/drivers/block/null_blk.c
+++ b/drivers/block/null_blk.c
@@ -478,7 +478,7 @@
id->ver_id = 0x1;
id->vmnt = 0;
id->cgrps = 1;
- id->cap = 0x3;
+ id->cap = 0x2;
id->dom = 0x1;
id->ppaf.blk_offset = 0;
@@ -707,9 +707,7 @@
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, nullb->q);
queue_flag_clear_unlocked(QUEUE_FLAG_ADD_RANDOM, nullb->q);
-
mutex_lock(&lock);
- list_add_tail(&nullb->list, &nullb_list);
nullb->index = nullb_indexes++;
mutex_unlock(&lock);
@@ -743,6 +741,10 @@
strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN);
add_disk(disk);
+
+ mutex_lock(&lock);
+ list_add_tail(&nullb->list, &nullb_list);
+ mutex_unlock(&lock);
done:
return 0;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 8a8dc91..83eb9e6 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1873,6 +1873,43 @@
return err;
}
+static int negotiate_mq(struct blkfront_info *info)
+{
+ unsigned int backend_max_queues = 0;
+ int err;
+ unsigned int i;
+
+ BUG_ON(info->nr_rings);
+
+ /* Check if backend supports multiple queues. */
+ err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
+ "multi-queue-max-queues", "%u", &backend_max_queues);
+ if (err < 0)
+ backend_max_queues = 1;
+
+ info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
+ /* We need at least one ring. */
+ if (!info->nr_rings)
+ info->nr_rings = 1;
+
+ info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
+ if (!info->rinfo) {
+ xenbus_dev_fatal(info->xbdev, -ENOMEM, "allocating ring_info structure");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < info->nr_rings; i++) {
+ struct blkfront_ring_info *rinfo;
+
+ rinfo = &info->rinfo[i];
+ INIT_LIST_HEAD(&rinfo->indirect_pages);
+ INIT_LIST_HEAD(&rinfo->grants);
+ rinfo->dev_info = info;
+ INIT_WORK(&rinfo->work, blkif_restart_queue);
+ spin_lock_init(&rinfo->ring_lock);
+ }
+ return 0;
+}
/**
* Entry point to this code when a new device is created. Allocate the basic
* structures and the ring buffer for communication with the backend, and
@@ -1883,9 +1920,7 @@
const struct xenbus_device_id *id)
{
int err, vdevice;
- unsigned int r_index;
struct blkfront_info *info;
- unsigned int backend_max_queues = 0;
/* FIXME: Use dynamic device id if this is not set. */
err = xenbus_scanf(XBT_NIL, dev->nodename,
@@ -1936,33 +1971,10 @@
}
info->xbdev = dev;
- /* Check if backend supports multiple queues. */
- err = xenbus_scanf(XBT_NIL, info->xbdev->otherend,
- "multi-queue-max-queues", "%u", &backend_max_queues);
- if (err < 0)
- backend_max_queues = 1;
-
- info->nr_rings = min(backend_max_queues, xen_blkif_max_queues);
- /* We need at least one ring. */
- if (!info->nr_rings)
- info->nr_rings = 1;
-
- info->rinfo = kzalloc(sizeof(struct blkfront_ring_info) * info->nr_rings, GFP_KERNEL);
- if (!info->rinfo) {
- xenbus_dev_fatal(dev, -ENOMEM, "allocating ring_info structure");
+ err = negotiate_mq(info);
+ if (err) {
kfree(info);
- return -ENOMEM;
- }
-
- for (r_index = 0; r_index < info->nr_rings; r_index++) {
- struct blkfront_ring_info *rinfo;
-
- rinfo = &info->rinfo[r_index];
- INIT_LIST_HEAD(&rinfo->indirect_pages);
- INIT_LIST_HEAD(&rinfo->grants);
- rinfo->dev_info = info;
- INIT_WORK(&rinfo->work, blkif_restart_queue);
- spin_lock_init(&rinfo->ring_lock);
+ return err;
}
mutex_init(&info->mutex);
@@ -2123,12 +2135,16 @@
static int blkfront_resume(struct xenbus_device *dev)
{
struct blkfront_info *info = dev_get_drvdata(&dev->dev);
- int err;
+ int err = 0;
dev_dbg(&dev->dev, "blkfront_resume: %s\n", dev->nodename);
blkif_free(info, info->connected == BLKIF_STATE_CONNECTED);
+ err = negotiate_mq(info);
+ if (err)
+ return err;
+
err = talk_to_blkback(dev, info);
/*
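[Editor's note] The negotiate_mq() helper introduced above factors the ring-count negotiation out of the probe path so blkfront_resume() can reuse it after blkif_free(). A minimal userspace sketch of the same clamping rule; negotiate_rings() and its parameters are hypothetical stand-ins, not part of the driver:

    #include <stdio.h>

    /* Clamp the backend's advertised queue count to the frontend limit,
     * treating a failed xenstore read as "single queue" and never
     * returning fewer than one ring, as the patch does. */
    static unsigned int negotiate_rings(int backend_read_err,
                                        unsigned int backend_max,
                                        unsigned int frontend_max)
    {
        unsigned int n;

        if (backend_read_err < 0)   /* key absent: assume one queue */
            backend_max = 1;
        n = backend_max < frontend_max ? backend_max : frontend_max;
        return n ? n : 1;           /* we need at least one ring */
    }

    int main(void)
    {
        printf("%u\n", negotiate_rings(0, 8, 4));  /* -> 4 */
        printf("%u\n", negotiate_rings(-1, 0, 4)); /* -> 1 */
        return 0;
    }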
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index 240b6cf..be54e53 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -42,7 +42,7 @@
/*
* The High Precision Event Timer driver.
* This driver is closely modelled after the rtc.c driver.
- * http://www.intel.com/hardwaredesign/hpetspec_1.pdf
+ * See HPET spec revision 1.
*/
#define HPET_USER_FREQ (64)
#define HPET_DRIFT (500)
diff --git a/drivers/char/random.c b/drivers/char/random.c
index d0da5d8..b583e53 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -1819,6 +1819,28 @@
EXPORT_SYMBOL(get_random_int);
/*
+ * Same as get_random_int(), but returns unsigned long.
+ */
+unsigned long get_random_long(void)
+{
+ __u32 *hash;
+ unsigned long ret;
+
+ if (arch_get_random_long(&ret))
+ return ret;
+
+ hash = get_cpu_var(get_random_int_hash);
+
+ hash[0] += current->pid + jiffies + random_get_entropy();
+ md5_transform(hash, random_int_secret);
+ ret = *(unsigned long *)hash;
+ put_cpu_var(get_random_int_hash);
+
+ return ret;
+}
+EXPORT_SYMBOL(get_random_long);
+
+/*
* randomize_range() returns a start address such that
*
* [...... <range> .....]
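[Editor's note] get_random_long() above mirrors get_random_int(): it stirs the same per-CPU MD5 state and then reads back sizeof(unsigned long) bytes instead of four. A hedged userspace illustration of just that widening step; the fixed hash words here are a stand-in, not the kernel's md5_transform() state:

    #include <stdio.h>
    #include <string.h>

    /* Stand-in for the per-CPU hash state: 4 x 32-bit words. */
    static unsigned int hash[4] = { 0x67452301, 0xefcdab89,
                                    0x98badcfe, 0x10325476 };

    static unsigned long read_long_from_hash(void)
    {
        unsigned long ret;

        /* On 64-bit this consumes hash[0] and hash[1]; on 32-bit just
         * hash[0] - the safe equivalent of *(unsigned long *)hash. */
        memcpy(&ret, hash, sizeof(ret));
        return ret;
    }

    int main(void)
    {
        printf("%#lx\n", read_long_from_hash());
        return 0;
    }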
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index b038e36..bae4be6 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -43,7 +43,7 @@
obj-$(CONFIG_COMMON_CLK_SI570) += clk-si570.o
obj-$(CONFIG_COMMON_CLK_CDCE925) += clk-cdce925.o
obj-$(CONFIG_ARCH_STM32) += clk-stm32f4.o
-obj-$(CONFIG_ARCH_TANGOX) += clk-tango4.o
+obj-$(CONFIG_ARCH_TANGO) += clk-tango4.o
obj-$(CONFIG_CLK_TWL6040) += clk-twl6040.o
obj-$(CONFIG_ARCH_U300) += clk-u300.o
obj-$(CONFIG_ARCH_VT8500) += clk-vt8500.o
diff --git a/drivers/clk/clk-gpio.c b/drivers/clk/clk-gpio.c
index 19fed65..7b09a26 100644
--- a/drivers/clk/clk-gpio.c
+++ b/drivers/clk/clk-gpio.c
@@ -289,7 +289,7 @@
num_parents = of_clk_get_parent_count(node);
if (num_parents < 0)
- return;
+ num_parents = 0;
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
diff --git a/drivers/clk/clk-scpi.c b/drivers/clk/clk-scpi.c
index cd0f272..89e9ca7 100644
--- a/drivers/clk/clk-scpi.c
+++ b/drivers/clk/clk-scpi.c
@@ -299,7 +299,7 @@
/* Add the virtual cpufreq device */
cpufreq_dev = platform_device_register_simple("scpi-cpufreq",
-1, NULL, 0);
- if (!cpufreq_dev)
+ if (IS_ERR(cpufreq_dev))
pr_warn("unable to register cpufreq device");
return 0;
diff --git a/drivers/clk/mvebu/dove-divider.c b/drivers/clk/mvebu/dove-divider.c
index d5c5bfa..3e0b52d 100644
--- a/drivers/clk/mvebu/dove-divider.c
+++ b/drivers/clk/mvebu/dove-divider.c
@@ -247,7 +247,7 @@
void __init dove_divider_clk_init(struct device_node *np)
{
- void *base;
+ void __iomem *base;
base = of_iomap(np, 0);
if (WARN_ON(!base))
diff --git a/drivers/clk/qcom/gcc-apq8084.c b/drivers/clk/qcom/gcc-apq8084.c
index cf73e53..070037a 100644
--- a/drivers/clk/qcom/gcc-apq8084.c
+++ b/drivers/clk/qcom/gcc-apq8084.c
@@ -3587,7 +3587,6 @@
.val_bits = 32,
.max_register = 0x1fc0,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct qcom_cc_desc gcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/gcc-ipq806x.c b/drivers/clk/qcom/gcc-ipq806x.c
index b692ae8..dd5402b 100644
--- a/drivers/clk/qcom/gcc-ipq806x.c
+++ b/drivers/clk/qcom/gcc-ipq806x.c
@@ -3005,7 +3005,6 @@
.val_bits = 32,
.max_register = 0x3e40,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct qcom_cc_desc gcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8660.c b/drivers/clk/qcom/gcc-msm8660.c
index f6a2b14..ad41303 100644
--- a/drivers/clk/qcom/gcc-msm8660.c
+++ b/drivers/clk/qcom/gcc-msm8660.c
@@ -2702,7 +2702,6 @@
.val_bits = 32,
.max_register = 0x363c,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct qcom_cc_desc gcc_msm8660_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8916.c b/drivers/clk/qcom/gcc-msm8916.c
index e3bf09d..8cc9b28 100644
--- a/drivers/clk/qcom/gcc-msm8916.c
+++ b/drivers/clk/qcom/gcc-msm8916.c
@@ -3336,7 +3336,6 @@
.val_bits = 32,
.max_register = 0x80000,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct qcom_cc_desc gcc_msm8916_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8960.c b/drivers/clk/qcom/gcc-msm8960.c
index f31111e..983dd7d 100644
--- a/drivers/clk/qcom/gcc-msm8960.c
+++ b/drivers/clk/qcom/gcc-msm8960.c
@@ -3468,7 +3468,6 @@
.val_bits = 32,
.max_register = 0x3660,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct regmap_config gcc_apq8064_regmap_config = {
@@ -3477,7 +3476,6 @@
.val_bits = 32,
.max_register = 0x3880,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct qcom_cc_desc gcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/gcc-msm8974.c b/drivers/clk/qcom/gcc-msm8974.c
index df164d6..335952d 100644
--- a/drivers/clk/qcom/gcc-msm8974.c
+++ b/drivers/clk/qcom/gcc-msm8974.c
@@ -2680,7 +2680,6 @@
.val_bits = 32,
.max_register = 0x1fc0,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct qcom_cc_desc gcc_msm8974_desc = {
diff --git a/drivers/clk/qcom/lcc-ipq806x.c b/drivers/clk/qcom/lcc-ipq806x.c
index 62e79fa..db3998e 100644
--- a/drivers/clk/qcom/lcc-ipq806x.c
+++ b/drivers/clk/qcom/lcc-ipq806x.c
@@ -419,7 +419,6 @@
.val_bits = 32,
.max_register = 0xfc,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct qcom_cc_desc lcc_ipq806x_desc = {
diff --git a/drivers/clk/qcom/lcc-msm8960.c b/drivers/clk/qcom/lcc-msm8960.c
index bf95bb0..4fcf9d1 100644
--- a/drivers/clk/qcom/lcc-msm8960.c
+++ b/drivers/clk/qcom/lcc-msm8960.c
@@ -524,7 +524,6 @@
.val_bits = 32,
.max_register = 0xfc,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct qcom_cc_desc lcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-apq8084.c b/drivers/clk/qcom/mmcc-apq8084.c
index 1e703fd..30777f9 100644
--- a/drivers/clk/qcom/mmcc-apq8084.c
+++ b/drivers/clk/qcom/mmcc-apq8084.c
@@ -3368,7 +3368,6 @@
.val_bits = 32,
.max_register = 0x5104,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct qcom_cc_desc mmcc_apq8084_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8960.c b/drivers/clk/qcom/mmcc-msm8960.c
index d73a048..00e3619 100644
--- a/drivers/clk/qcom/mmcc-msm8960.c
+++ b/drivers/clk/qcom/mmcc-msm8960.c
@@ -3029,7 +3029,6 @@
.val_bits = 32,
.max_register = 0x334,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct regmap_config mmcc_apq8064_regmap_config = {
@@ -3038,7 +3037,6 @@
.val_bits = 32,
.max_register = 0x350,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct qcom_cc_desc mmcc_msm8960_desc = {
diff --git a/drivers/clk/qcom/mmcc-msm8974.c b/drivers/clk/qcom/mmcc-msm8974.c
index bbe28ed..9d790bc 100644
--- a/drivers/clk/qcom/mmcc-msm8974.c
+++ b/drivers/clk/qcom/mmcc-msm8974.c
@@ -2594,7 +2594,6 @@
.val_bits = 32,
.max_register = 0x5104,
.fast_io = true,
- .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static const struct qcom_cc_desc mmcc_msm8974_desc = {
diff --git a/drivers/clk/rockchip/clk-rk3036.c b/drivers/clk/rockchip/clk-rk3036.c
index ebce980..bc7fbac 100644
--- a/drivers/clk/rockchip/clk-rk3036.c
+++ b/drivers/clk/rockchip/clk-rk3036.c
@@ -133,7 +133,7 @@
PNAME(mux_uart0_p) = { "uart0_src", "uart0_frac", "xin24m" };
PNAME(mux_uart1_p) = { "uart1_src", "uart1_frac", "xin24m" };
PNAME(mux_uart2_p) = { "uart2_src", "uart2_frac", "xin24m" };
-PNAME(mux_mac_p) = { "mac_pll_src", "ext_gmac" };
+PNAME(mux_mac_p) = { "mac_pll_src", "rmii_clkin" };
PNAME(mux_dclk_p) = { "dclk_lcdc", "dclk_cru" };
static struct rockchip_pll_clock rk3036_pll_clks[] __initdata = {
@@ -224,16 +224,16 @@
RK2928_CLKGATE_CON(2), 2, GFLAGS),
COMPOSITE_NODIV(SCLK_TIMER0, "sclk_timer0", mux_timer_p, CLK_IGNORE_UNUSED,
- RK2928_CLKSEL_CON(2), 4, 1, DFLAGS,
+ RK2928_CLKSEL_CON(2), 4, 1, MFLAGS,
RK2928_CLKGATE_CON(1), 0, GFLAGS),
COMPOSITE_NODIV(SCLK_TIMER1, "sclk_timer1", mux_timer_p, CLK_IGNORE_UNUSED,
- RK2928_CLKSEL_CON(2), 5, 1, DFLAGS,
+ RK2928_CLKSEL_CON(2), 5, 1, MFLAGS,
RK2928_CLKGATE_CON(1), 1, GFLAGS),
COMPOSITE_NODIV(SCLK_TIMER2, "sclk_timer2", mux_timer_p, CLK_IGNORE_UNUSED,
- RK2928_CLKSEL_CON(2), 6, 1, DFLAGS,
+ RK2928_CLKSEL_CON(2), 6, 1, MFLAGS,
RK2928_CLKGATE_CON(2), 4, GFLAGS),
COMPOSITE_NODIV(SCLK_TIMER3, "sclk_timer3", mux_timer_p, CLK_IGNORE_UNUSED,
- RK2928_CLKSEL_CON(2), 7, 1, DFLAGS,
+ RK2928_CLKSEL_CON(2), 7, 1, MFLAGS,
RK2928_CLKGATE_CON(2), 5, GFLAGS),
MUX(0, "uart_pll_clk", mux_pll_src_apll_dpll_gpll_usb480m_p, 0,
@@ -242,11 +242,11 @@
RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
RK2928_CLKGATE_CON(1), 8, GFLAGS),
COMPOSITE_NOMUX(0, "uart1_src", "uart_pll_clk", 0,
- RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
- RK2928_CLKGATE_CON(1), 8, GFLAGS),
+ RK2928_CLKSEL_CON(14), 0, 7, DFLAGS,
+ RK2928_CLKGATE_CON(1), 10, GFLAGS),
COMPOSITE_NOMUX(0, "uart2_src", "uart_pll_clk", 0,
- RK2928_CLKSEL_CON(13), 0, 7, DFLAGS,
- RK2928_CLKGATE_CON(1), 8, GFLAGS),
+ RK2928_CLKSEL_CON(15), 0, 7, DFLAGS,
+ RK2928_CLKGATE_CON(1), 12, GFLAGS),
COMPOSITE_FRACMUX(0, "uart0_frac", "uart0_src", CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(17), 0,
RK2928_CLKGATE_CON(1), 9, GFLAGS,
@@ -279,13 +279,13 @@
RK2928_CLKGATE_CON(3), 2, GFLAGS),
COMPOSITE_NODIV(0, "sclk_sdmmc_src", mux_mmc_src_p, 0,
- RK2928_CLKSEL_CON(12), 8, 2, DFLAGS,
+ RK2928_CLKSEL_CON(12), 8, 2, MFLAGS,
RK2928_CLKGATE_CON(2), 11, GFLAGS),
DIV(SCLK_SDMMC, "sclk_sdmmc", "sclk_sdmmc_src", 0,
RK2928_CLKSEL_CON(11), 0, 7, DFLAGS),
COMPOSITE_NODIV(0, "sclk_sdio_src", mux_mmc_src_p, 0,
- RK2928_CLKSEL_CON(12), 10, 2, DFLAGS,
+ RK2928_CLKSEL_CON(12), 10, 2, MFLAGS,
RK2928_CLKGATE_CON(2), 13, GFLAGS),
DIV(SCLK_SDIO, "sclk_sdio", "sclk_sdio_src", 0,
RK2928_CLKSEL_CON(11), 8, 7, DFLAGS),
@@ -344,12 +344,12 @@
RK2928_CLKGATE_CON(10), 5, GFLAGS),
COMPOSITE_NOGATE(0, "mac_pll_src", mux_pll_src_3plls_p, 0,
- RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 4, 5, DFLAGS),
+ RK2928_CLKSEL_CON(21), 0, 2, MFLAGS, 9, 5, DFLAGS),
MUX(SCLK_MACREF, "mac_clk_ref", mux_mac_p, CLK_SET_RATE_PARENT,
RK2928_CLKSEL_CON(21), 3, 1, MFLAGS),
COMPOSITE_NOMUX(SCLK_MAC, "mac_clk", "mac_clk_ref", 0,
- RK2928_CLKSEL_CON(21), 9, 5, DFLAGS,
+ RK2928_CLKSEL_CON(21), 4, 5, DFLAGS,
RK2928_CLKGATE_CON(2), 6, GFLAGS),
MUX(SCLK_HDMI, "dclk_hdmi", mux_dclk_p, 0,
diff --git a/drivers/clk/rockchip/clk-rk3368.c b/drivers/clk/rockchip/clk-rk3368.c
index be0ede5..21f3ea9 100644
--- a/drivers/clk/rockchip/clk-rk3368.c
+++ b/drivers/clk/rockchip/clk-rk3368.c
@@ -780,13 +780,13 @@
GATE(PCLK_TSADC, "pclk_tsadc", "pclk_peri", 0, RK3368_CLKGATE_CON(20), 0, GFLAGS),
/* pclk_pd_alive gates */
- GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 8, GFLAGS),
- GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 7, GFLAGS),
- GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 12, GFLAGS),
- GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 11, GFLAGS),
- GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 3, GFLAGS),
- GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 2, GFLAGS),
- GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(14), 1, GFLAGS),
+ GATE(PCLK_TIMER1, "pclk_timer1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 13, GFLAGS),
+ GATE(PCLK_TIMER0, "pclk_timer0", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 12, GFLAGS),
+ GATE(0, "pclk_alive_niu", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 9, GFLAGS),
+ GATE(PCLK_GRF, "pclk_grf", "pclk_pd_alive", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(22), 8, GFLAGS),
+ GATE(PCLK_GPIO3, "pclk_gpio3", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 3, GFLAGS),
+ GATE(PCLK_GPIO2, "pclk_gpio2", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 2, GFLAGS),
+ GATE(PCLK_GPIO1, "pclk_gpio1", "pclk_pd_alive", 0, RK3368_CLKGATE_CON(22), 1, GFLAGS),
/*
* pclk_vio gates
@@ -796,12 +796,12 @@
GATE(0, "pclk_dphytx", "hclk_vio", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(14), 8, GFLAGS),
/* pclk_pd_pmu gates */
- GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 0, GFLAGS),
- GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(17), 4, GFLAGS),
- GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 3, GFLAGS),
- GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS),
- GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 1, GFLAGS),
- GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(17), 2, GFLAGS),
+ GATE(PCLK_PMUGRF, "pclk_pmugrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 5, GFLAGS),
+ GATE(PCLK_GPIO0, "pclk_gpio0", "pclk_pd_pmu", 0, RK3368_CLKGATE_CON(23), 4, GFLAGS),
+ GATE(PCLK_SGRF, "pclk_sgrf", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 3, GFLAGS),
+ GATE(0, "pclk_pmu_noc", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 2, GFLAGS),
+ GATE(0, "pclk_intmem1", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 1, GFLAGS),
+ GATE(PCLK_PMU, "pclk_pmu", "pclk_pd_pmu", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(23), 0, GFLAGS),
/* timer gates */
GATE(0, "sclk_timer15", "xin24m", CLK_IGNORE_UNUSED, RK3368_CLKGATE_CON(24), 11, GFLAGS),
diff --git a/drivers/clk/tegra/clk-emc.c b/drivers/clk/tegra/clk-emc.c
index e1fe8f3..74e7544 100644
--- a/drivers/clk/tegra/clk-emc.c
+++ b/drivers/clk/tegra/clk-emc.c
@@ -450,8 +450,10 @@
struct emc_timing *timing = tegra->timings + (i++);
err = load_one_timing_from_dt(tegra, timing, child);
- if (err)
+ if (err) {
+ of_node_put(child);
return err;
+ }
timing->ram_code = ram_code;
}
@@ -499,9 +501,9 @@
* fuses until the apbmisc driver is loaded.
*/
err = load_timings_from_dt(tegra, node, node_ram_code);
+ of_node_put(node);
if (err)
return ERR_PTR(err);
- of_node_put(node);
break;
}
diff --git a/drivers/clk/tegra/clk-id.h b/drivers/clk/tegra/clk-id.h
index 19ce073..62ea381 100644
--- a/drivers/clk/tegra/clk-id.h
+++ b/drivers/clk/tegra/clk-id.h
@@ -11,6 +11,7 @@
tegra_clk_afi,
tegra_clk_amx,
tegra_clk_amx1,
+ tegra_clk_apb2ape,
tegra_clk_apbdma,
tegra_clk_apbif,
tegra_clk_ape,
diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c
index a534bfa..6ac3f84 100644
--- a/drivers/clk/tegra/clk-pll.c
+++ b/drivers/clk/tegra/clk-pll.c
@@ -86,15 +86,21 @@
#define PLLE_SS_DISABLE (PLLE_SS_CNTL_BYPASS_SS | PLLE_SS_CNTL_INTERP_RESET |\
PLLE_SS_CNTL_SSC_BYP)
#define PLLE_SS_MAX_MASK 0x1ff
-#define PLLE_SS_MAX_VAL 0x25
+#define PLLE_SS_MAX_VAL_TEGRA114 0x25
+#define PLLE_SS_MAX_VAL_TEGRA210 0x21
#define PLLE_SS_INC_MASK (0xff << 16)
#define PLLE_SS_INC_VAL (0x1 << 16)
#define PLLE_SS_INCINTRV_MASK (0x3f << 24)
-#define PLLE_SS_INCINTRV_VAL (0x20 << 24)
+#define PLLE_SS_INCINTRV_VAL_TEGRA114 (0x20 << 24)
+#define PLLE_SS_INCINTRV_VAL_TEGRA210 (0x23 << 24)
#define PLLE_SS_COEFFICIENTS_MASK \
(PLLE_SS_MAX_MASK | PLLE_SS_INC_MASK | PLLE_SS_INCINTRV_MASK)
-#define PLLE_SS_COEFFICIENTS_VAL \
- (PLLE_SS_MAX_VAL | PLLE_SS_INC_VAL | PLLE_SS_INCINTRV_VAL)
+#define PLLE_SS_COEFFICIENTS_VAL_TEGRA114 \
+ (PLLE_SS_MAX_VAL_TEGRA114 | PLLE_SS_INC_VAL |\
+ PLLE_SS_INCINTRV_VAL_TEGRA114)
+#define PLLE_SS_COEFFICIENTS_VAL_TEGRA210 \
+ (PLLE_SS_MAX_VAL_TEGRA210 | PLLE_SS_INC_VAL |\
+ PLLE_SS_INCINTRV_VAL_TEGRA210)
#define PLLE_AUX_PLLP_SEL BIT(2)
#define PLLE_AUX_USE_LOCKDET BIT(3)
@@ -880,7 +886,7 @@
static int clk_plle_enable(struct clk_hw *hw)
{
struct tegra_clk_pll *pll = to_clk_pll(hw);
- unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+ unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
struct tegra_clk_pll_freq_table sel;
u32 val;
int err;
@@ -1378,7 +1384,7 @@
u32 val;
int ret;
unsigned long flags = 0;
- unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+ unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
return -EINVAL;
@@ -1401,7 +1407,7 @@
val |= PLLE_MISC_IDDQ_SW_CTRL;
val &= ~PLLE_MISC_IDDQ_SW_VALUE;
val |= PLLE_MISC_PLLE_PTS;
- val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK;
+ val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
pll_writel_misc(val, pll);
udelay(5);
@@ -1428,7 +1434,7 @@
val = pll_readl(PLLE_SS_CTRL, pll);
val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
val &= ~PLLE_SS_COEFFICIENTS_MASK;
- val |= PLLE_SS_COEFFICIENTS_VAL;
+ val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA114;
pll_writel(val, PLLE_SS_CTRL, pll);
val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2012,9 +2018,9 @@
struct tegra_clk_pll *pll = to_clk_pll(hw);
struct tegra_clk_pll_freq_table sel;
u32 val;
- int ret;
+ int ret = 0;
unsigned long flags = 0;
- unsigned long input_rate = clk_get_rate(clk_get_parent(hw->clk));
+ unsigned long input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate))
return -EINVAL;
@@ -2022,22 +2028,20 @@
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);
+ val = pll_readl(pll->params->aux_reg, pll);
+ if (val & PLLE_AUX_SEQ_ENABLE)
+ goto out;
+
val = pll_readl_base(pll);
val &= ~BIT(30); /* Disable lock override */
pll_writel_base(val, pll);
- val = pll_readl(pll->params->aux_reg, pll);
- val |= PLLE_AUX_ENABLE_SWCTL;
- val &= ~PLLE_AUX_SEQ_ENABLE;
- pll_writel(val, pll->params->aux_reg, pll);
- udelay(1);
-
val = pll_readl_misc(pll);
val |= PLLE_MISC_LOCK_ENABLE;
val |= PLLE_MISC_IDDQ_SW_CTRL;
val &= ~PLLE_MISC_IDDQ_SW_VALUE;
val |= PLLE_MISC_PLLE_PTS;
- val |= PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK;
+ val &= ~(PLLE_MISC_VREG_BG_CTRL_MASK | PLLE_MISC_VREG_CTRL_MASK);
pll_writel_misc(val, pll);
udelay(5);
@@ -2067,7 +2071,7 @@
val = pll_readl(PLLE_SS_CTRL, pll);
val &= ~(PLLE_SS_CNTL_CENTER | PLLE_SS_CNTL_INVERT);
val &= ~PLLE_SS_COEFFICIENTS_MASK;
- val |= PLLE_SS_COEFFICIENTS_VAL;
+ val |= PLLE_SS_COEFFICIENTS_VAL_TEGRA210;
pll_writel(val, PLLE_SS_CTRL, pll);
val &= ~(PLLE_SS_CNTL_SSC_BYP | PLLE_SS_CNTL_BYPASS_SS);
pll_writel(val, PLLE_SS_CTRL, pll);
@@ -2104,15 +2108,25 @@
if (pll->lock)
spin_lock_irqsave(pll->lock, flags);
+ /* If PLLE HW sequencer is enabled, SW should not disable PLLE */
+ val = pll_readl(pll->params->aux_reg, pll);
+ if (val & PLLE_AUX_SEQ_ENABLE)
+ goto out;
+
val = pll_readl_base(pll);
val &= ~PLLE_BASE_ENABLE;
pll_writel_base(val, pll);
+ val = pll_readl(pll->params->aux_reg, pll);
+ val |= PLLE_AUX_ENABLE_SWCTL | PLLE_AUX_SS_SWCTL;
+ pll_writel(val, pll->params->aux_reg, pll);
+
val = pll_readl_misc(pll);
val |= PLLE_MISC_IDDQ_SW_CTRL | PLLE_MISC_IDDQ_SW_VALUE;
pll_writel_misc(val, pll);
udelay(1);
+out:
if (pll->lock)
spin_unlock_irqrestore(pll->lock, flags);
}
diff --git a/drivers/clk/tegra/clk-tegra-periph.c b/drivers/clk/tegra/clk-tegra-periph.c
index 6ad381a..ea2b9cbf 100644
--- a/drivers/clk/tegra/clk-tegra-periph.c
+++ b/drivers/clk/tegra/clk-tegra-periph.c
@@ -773,7 +773,7 @@
XUSB("xusb_dev_src", mux_clkm_pllp_pllc_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src),
XUSB("xusb_dev_src", mux_clkm_pllp_pllre, CLK_SOURCE_XUSB_DEV_SRC, 95, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_xusb_dev_src_8),
MUX8("dbgapb", mux_pllp_clkm_2, CLK_SOURCE_DBGAPB, 185, TEGRA_PERIPH_NO_RESET, tegra_clk_dbgapb),
- MUX8("msenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
+ MUX8("nvenc", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVENC, 219, 0, tegra_clk_nvenc),
MUX8("nvdec", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVDEC, 194, 0, tegra_clk_nvdec),
MUX8("nvjpg", mux_pllc2_c_c3_pllp_plla1_clkm, CLK_SOURCE_NVJPG, 195, 0, tegra_clk_nvjpg),
MUX8("ape", mux_plla_pllc4_out0_pllc_pllc4_out1_pllp_pllc4_out2_clkm, CLK_SOURCE_APE, 198, TEGRA_PERIPH_ON_APB, tegra_clk_ape),
@@ -782,7 +782,7 @@
NODIV("sor1", mux_clkm_sor1_brick_sor1_src, CLK_SOURCE_SOR1, 15, MASK(1), 183, 0, tegra_clk_sor1, &sor1_lock),
MUX8("sdmmc_legacy", mux_pllp_out3_clkm_pllp_pllc4, CLK_SOURCE_SDMMC_LEGACY, 193, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_sdmmc_legacy),
MUX8("qspi", mux_pllp_pllc_pllc_out1_pllc4_out2_pllc4_out1_clkm_pllc4_out0, CLK_SOURCE_QSPI, 211, TEGRA_PERIPH_ON_APB, tegra_clk_qspi),
- MUX("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, TEGRA_PERIPH_ON_APB, tegra_clk_vi_i2c),
+ I2C("vii2c", mux_pllp_pllc_clkm, CLK_SOURCE_VI_I2C, 208, tegra_clk_vi_i2c),
MUX("mipibif", mux_pllp_clkm, CLK_SOURCE_MIPIBIF, 173, TEGRA_PERIPH_ON_APB, tegra_clk_mipibif),
MUX("uartape", mux_pllp_pllc_clkm, CLK_SOURCE_UARTAPE, 212, TEGRA_PERIPH_ON_APB | TEGRA_PERIPH_NO_RESET, tegra_clk_uartape),
MUX8("tsecb", mux_pllp_pllc2_c_c3_clkm, CLK_SOURCE_TSECB, 206, 0, tegra_clk_tsecb),
@@ -829,6 +829,7 @@
GATE("xusb_gate", "osc", 143, 0, tegra_clk_xusb_gate, 0),
GATE("pll_p_out_cpu", "pll_p", 223, 0, tegra_clk_pll_p_out_cpu, 0),
GATE("pll_p_out_adsp", "pll_p", 187, 0, tegra_clk_pll_p_out_adsp, 0),
+ GATE("apb2ape", "clk_m", 107, 0, tegra_clk_apb2ape, 0),
};
static struct tegra_periph_init_data div_clks[] = {
diff --git a/drivers/clk/tegra/clk-tegra-super-gen4.c b/drivers/clk/tegra/clk-tegra-super-gen4.c
index 4559a20..474de0f 100644
--- a/drivers/clk/tegra/clk-tegra-super-gen4.c
+++ b/drivers/clk/tegra/clk-tegra-super-gen4.c
@@ -67,7 +67,7 @@
"pll_p", "pll_p_out4", "unused",
"unused", "pll_x", "pll_x_out0" };
-const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
+static const struct tegra_super_gen_info tegra_super_gen_info_gen4 = {
.gen = gen4,
.sclk_parents = sclk_parents,
.cclk_g_parents = cclk_g_parents,
@@ -93,7 +93,7 @@
"unused", "unused", "unused", "unused",
"dfllCPU_out" };
-const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
+static const struct tegra_super_gen_info tegra_super_gen_info_gen5 = {
.gen = gen5,
.sclk_parents = sclk_parents_gen5,
.cclk_g_parents = cclk_g_parents_gen5,
@@ -171,7 +171,7 @@
*dt_clk = clk;
}
-void __init tegra_super_clk_init(void __iomem *clk_base,
+static void __init tegra_super_clk_init(void __iomem *clk_base,
void __iomem *pmc_base,
struct tegra_clk *tegra_clks,
struct tegra_clk_pll_params *params,
diff --git a/drivers/clk/tegra/clk-tegra210.c b/drivers/clk/tegra/clk-tegra210.c
index 58514c4..637041f 100644
--- a/drivers/clk/tegra/clk-tegra210.c
+++ b/drivers/clk/tegra/clk-tegra210.c
@@ -59,8 +59,8 @@
#define PLLC3_MISC3 0x50c
#define PLLM_BASE 0x90
-#define PLLM_MISC0 0x9c
#define PLLM_MISC1 0x98
+#define PLLM_MISC2 0x9c
#define PLLP_BASE 0xa0
#define PLLP_MISC0 0xac
#define PLLP_MISC1 0x680
@@ -99,7 +99,7 @@
#define PLLC4_MISC0 0x5a8
#define PLLC4_OUT 0x5e4
#define PLLMB_BASE 0x5e8
-#define PLLMB_MISC0 0x5ec
+#define PLLMB_MISC1 0x5ec
#define PLLA1_BASE 0x6a4
#define PLLA1_MISC0 0x6a8
#define PLLA1_MISC1 0x6ac
@@ -243,7 +243,8 @@
};
static const char *mux_pllmcp_clkm[] = {
- "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_c2", "pll_c3",
+ "pll_m", "pll_c", "pll_p", "clk_m", "pll_m_ud", "pll_mb", "pll_mb",
+ "pll_p",
};
#define mux_pllmcp_clkm_idx NULL
@@ -367,12 +368,12 @@
/* PLLMB */
#define PLLMB_BASE_LOCK (1 << 27)
-#define PLLMB_MISC0_LOCK_OVERRIDE (1 << 18)
-#define PLLMB_MISC0_IDDQ (1 << 17)
-#define PLLMB_MISC0_LOCK_ENABLE (1 << 16)
+#define PLLMB_MISC1_LOCK_OVERRIDE (1 << 18)
+#define PLLMB_MISC1_IDDQ (1 << 17)
+#define PLLMB_MISC1_LOCK_ENABLE (1 << 16)
-#define PLLMB_MISC0_DEFAULT_VALUE 0x00030000
-#define PLLMB_MISC0_WRITE_MASK 0x0007ffff
+#define PLLMB_MISC1_DEFAULT_VALUE 0x00030000
+#define PLLMB_MISC1_WRITE_MASK 0x0007ffff
/* PLLP */
#define PLLP_BASE_OVERRIDE (1 << 28)
@@ -457,7 +458,8 @@
PLLCX_MISC3_WRITE_MASK);
}
-void tegra210_pllcx_set_defaults(const char *name, struct tegra_clk_pll *pllcx)
+static void tegra210_pllcx_set_defaults(const char *name,
+ struct tegra_clk_pll *pllcx)
{
pllcx->params->defaults_set = true;
@@ -482,22 +484,22 @@
udelay(1);
}
-void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
+static void _pllc_set_defaults(struct tegra_clk_pll *pllcx)
{
tegra210_pllcx_set_defaults("PLL_C", pllcx);
}
-void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
+static void _pllc2_set_defaults(struct tegra_clk_pll *pllcx)
{
tegra210_pllcx_set_defaults("PLL_C2", pllcx);
}
-void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
+static void _pllc3_set_defaults(struct tegra_clk_pll *pllcx)
{
tegra210_pllcx_set_defaults("PLL_C3", pllcx);
}
-void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
+static void _plla1_set_defaults(struct tegra_clk_pll *pllcx)
{
tegra210_pllcx_set_defaults("PLL_A1", pllcx);
}
@@ -507,7 +509,7 @@
* PLL with dynamic ramp and fractional SDM. Dynamic ramp is not used.
* Fractional SDM is allowed to provide exact audio rates.
*/
-void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
+static void tegra210_plla_set_defaults(struct tegra_clk_pll *plla)
{
u32 mask;
u32 val = readl_relaxed(clk_base + plla->params->base_reg);
@@ -559,7 +561,7 @@
* PLLD
* PLL with fractional SDM.
*/
-void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
+static void tegra210_plld_set_defaults(struct tegra_clk_pll *plld)
{
u32 val;
u32 mask = 0xffff;
@@ -698,7 +700,7 @@
udelay(1);
}
-void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
+static void tegra210_plld2_set_defaults(struct tegra_clk_pll *plld2)
{
plldss_defaults("PLL_D2", plld2, PLLD2_MISC0_DEFAULT_VALUE,
PLLD2_MISC1_CFG_DEFAULT_VALUE,
@@ -706,7 +708,7 @@
PLLD2_MISC3_CTRL2_DEFAULT_VALUE);
}
-void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
+static void tegra210_plldp_set_defaults(struct tegra_clk_pll *plldp)
{
plldss_defaults("PLL_DP", plldp, PLLDP_MISC0_DEFAULT_VALUE,
PLLDP_MISC1_CFG_DEFAULT_VALUE,
@@ -719,7 +721,7 @@
* Base and misc0 layout is the same as PLLD2/PLLDP, but no SDM/SSC support.
* VCO is exposed to the clock tree via fixed 1/3 and 1/5 dividers.
*/
-void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
+static void tegra210_pllc4_set_defaults(struct tegra_clk_pll *pllc4)
{
plldss_defaults("PLL_C4", pllc4, PLLC4_MISC0_DEFAULT_VALUE, 0, 0, 0);
}
@@ -728,7 +730,7 @@
* PLLRE
* VCO is exposed to the clock tree directly along with post-divider output
*/
-void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
+static void tegra210_pllre_set_defaults(struct tegra_clk_pll *pllre)
{
u32 mask;
u32 val = readl_relaxed(clk_base + pllre->params->base_reg);
@@ -780,13 +782,13 @@
{
unsigned long input_rate;
- if (!IS_ERR_OR_NULL(hw->clk)) {
+ /* cf rate */
+ if (!IS_ERR_OR_NULL(hw->clk))
input_rate = clk_hw_get_rate(clk_hw_get_parent(hw));
- /* cf rate */
- input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
- } else {
+ else
input_rate = 38400000;
- }
+
+ input_rate /= tegra_pll_get_fixed_mdiv(hw, input_rate);
switch (input_rate) {
case 12000000:
@@ -841,7 +843,7 @@
PLLX_MISC5_WRITE_MASK);
}
-void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
+static void tegra210_pllx_set_defaults(struct tegra_clk_pll *pllx)
{
u32 val;
u32 step_a, step_b;
@@ -901,7 +903,7 @@
}
/* PLLMB */
-void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
+static void tegra210_pllmb_set_defaults(struct tegra_clk_pll *pllmb)
{
u32 mask, val = readl_relaxed(clk_base + pllmb->params->base_reg);
@@ -914,15 +916,15 @@
* PLL is ON: check if defaults already set, then set those
* that can be updated in flight.
*/
- val = PLLMB_MISC0_DEFAULT_VALUE & (~PLLMB_MISC0_IDDQ);
- mask = PLLMB_MISC0_LOCK_ENABLE | PLLMB_MISC0_LOCK_OVERRIDE;
+ val = PLLMB_MISC1_DEFAULT_VALUE & (~PLLMB_MISC1_IDDQ);
+ mask = PLLMB_MISC1_LOCK_ENABLE | PLLMB_MISC1_LOCK_OVERRIDE;
_pll_misc_chk_default(clk_base, pllmb->params, 0, val,
- ~mask & PLLMB_MISC0_WRITE_MASK);
+ ~mask & PLLMB_MISC1_WRITE_MASK);
/* Enable lock detect */
val = readl_relaxed(clk_base + pllmb->params->ext_misc_reg[0]);
val &= ~mask;
- val |= PLLMB_MISC0_DEFAULT_VALUE & mask;
+ val |= PLLMB_MISC1_DEFAULT_VALUE & mask;
writel_relaxed(val, clk_base + pllmb->params->ext_misc_reg[0]);
udelay(1);
@@ -930,7 +932,7 @@
}
/* set IDDQ, enable lock detect */
- writel_relaxed(PLLMB_MISC0_DEFAULT_VALUE,
+ writel_relaxed(PLLMB_MISC1_DEFAULT_VALUE,
clk_base + pllmb->params->ext_misc_reg[0]);
udelay(1);
}
@@ -960,7 +962,7 @@
~mask & PLLP_MISC1_WRITE_MASK);
}
-void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
+static void tegra210_pllp_set_defaults(struct tegra_clk_pll *pllp)
{
u32 mask;
u32 val = readl_relaxed(clk_base + pllp->params->base_reg);
@@ -1022,7 +1024,7 @@
~mask & PLLU_MISC1_WRITE_MASK);
}
-void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
+static void tegra210_pllu_set_defaults(struct tegra_clk_pll *pllu)
{
u32 val = readl_relaxed(clk_base + pllu->params->base_reg);
@@ -1212,8 +1214,9 @@
cfg->m *= PLL_SDM_COEFF;
}
-unsigned long tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
- unsigned long parent_rate)
+static unsigned long
+tegra210_clk_adjust_vco_min(struct tegra_clk_pll_params *params,
+ unsigned long parent_rate)
{
unsigned long vco_min = params->vco_min;
@@ -1386,7 +1389,7 @@
.mdiv_default = 3,
.div_nmp = &pllc_nmp,
.freq_table = pll_cx_freq_table,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .flags = TEGRA_PLL_USE_LOCK,
.set_defaults = _pllc_set_defaults,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
};
@@ -1425,7 +1428,7 @@
.ext_misc_reg[2] = PLLC2_MISC2,
.ext_misc_reg[3] = PLLC2_MISC3,
.freq_table = pll_cx_freq_table,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .flags = TEGRA_PLL_USE_LOCK,
.set_defaults = _pllc2_set_defaults,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
};
@@ -1455,7 +1458,7 @@
.ext_misc_reg[2] = PLLC3_MISC2,
.ext_misc_reg[3] = PLLC3_MISC3,
.freq_table = pll_cx_freq_table,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .flags = TEGRA_PLL_USE_LOCK,
.set_defaults = _pllc3_set_defaults,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
};
@@ -1505,7 +1508,6 @@
.base_reg = PLLC4_BASE,
.misc_reg = PLLC4_MISC0,
.lock_mask = PLL_BASE_LOCK,
- .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
.lock_delay = 300,
.max_p = PLL_QLIN_PDIV_MAX,
.ext_misc_reg[0] = PLLC4_MISC0,
@@ -1517,8 +1519,7 @@
.div_nmp = &pllss_nmp,
.freq_table = pll_c4_vco_freq_table,
.set_defaults = tegra210_pllc4_set_defaults,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE |
- TEGRA_PLL_VCO_OUT,
+ .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
};
@@ -1559,15 +1560,15 @@
.vco_min = 800000000,
.vco_max = 1866000000,
.base_reg = PLLM_BASE,
- .misc_reg = PLLM_MISC1,
+ .misc_reg = PLLM_MISC2,
.lock_mask = PLL_BASE_LOCK,
.lock_enable_bit_idx = PLLM_MISC_LOCK_ENABLE,
.lock_delay = 300,
- .iddq_reg = PLLM_MISC0,
+ .iddq_reg = PLLM_MISC2,
.iddq_bit_idx = PLLM_IDDQ_BIT,
.max_p = PLL_QLIN_PDIV_MAX,
- .ext_misc_reg[0] = PLLM_MISC0,
- .ext_misc_reg[0] = PLLM_MISC1,
+ .ext_misc_reg[0] = PLLM_MISC2,
+ .ext_misc_reg[1] = PLLM_MISC1,
.round_p_to_pdiv = pll_qlin_p_to_pdiv,
.pdiv_tohw = pll_qlin_pdiv_to_hw,
.div_nmp = &pllm_nmp,
@@ -1586,19 +1587,18 @@
.vco_min = 800000000,
.vco_max = 1866000000,
.base_reg = PLLMB_BASE,
- .misc_reg = PLLMB_MISC0,
+ .misc_reg = PLLMB_MISC1,
.lock_mask = PLL_BASE_LOCK,
- .lock_enable_bit_idx = PLLMB_MISC_LOCK_ENABLE,
.lock_delay = 300,
- .iddq_reg = PLLMB_MISC0,
+ .iddq_reg = PLLMB_MISC1,
.iddq_bit_idx = PLLMB_IDDQ_BIT,
.max_p = PLL_QLIN_PDIV_MAX,
- .ext_misc_reg[0] = PLLMB_MISC0,
+ .ext_misc_reg[0] = PLLMB_MISC1,
.round_p_to_pdiv = pll_qlin_p_to_pdiv,
.pdiv_tohw = pll_qlin_pdiv_to_hw,
.div_nmp = &pllm_nmp,
.freq_table = pll_m_freq_table,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .flags = TEGRA_PLL_USE_LOCK,
.set_defaults = tegra210_pllmb_set_defaults,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
};
@@ -1671,7 +1671,6 @@
.base_reg = PLLRE_BASE,
.misc_reg = PLLRE_MISC0,
.lock_mask = PLLRE_MISC_LOCK,
- .lock_enable_bit_idx = PLLRE_MISC_LOCK_ENABLE,
.lock_delay = 300,
.max_p = PLL_QLIN_PDIV_MAX,
.ext_misc_reg[0] = PLLRE_MISC0,
@@ -1681,8 +1680,7 @@
.pdiv_tohw = pll_qlin_pdiv_to_hw,
.div_nmp = &pllre_nmp,
.freq_table = pll_re_vco_freq_table,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC |
- TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
+ .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_LOCK_MISC | TEGRA_PLL_VCO_OUT,
.set_defaults = tegra210_pllre_set_defaults,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
};
@@ -1712,7 +1710,6 @@
.base_reg = PLLP_BASE,
.misc_reg = PLLP_MISC0,
.lock_mask = PLL_BASE_LOCK,
- .lock_enable_bit_idx = PLLP_MISC_LOCK_ENABLE,
.lock_delay = 300,
.iddq_reg = PLLP_MISC0,
.iddq_bit_idx = PLLXP_IDDQ_BIT,
@@ -1721,8 +1718,7 @@
.div_nmp = &pllp_nmp,
.freq_table = pll_p_freq_table,
.fixed_rate = 408000000,
- .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK |
- TEGRA_PLL_HAS_LOCK_ENABLE | TEGRA_PLL_VCO_OUT,
+ .flags = TEGRA_PLL_FIXED | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
.set_defaults = tegra210_pllp_set_defaults,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
};
@@ -1750,7 +1746,7 @@
.ext_misc_reg[2] = PLLA1_MISC2,
.ext_misc_reg[3] = PLLA1_MISC3,
.freq_table = pll_cx_freq_table,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .flags = TEGRA_PLL_USE_LOCK,
.set_defaults = _plla1_set_defaults,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
};
@@ -1787,7 +1783,6 @@
.base_reg = PLLA_BASE,
.misc_reg = PLLA_MISC0,
.lock_mask = PLL_BASE_LOCK,
- .lock_enable_bit_idx = PLLA_MISC_LOCK_ENABLE,
.lock_delay = 300,
.round_p_to_pdiv = pll_qlin_p_to_pdiv,
.pdiv_tohw = pll_qlin_pdiv_to_hw,
@@ -1802,8 +1797,7 @@
.ext_misc_reg[1] = PLLA_MISC1,
.ext_misc_reg[2] = PLLA_MISC2,
.freq_table = pll_a_freq_table,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW |
- TEGRA_PLL_HAS_LOCK_ENABLE,
+ .flags = TEGRA_PLL_USE_LOCK | TEGRA_MDIV_NEW,
.set_defaults = tegra210_plla_set_defaults,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
.set_gain = tegra210_clk_pll_set_gain,
@@ -1836,7 +1830,6 @@
.base_reg = PLLD_BASE,
.misc_reg = PLLD_MISC0,
.lock_mask = PLL_BASE_LOCK,
- .lock_enable_bit_idx = PLLD_MISC_LOCK_ENABLE,
.lock_delay = 1000,
.iddq_reg = PLLD_MISC0,
.iddq_bit_idx = PLLD_IDDQ_BIT,
@@ -1850,7 +1843,7 @@
.ext_misc_reg[0] = PLLD_MISC0,
.ext_misc_reg[1] = PLLD_MISC1,
.freq_table = pll_d_freq_table,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .flags = TEGRA_PLL_USE_LOCK,
.mdiv_default = 1,
.set_defaults = tegra210_plld_set_defaults,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
@@ -1876,7 +1869,6 @@
.base_reg = PLLD2_BASE,
.misc_reg = PLLD2_MISC0,
.lock_mask = PLL_BASE_LOCK,
- .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
.lock_delay = 300,
.iddq_reg = PLLD2_BASE,
.iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1897,7 +1889,7 @@
.mdiv_default = 1,
.freq_table = tegra210_pll_d2_freq_table,
.set_defaults = tegra210_plld2_set_defaults,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .flags = TEGRA_PLL_USE_LOCK,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
.set_gain = tegra210_clk_pll_set_gain,
.adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1920,7 +1912,6 @@
.base_reg = PLLDP_BASE,
.misc_reg = PLLDP_MISC,
.lock_mask = PLL_BASE_LOCK,
- .lock_enable_bit_idx = PLLSS_MISC_LOCK_ENABLE,
.lock_delay = 300,
.iddq_reg = PLLDP_BASE,
.iddq_bit_idx = PLLSS_IDDQ_BIT,
@@ -1941,7 +1932,7 @@
.mdiv_default = 1,
.freq_table = pll_dp_freq_table,
.set_defaults = tegra210_plldp_set_defaults,
- .flags = TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE,
+ .flags = TEGRA_PLL_USE_LOCK,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
.set_gain = tegra210_clk_pll_set_gain,
.adjust_vco = tegra210_clk_adjust_vco_min,
@@ -1973,7 +1964,6 @@
.base_reg = PLLU_BASE,
.misc_reg = PLLU_MISC0,
.lock_mask = PLL_BASE_LOCK,
- .lock_enable_bit_idx = PLLU_MISC_LOCK_ENABLE,
.lock_delay = 1000,
.iddq_reg = PLLU_MISC0,
.iddq_bit_idx = PLLU_IDDQ_BIT,
@@ -1983,8 +1973,7 @@
.pdiv_tohw = pll_qlin_pdiv_to_hw,
.div_nmp = &pllu_nmp,
.freq_table = pll_u_freq_table,
- .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_HAS_LOCK_ENABLE |
- TEGRA_PLL_VCO_OUT,
+ .flags = TEGRA_PLLU | TEGRA_PLL_USE_LOCK | TEGRA_PLL_VCO_OUT,
.set_defaults = tegra210_pllu_set_defaults,
.calc_rate = tegra210_pll_fixed_mdiv_cfg,
};
@@ -2218,6 +2207,7 @@
[tegra_clk_pll_c4_out1] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT1, .present = true },
[tegra_clk_pll_c4_out2] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT2, .present = true },
[tegra_clk_pll_c4_out3] = { .dt_id = TEGRA210_CLK_PLL_C4_OUT3, .present = true },
+ [tegra_clk_apb2ape] = { .dt_id = TEGRA210_CLK_APB2APE, .present = true },
};
static struct tegra_devclk devclks[] __initdata = {
@@ -2519,7 +2509,7 @@
/* PLLU_VCO */
val = readl(clk_base + pll_u_vco_params.base_reg);
- val &= ~BIT(24); /* disable PLLU_OVERRIDE */
+ val &= ~PLLU_BASE_OVERRIDE; /* disable PLLU_OVERRIDE */
writel(val, clk_base + pll_u_vco_params.base_reg);
clk = tegra_clk_register_pllre("pll_u_vco", "pll_ref", clk_base, pmc,
@@ -2738,8 +2728,6 @@
{ TEGRA210_CLK_DFLL_REF, TEGRA210_CLK_PLL_P, 51000000, 1 },
{ TEGRA210_CLK_SBC4, TEGRA210_CLK_PLL_P, 12000000, 1 },
{ TEGRA210_CLK_PLL_RE_VCO, TEGRA210_CLK_CLK_MAX, 672000000, 1 },
- { TEGRA210_CLK_PLL_U_OUT1, TEGRA210_CLK_CLK_MAX, 48000000, 1 },
- { TEGRA210_CLK_PLL_U_OUT2, TEGRA210_CLK_CLK_MAX, 60000000, 1 },
{ TEGRA210_CLK_XUSB_GATE, TEGRA210_CLK_CLK_MAX, 0, 1 },
{ TEGRA210_CLK_XUSB_SS_SRC, TEGRA210_CLK_PLL_U_480M, 120000000, 0 },
{ TEGRA210_CLK_XUSB_FS_SRC, TEGRA210_CLK_PLL_U_48M, 48000000, 0 },
diff --git a/drivers/clk/ti/dpll3xxx.c b/drivers/clk/ti/dpll3xxx.c
index 1c30038..cc73929 100644
--- a/drivers/clk/ti/dpll3xxx.c
+++ b/drivers/clk/ti/dpll3xxx.c
@@ -460,7 +460,8 @@
parent = clk_hw_get_parent(hw);
- if (clk_hw_get_rate(hw) == clk_get_rate(dd->clk_bypass)) {
+ if (clk_hw_get_rate(hw) ==
+ clk_hw_get_rate(__clk_get_hw(dd->clk_bypass))) {
WARN_ON(parent != __clk_get_hw(dd->clk_bypass));
r = _omap3_noncore_dpll_bypass(clk);
} else {
diff --git a/drivers/clk/versatile/clk-icst.c b/drivers/clk/versatile/clk-icst.c
index e62f8cb..3bca438 100644
--- a/drivers/clk/versatile/clk-icst.c
+++ b/drivers/clk/versatile/clk-icst.c
@@ -78,6 +78,9 @@
ret = regmap_read(icst->map, icst->vcoreg_off, &val);
if (ret)
return ret;
+
+ /* Mask the 18 bits used by the VCO */
+	   (v occupies bits 0-8, r bits 9-15, s bits 16-18, so the mask
+	   below clears bits 0-18 before the new values are OR-ed in) */
+ val &= ~0x7ffff;
val |= vco.v | (vco.r << 9) | (vco.s << 16);
/* This magic unlocks the VCO so it can be controlled */
diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
index c64d543..ffe9d1c 100644
--- a/drivers/clocksource/arm_arch_timer.c
+++ b/drivers/clocksource/arm_arch_timer.c
@@ -67,7 +67,7 @@
static struct clock_event_device __percpu *arch_timer_evt;
-static bool arch_timer_use_virtual = true;
+static enum ppi_nr arch_timer_uses_ppi = VIRT_PPI;
static bool arch_timer_c3stop;
static bool arch_timer_mem_use_virtual;
@@ -263,14 +263,20 @@
clk->name = "arch_sys_timer";
clk->rating = 450;
clk->cpumask = cpumask_of(smp_processor_id());
- if (arch_timer_use_virtual) {
- clk->irq = arch_timer_ppi[VIRT_PPI];
+ clk->irq = arch_timer_ppi[arch_timer_uses_ppi];
+ switch (arch_timer_uses_ppi) {
+ case VIRT_PPI:
clk->set_state_shutdown = arch_timer_shutdown_virt;
clk->set_next_event = arch_timer_set_next_event_virt;
- } else {
- clk->irq = arch_timer_ppi[PHYS_SECURE_PPI];
+ break;
+ case PHYS_SECURE_PPI:
+ case PHYS_NONSECURE_PPI:
+ case HYP_PPI:
clk->set_state_shutdown = arch_timer_shutdown_phys;
clk->set_next_event = arch_timer_set_next_event_phys;
+ break;
+ default:
+ BUG();
}
} else {
clk->features |= CLOCK_EVT_FEAT_DYNIRQ;
@@ -338,17 +344,20 @@
arch_timer_set_cntkctl(cntkctl);
}
+static bool arch_timer_has_nonsecure_ppi(void)
+{
+ return (arch_timer_uses_ppi == PHYS_SECURE_PPI &&
+ arch_timer_ppi[PHYS_NONSECURE_PPI]);
+}
+
static int arch_timer_setup(struct clock_event_device *clk)
{
__arch_timer_setup(ARCH_CP15_TIMER, clk);
- if (arch_timer_use_virtual)
- enable_percpu_irq(arch_timer_ppi[VIRT_PPI], 0);
- else {
- enable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI], 0);
- if (arch_timer_ppi[PHYS_NONSECURE_PPI])
- enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
- }
+ enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], 0);
+
+ if (arch_timer_has_nonsecure_ppi())
+ enable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI], 0);
arch_counter_set_user_access();
if (IS_ENABLED(CONFIG_ARM_ARCH_TIMER_EVTSTREAM))
@@ -390,7 +399,7 @@
(unsigned long)arch_timer_rate / 1000000,
(unsigned long)(arch_timer_rate / 10000) % 100,
type & ARCH_CP15_TIMER ?
- arch_timer_use_virtual ? "virt" : "phys" :
+ (arch_timer_uses_ppi == VIRT_PPI) ? "virt" : "phys" :
"",
type == (ARCH_CP15_TIMER | ARCH_MEM_TIMER) ? "/" : "",
type & ARCH_MEM_TIMER ?
@@ -460,7 +469,7 @@
/* Register the CP15 based counter if we have one */
if (type & ARCH_CP15_TIMER) {
- if (IS_ENABLED(CONFIG_ARM64) || arch_timer_use_virtual)
+ if (IS_ENABLED(CONFIG_ARM64) || arch_timer_uses_ppi == VIRT_PPI)
arch_timer_read_counter = arch_counter_get_cntvct;
else
arch_timer_read_counter = arch_counter_get_cntpct;
@@ -490,13 +499,9 @@
pr_debug("arch_timer_teardown disable IRQ%d cpu #%d\n",
clk->irq, smp_processor_id());
- if (arch_timer_use_virtual)
- disable_percpu_irq(arch_timer_ppi[VIRT_PPI]);
- else {
- disable_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI]);
- if (arch_timer_ppi[PHYS_NONSECURE_PPI])
- disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
- }
+ disable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi]);
+ if (arch_timer_has_nonsecure_ppi())
+ disable_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI]);
clk->set_state_shutdown(clk);
}
@@ -562,12 +567,14 @@
goto out;
}
- if (arch_timer_use_virtual) {
- ppi = arch_timer_ppi[VIRT_PPI];
+ ppi = arch_timer_ppi[arch_timer_uses_ppi];
+ switch (arch_timer_uses_ppi) {
+ case VIRT_PPI:
err = request_percpu_irq(ppi, arch_timer_handler_virt,
"arch_timer", arch_timer_evt);
- } else {
- ppi = arch_timer_ppi[PHYS_SECURE_PPI];
+ break;
+ case PHYS_SECURE_PPI:
+ case PHYS_NONSECURE_PPI:
err = request_percpu_irq(ppi, arch_timer_handler_phys,
"arch_timer", arch_timer_evt);
if (!err && arch_timer_ppi[PHYS_NONSECURE_PPI]) {
@@ -578,6 +585,13 @@
free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
arch_timer_evt);
}
+ break;
+ case HYP_PPI:
+ err = request_percpu_irq(ppi, arch_timer_handler_phys,
+ "arch_timer", arch_timer_evt);
+ break;
+ default:
+ BUG();
}
if (err) {
@@ -602,15 +616,10 @@
out_unreg_notify:
unregister_cpu_notifier(&arch_timer_cpu_nb);
out_free_irq:
- if (arch_timer_use_virtual)
- free_percpu_irq(arch_timer_ppi[VIRT_PPI], arch_timer_evt);
- else {
- free_percpu_irq(arch_timer_ppi[PHYS_SECURE_PPI],
+ free_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], arch_timer_evt);
+ if (arch_timer_has_nonsecure_ppi())
+ free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
arch_timer_evt);
- if (arch_timer_ppi[PHYS_NONSECURE_PPI])
- free_percpu_irq(arch_timer_ppi[PHYS_NONSECURE_PPI],
- arch_timer_evt);
- }
out_free:
free_percpu(arch_timer_evt);
@@ -697,12 +706,25 @@
*
* If no interrupt provided for virtual timer, we'll have to
* stick to the physical timer. It'd better be accessible...
+ *
+ * On ARMv8.1 with VH extensions, the kernel runs in HYP. VHE
+ * accesses to CNTP_*_EL1 registers are silently redirected to
+ * their CNTHP_*_EL2 counterparts, and use a different PPI
+ * number.
*/
if (is_hyp_mode_available() || !arch_timer_ppi[VIRT_PPI]) {
- arch_timer_use_virtual = false;
+ bool has_ppi;
- if (!arch_timer_ppi[PHYS_SECURE_PPI] ||
- !arch_timer_ppi[PHYS_NONSECURE_PPI]) {
+ if (is_kernel_in_hyp_mode()) {
+ arch_timer_uses_ppi = HYP_PPI;
+ has_ppi = !!arch_timer_ppi[HYP_PPI];
+ } else {
+ arch_timer_uses_ppi = PHYS_SECURE_PPI;
+ has_ppi = (!!arch_timer_ppi[PHYS_SECURE_PPI] ||
+ !!arch_timer_ppi[PHYS_NONSECURE_PPI]);
+ }
+
+ if (!has_ppi) {
pr_warn("arch_timer: No interrupt available, giving up\n");
return;
}
@@ -735,7 +757,7 @@
*/
if (IS_ENABLED(CONFIG_ARM) &&
of_property_read_bool(np, "arm,cpu-registers-not-fw-configured"))
- arch_timer_use_virtual = false;
+ arch_timer_uses_ppi = PHYS_SECURE_PPI;
arch_timer_init();
}
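[Editor's note] The arch timer conversion above replaces the virt/phys boolean with an enum so that the HYP PPI (used when the kernel itself runs at EL2 under VHE) gets its own case. A toy sketch of the same enum-dispatch shape, with names borrowed from the patch but the helper itself hypothetical:

    #include <stdio.h>

    enum ppi_nr { VIRT_PPI, PHYS_SECURE_PPI, PHYS_NONSECURE_PPI, HYP_PPI };

    static const char *timer_mode(enum ppi_nr ppi)
    {
        switch (ppi) {
        case VIRT_PPI:
            return "virt";          /* CNTV accessors */
        case PHYS_SECURE_PPI:
        case PHYS_NONSECURE_PPI:
        case HYP_PPI:
            return "phys";          /* CNTP (redirected to CNTHP under VHE) */
        default:
            return "bug";           /* mirrors the BUG() in the patch */
        }
    }

    int main(void)
    {
        printf("%s\n", timer_mode(HYP_PPI)); /* -> phys */
        return 0;
    }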
diff --git a/drivers/crypto/atmel-sha.c b/drivers/crypto/atmel-sha.c
index 20de861..8bf9914 100644
--- a/drivers/crypto/atmel-sha.c
+++ b/drivers/crypto/atmel-sha.c
@@ -782,7 +782,7 @@
dd->flags &= ~(SHA_FLAGS_BUSY | SHA_FLAGS_FINAL | SHA_FLAGS_CPU |
SHA_FLAGS_DMA_READY | SHA_FLAGS_OUTPUT_READY);
- clk_disable_unprepare(dd->iclk);
+ clk_disable(dd->iclk);
if (req->base.complete)
req->base.complete(&req->base, err);
@@ -795,7 +795,7 @@
{
int err;
- err = clk_prepare_enable(dd->iclk);
+ err = clk_enable(dd->iclk);
if (err)
return err;
@@ -822,7 +822,7 @@
dev_info(dd->dev,
"version: 0x%x\n", dd->hw_version);
- clk_disable_unprepare(dd->iclk);
+ clk_disable(dd->iclk);
}
static int atmel_sha_handle_queue(struct atmel_sha_dev *dd,
@@ -1410,6 +1410,10 @@
goto res_err;
}
+ err = clk_prepare(sha_dd->iclk);
+ if (err)
+ goto res_err;
+
atmel_sha_hw_version_init(sha_dd);
atmel_sha_get_cap(sha_dd);
@@ -1421,12 +1425,12 @@
if (IS_ERR(pdata)) {
dev_err(&pdev->dev, "platform data not available\n");
err = PTR_ERR(pdata);
- goto res_err;
+ goto iclk_unprepare;
}
}
if (!pdata->dma_slave) {
err = -ENXIO;
- goto res_err;
+ goto iclk_unprepare;
}
err = atmel_sha_dma_init(sha_dd, pdata);
if (err)
@@ -1457,6 +1461,8 @@
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
err_sha_dma:
+iclk_unprepare:
+ clk_unprepare(sha_dd->iclk);
res_err:
tasklet_kill(&sha_dd->done_task);
sha_dd_err:
@@ -1483,12 +1489,7 @@
if (sha_dd->caps.has_dma)
atmel_sha_dma_cleanup(sha_dd);
- iounmap(sha_dd->io_base);
-
- clk_put(sha_dd->iclk);
-
- if (sha_dd->irq >= 0)
- free_irq(sha_dd->irq, sha_dd);
+ clk_unprepare(sha_dd->iclk);
return 0;
}
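[Editor's note] The atmel-sha change above splits clk_prepare_enable() into a one-time clk_prepare() at probe and lightweight clk_enable()/clk_disable() calls around each request, since enable/disable must be callable from atomic context while prepare may sleep. A sketch of that split, with userspace stubs standing in for the common-clk API:

    #include <stdio.h>

    /* Userspace stubs; the real calls live in linux/clk.h. */
    static int  clk_prepare(void *c)   { (void)c; puts("prepare (may sleep)"); return 0; }
    static void clk_unprepare(void *c) { (void)c; puts("unprepare"); }
    static int  clk_enable(void *c)    { (void)c; puts("enable (atomic-safe)"); return 0; }
    static void clk_disable(void *c)   { (void)c; puts("disable (atomic-safe)"); }

    int main(void)
    {
        void *iclk = NULL;
        int i;

        if (clk_prepare(iclk))          /* once, at probe time */
            return 1;
        for (i = 0; i < 2; i++) {       /* per-request hot path */
            clk_enable(iclk);
            /* ... process one hash request ... */
            clk_disable(iclk);
        }
        clk_unprepare(iclk);            /* once, at remove time */
        return 0;
    }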
diff --git a/drivers/crypto/marvell/cesa.c b/drivers/crypto/marvell/cesa.c
index 0643e33..c0656e7 100644
--- a/drivers/crypto/marvell/cesa.c
+++ b/drivers/crypto/marvell/cesa.c
@@ -306,7 +306,7 @@
return -ENOMEM;
dma->padding_pool = dmam_pool_create("cesa_padding", dev, 72, 1, 0);
- if (!dma->cache_pool)
+ if (!dma->padding_pool)
return -ENOMEM;
cesa->dma = dma;
diff --git a/drivers/devfreq/tegra-devfreq.c b/drivers/devfreq/tegra-devfreq.c
index 848b93ee..fe9dce0 100644
--- a/drivers/devfreq/tegra-devfreq.c
+++ b/drivers/devfreq/tegra-devfreq.c
@@ -500,6 +500,8 @@
clk_set_min_rate(tegra->emc_clock, rate);
clk_set_rate(tegra->emc_clock, 0);
+ *freq = rate;
+
return 0;
}
diff --git a/drivers/dma/dw/core.c b/drivers/dma/dw/core.c
index e893318..5ad0ec1 100644
--- a/drivers/dma/dw/core.c
+++ b/drivers/dma/dw/core.c
@@ -156,7 +156,6 @@
/* Enable interrupts */
channel_set_bit(dw, MASK.XFER, dwc->mask);
- channel_set_bit(dw, MASK.BLOCK, dwc->mask);
channel_set_bit(dw, MASK.ERROR, dwc->mask);
dwc->initialized = true;
@@ -588,6 +587,9 @@
spin_unlock_irqrestore(&dwc->lock, flags);
}
+
+ /* Re-enable interrupts */
+ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
}
/* ------------------------------------------------------------------------- */
@@ -618,11 +620,8 @@
dwc_scan_descriptors(dw, dwc);
}
- /*
- * Re-enable interrupts.
- */
+ /* Re-enable interrupts */
channel_set_bit(dw, MASK.XFER, dw->all_chan_mask);
- channel_set_bit(dw, MASK.BLOCK, dw->all_chan_mask);
channel_set_bit(dw, MASK.ERROR, dw->all_chan_mask);
}
@@ -1261,6 +1260,7 @@
int dw_dma_cyclic_start(struct dma_chan *chan)
{
struct dw_dma_chan *dwc = to_dw_dma_chan(chan);
+ struct dw_dma *dw = to_dw_dma(chan->device);
unsigned long flags;
if (!test_bit(DW_DMA_IS_CYCLIC, &dwc->flags)) {
@@ -1269,7 +1269,12 @@
}
spin_lock_irqsave(&dwc->lock, flags);
+
+ /* Enable interrupts to perform cyclic transfer */
+ channel_set_bit(dw, MASK.BLOCK, dwc->mask);
+
dwc_dostart(dwc, dwc->cdesc->desc[0]);
+
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
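[Editor's note] In the dw/core.c change above, the per-channel BLOCK interrupt is no longer unmasked at channel init; it is unmasked only when a cyclic transfer actually starts and re-armed from the tasklet, so one-shot transfers never take block-completion interrupts. A toy sketch of that mask discipline, with hypothetical helpers modelled loosely on channel_set_bit():

    #include <stdio.h>

    static unsigned int irq_mask;          /* stand-in for MASK.BLOCK */
    #define CHAN_BIT(ch) (1u << (ch))

    static void mask_set(unsigned int b)   { irq_mask |= b; }
    static void mask_clear(unsigned int b) { irq_mask &= ~b; }

    static void start_cyclic(int ch)
    {
        mask_set(CHAN_BIT(ch));            /* unmask only for cyclic mode */
        printf("ch%d cyclic, mask=%#x\n", ch, irq_mask);
    }

    static void start_oneshot(int ch)
    {
        /* BLOCK stays masked; completion is signalled via XFER instead */
        printf("ch%d oneshot, mask=%#x\n", ch, irq_mask);
    }

    int main(void)
    {
        start_oneshot(0);                  /* mask stays 0 */
        start_cyclic(1);                   /* mask gains bit 1 */
        mask_clear(CHAN_BIT(1));
        return 0;
    }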
diff --git a/drivers/dma/dw/pci.c b/drivers/dma/dw/pci.c
index 4c30fdd..358f968 100644
--- a/drivers/dma/dw/pci.c
+++ b/drivers/dma/dw/pci.c
@@ -108,6 +108,10 @@
/* Haswell */
{ PCI_VDEVICE(INTEL, 0x9c60) },
+
+ /* Broadwell */
+ { PCI_VDEVICE(INTEL, 0x9ce0) },
+
{ }
};
MODULE_DEVICE_TABLE(pci, dw_pci_id_table);
diff --git a/drivers/dma/edma.c b/drivers/dma/edma.c
index d92d655..e3d7fcb 100644
--- a/drivers/dma/edma.c
+++ b/drivers/dma/edma.c
@@ -113,6 +113,9 @@
#define GET_NUM_REGN(x) ((x & 0x300000) >> 20) /* bits 20-21 */
#define CHMAP_EXIST BIT(24)
+/* CCSTAT register */
+#define EDMA_CCSTAT_ACTV BIT(4)
+
/*
* Max of 20 segments per channel to conserve PaRAM slots
* Also note that MAX_NR_SG should be atleast the no.of periods
@@ -1680,9 +1683,20 @@
spin_unlock_irqrestore(&echan->vchan.lock, flags);
}
+/*
+ * This limit exists to avoid a possible infinite loop when waiting for proof
+ * that a particular transfer is completed. This limit can be hit if there
+ * are large bursts to/from slow devices or the CPU is never able to catch
+ * the DMA hardware idle. On an AM335x transferring 48 bytes from the UART
+ * RX-FIFO, as many as 55 loops have been seen.
+ */
+#define EDMA_MAX_TR_WAIT_LOOPS 1000
+
static u32 edma_residue(struct edma_desc *edesc)
{
bool dst = edesc->direction == DMA_DEV_TO_MEM;
+ int loop_count = EDMA_MAX_TR_WAIT_LOOPS;
+ struct edma_chan *echan = edesc->echan;
struct edma_pset *pset = edesc->pset;
dma_addr_t done, pos;
int i;
@@ -1691,7 +1705,32 @@
* We always read the dst/src position from the first RamPar
* pset. That's the one which is active now.
*/
- pos = edma_get_position(edesc->echan->ecc, edesc->echan->slot[0], dst);
+ pos = edma_get_position(echan->ecc, echan->slot[0], dst);
+
+ /*
+ * "pos" may represent a transfer request that is still being
+ * processed by the EDMACC or EDMATC. We will busy wait until
+ * any one of the situations occurs:
+ * 1. the DMA hardware is idle
+ * 2. a new transfer request is setup
+ * 3. we hit the loop limit
+ */
+ while (edma_read(echan->ecc, EDMA_CCSTAT) & EDMA_CCSTAT_ACTV) {
+ /* check if a new transfer request is setup */
+ if (edma_get_position(echan->ecc,
+ echan->slot[0], dst) != pos) {
+ break;
+ }
+
+ if (!--loop_count) {
+ dev_dbg_ratelimited(echan->vchan.chan.device->dev,
+ "%s: timeout waiting for PaRAM update\n",
+ __func__);
+ break;
+ }
+
+ cpu_relax();
+ }
/*
* Cyclic is simple. Just subtract pset[0].addr from pos.
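[Editor's note] The residue fix above busy-waits with an explicit upper bound instead of forever, bailing out when the controller goes idle, when the position register moves (a new transfer request was set up), or when the loop budget runs out. A self-contained sketch of the same bounded-poll pattern; poll_hw() is a stand-in for the CCSTAT/position reads:

    #include <stdio.h>

    #define MAX_WAIT_LOOPS 1000

    /* Stand-in for the hardware read: returns nonzero while busy. */
    static int poll_hw(int iter) { return iter < 5; }

    int main(void)
    {
        int loops = MAX_WAIT_LOOPS, iter = 0;

        while (poll_hw(iter++)) {
            if (!--loops) {
                fprintf(stderr, "timeout waiting for hardware\n");
                break;
            }
            /* cpu_relax() in the kernel; nothing needed here */
        }
        printf("done after %d polls\n", iter);
        return 0;
    }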
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c
index 1d5df2e..21539d5 100644
--- a/drivers/dma/ioat/dma.c
+++ b/drivers/dma/ioat/dma.c
@@ -861,32 +861,42 @@
return;
}
+ spin_lock_bh(&ioat_chan->cleanup_lock);
+
+ /* handle the no-actives case */
+ if (!ioat_ring_active(ioat_chan)) {
+ spin_lock_bh(&ioat_chan->prep_lock);
+ check_active(ioat_chan);
+ spin_unlock_bh(&ioat_chan->prep_lock);
+ spin_unlock_bh(&ioat_chan->cleanup_lock);
+ return;
+ }
+
/* if we haven't made progress and we have already
* acknowledged a pending completion once, then be more
* forceful with a restart
*/
- spin_lock_bh(&ioat_chan->cleanup_lock);
if (ioat_cleanup_preamble(ioat_chan, &phys_complete))
__cleanup(ioat_chan, phys_complete);
else if (test_bit(IOAT_COMPLETION_ACK, &ioat_chan->state)) {
+ u32 chanerr;
+
+ chanerr = readl(ioat_chan->reg_base + IOAT_CHANERR_OFFSET);
+ dev_warn(to_dev(ioat_chan), "Restarting channel...\n");
+ dev_warn(to_dev(ioat_chan), "CHANSTS: %#Lx CHANERR: %#x\n",
+ status, chanerr);
+ dev_warn(to_dev(ioat_chan), "Active descriptors: %d\n",
+ ioat_ring_active(ioat_chan));
+
spin_lock_bh(&ioat_chan->prep_lock);
ioat_restart_channel(ioat_chan);
spin_unlock_bh(&ioat_chan->prep_lock);
spin_unlock_bh(&ioat_chan->cleanup_lock);
return;
- } else {
+ } else
set_bit(IOAT_COMPLETION_ACK, &ioat_chan->state);
- mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
- }
-
- if (ioat_ring_active(ioat_chan))
- mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
- else {
- spin_lock_bh(&ioat_chan->prep_lock);
- check_active(ioat_chan);
- spin_unlock_bh(&ioat_chan->prep_lock);
- }
+ mod_timer(&ioat_chan->timer, jiffies + COMPLETION_TIMEOUT);
spin_unlock_bh(&ioat_chan->cleanup_lock);
}
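[Editor's note] The reordered ioat timeout handler above takes the cleanup lock once, handles an idle channel as an early return, and only then chooses between a plain cleanup and a forceful restart (now with CHANSTS/CHANERR diagnostics). A sketch of that early-exit structure; the predicates are hypothetical, not driver state:

    #include <stdio.h>
    #include <stdbool.h>

    static bool ring_active   = true;
    static bool made_progress = false;
    static bool already_acked = true;

    static void timeout_handler(void)
    {
        /* take cleanup lock ... */
        if (!ring_active) {                /* no-actives fast path */
            puts("idle: check_active and return");
            return;
        }
        if (made_progress)
            puts("cleanup completed descriptors");
        else if (already_acked)
            puts("no progress twice: dump status, restart channel");
        else
            puts("first stall: ack and re-arm timer");
        /* ... drop cleanup lock */
    }

    int main(void) { timeout_handler(); return 0; }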
diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
index 756eca8..10e6774 100644
--- a/drivers/firmware/efi/efivars.c
+++ b/drivers/firmware/efi/efivars.c
@@ -221,7 +221,7 @@
}
if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
- efivar_validate(name, data, size) == false) {
+ efivar_validate(vendor, name, data, size) == false) {
printk(KERN_ERR "efivars: Malformed variable content\n");
return -EINVAL;
}
@@ -447,7 +447,8 @@
}
if ((attributes & ~EFI_VARIABLE_MASK) != 0 ||
- efivar_validate(name, data, size) == false) {
+ efivar_validate(new_var->VendorGuid, name, data,
+ size) == false) {
printk(KERN_ERR "efivars: Malformed variable content\n");
return -EINVAL;
}
@@ -540,38 +541,30 @@
static int
efivar_create_sysfs_entry(struct efivar_entry *new_var)
{
- int i, short_name_size;
+ int short_name_size;
char *short_name;
- unsigned long variable_name_size;
- efi_char16_t *variable_name;
+ unsigned long utf8_name_size;
+ efi_char16_t *variable_name = new_var->var.VariableName;
int ret;
- variable_name = new_var->var.VariableName;
- variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
-
/*
- * Length of the variable bytes in ASCII, plus the '-' separator,
+ * Length of the variable bytes in UTF8, plus the '-' separator,
* plus the GUID, plus trailing NUL
*/
- short_name_size = variable_name_size / sizeof(efi_char16_t)
- + 1 + EFI_VARIABLE_GUID_LEN + 1;
+ utf8_name_size = ucs2_utf8size(variable_name);
+ short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
- short_name = kzalloc(short_name_size, GFP_KERNEL);
-
+ short_name = kmalloc(short_name_size, GFP_KERNEL);
if (!short_name)
return -ENOMEM;
- /* Convert Unicode to normal chars (assume top bits are 0),
- ala UTF-8 */
- for (i=0; i < (int)(variable_name_size / sizeof(efi_char16_t)); i++) {
- short_name[i] = variable_name[i] & 0xFF;
- }
+ ucs2_as_utf8(short_name, variable_name, short_name_size);
+
/* This is ugly, but necessary to separate one vendor's
private variables from another's. */
-
- *(short_name + strlen(short_name)) = '-';
+ short_name[utf8_name_size] = '-';
efi_guid_to_str(&new_var->var.VendorGuid,
- short_name + strlen(short_name));
+ short_name + utf8_name_size + 1);
new_var->kobj.kset = efivars_kset;
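[Editor's note] The sysfs-entry change above stops assuming variable names are ASCII: the buffer is sized with ucs2_utf8size() and converted with ucs2_as_utf8(), so the '-' separator and the GUID land at utf8_name_size rather than at strlen() of a string the old byte-truncating loop may have mangled. A rough userspace illustration of the buffer arithmetic only (the UCS-2 conversion itself is elided; the literal name is a stand-in):

    #include <stdio.h>
    #include <string.h>

    #define EFI_VARIABLE_GUID_LEN 36   /* "xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx" */

    int main(void)
    {
        /* Suppose the converted name is this many UTF-8 bytes. */
        size_t utf8_name_size = strlen("BootOrder");
        /* name + '-' + GUID + trailing NUL, exactly as in the patch */
        size_t short_name_size = utf8_name_size + 1 + EFI_VARIABLE_GUID_LEN + 1;
        char buf[64];

        memcpy(buf, "BootOrder", utf8_name_size);
        buf[utf8_name_size] = '-';         /* separator goes after the name */
        strcpy(buf + utf8_name_size + 1,
               "8be4df61-93ca-11d2-aa0d-00e098032b8c");
        printf("%zu bytes: %s\n", short_name_size, buf);
        return 0;
    }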
diff --git a/drivers/firmware/efi/vars.c b/drivers/firmware/efi/vars.c
index 70a0fb1..7f2ea21 100644
--- a/drivers/firmware/efi/vars.c
+++ b/drivers/firmware/efi/vars.c
@@ -165,67 +165,133 @@
}
struct variable_validate {
+ efi_guid_t vendor;
char *name;
bool (*validate)(efi_char16_t *var_name, int match, u8 *data,
unsigned long len);
};
+/*
+ * This is the list of variables we need to validate, as well as the
+ * whitelist for what we think is safe not to default to immutable.
+ *
+ * If it has a validate() method that's not NULL, it'll go into the
+ * validation routine. If not, it is assumed valid, but still used for
+ * whitelisting.
+ *
+ * Note that it's sorted by {vendor,name}, but globbed names must come after
+ * any other name with the same prefix.
+ */
static const struct variable_validate variable_validate[] = {
- { "BootNext", validate_uint16 },
- { "BootOrder", validate_boot_order },
- { "DriverOrder", validate_boot_order },
- { "Boot*", validate_load_option },
- { "Driver*", validate_load_option },
- { "ConIn", validate_device_path },
- { "ConInDev", validate_device_path },
- { "ConOut", validate_device_path },
- { "ConOutDev", validate_device_path },
- { "ErrOut", validate_device_path },
- { "ErrOutDev", validate_device_path },
- { "Timeout", validate_uint16 },
- { "Lang", validate_ascii_string },
- { "PlatformLang", validate_ascii_string },
- { "", NULL },
+ { EFI_GLOBAL_VARIABLE_GUID, "BootNext", validate_uint16 },
+ { EFI_GLOBAL_VARIABLE_GUID, "BootOrder", validate_boot_order },
+ { EFI_GLOBAL_VARIABLE_GUID, "Boot*", validate_load_option },
+ { EFI_GLOBAL_VARIABLE_GUID, "DriverOrder", validate_boot_order },
+ { EFI_GLOBAL_VARIABLE_GUID, "Driver*", validate_load_option },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConIn", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConInDev", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConOut", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ConOutDev", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ErrOut", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "ErrOutDev", validate_device_path },
+ { EFI_GLOBAL_VARIABLE_GUID, "Lang", validate_ascii_string },
+ { EFI_GLOBAL_VARIABLE_GUID, "OsIndications", NULL },
+ { EFI_GLOBAL_VARIABLE_GUID, "PlatformLang", validate_ascii_string },
+ { EFI_GLOBAL_VARIABLE_GUID, "Timeout", validate_uint16 },
+ { LINUX_EFI_CRASH_GUID, "*", NULL },
+ { NULL_GUID, "", NULL },
};
+static bool
+variable_matches(const char *var_name, size_t len, const char *match_name,
+ int *match)
+{
+ for (*match = 0; ; (*match)++) {
+ char c = match_name[*match];
+ char u = var_name[*match];
+
+ /* Wildcard in the matching name means we've matched */
+ if (c == '*')
+ return true;
+
+ /* Case sensitive match */
+ if (!c && *match == len)
+ return true;
+
+ if (c != u)
+ return false;
+
+ if (!c)
+ return true;
+ }
+ return true;
+}
+
bool
-efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len)
+efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+ unsigned long data_size)
{
int i;
- u16 *unicode_name = var_name;
+ unsigned long utf8_size;
+ u8 *utf8_name;
- for (i = 0; variable_validate[i].validate != NULL; i++) {
+ utf8_size = ucs2_utf8size(var_name);
+ utf8_name = kmalloc(utf8_size + 1, GFP_KERNEL);
+ if (!utf8_name)
+ return false;
+
+ ucs2_as_utf8(utf8_name, var_name, utf8_size);
+ utf8_name[utf8_size] = '\0';
+
+ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
const char *name = variable_validate[i].name;
- int match;
+ int match = 0;
- for (match = 0; ; match++) {
- char c = name[match];
- u16 u = unicode_name[match];
+ if (efi_guidcmp(vendor, variable_validate[i].vendor))
+ continue;
- /* All special variables are plain ascii */
- if (u > 127)
- return true;
-
- /* Wildcard in the matching name means we've matched */
- if (c == '*')
- return variable_validate[i].validate(var_name,
- match, data, len);
-
- /* Case sensitive match */
- if (c != u)
+ if (variable_matches(utf8_name, utf8_size+1, name, &match)) {
+ if (variable_validate[i].validate == NULL)
break;
-
- /* Reached the end of the string while matching */
- if (!c)
- return variable_validate[i].validate(var_name,
- match, data, len);
+ kfree(utf8_name);
+ return variable_validate[i].validate(var_name, match,
+ data, data_size);
}
}
-
+ kfree(utf8_name);
return true;
}
EXPORT_SYMBOL_GPL(efivar_validate);
+bool
+efivar_variable_is_removable(efi_guid_t vendor, const char *var_name,
+ size_t len)
+{
+ int i;
+ bool found = false;
+ int match = 0;
+
+ /*
+ * Check if our variable is in the validated variables list
+ */
+ for (i = 0; variable_validate[i].name[0] != '\0'; i++) {
+ if (efi_guidcmp(variable_validate[i].vendor, vendor))
+ continue;
+
+ if (variable_matches(var_name, len,
+ variable_validate[i].name, &match)) {
+ found = true;
+ break;
+ }
+ }
+
+ /*
+ * If it's in our list, it is removable.
+ */
+ return found;
+}
+EXPORT_SYMBOL_GPL(efivar_variable_is_removable);
+
static efi_status_t
check_var_size(u32 attributes, unsigned long size)
{
@@ -852,7 +918,7 @@
*set = false;
- if (efivar_validate(name, data, *size) == false)
+ if (efivar_validate(*vendor, name, data, *size) == false)
return -EINVAL;
/*
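
Taken together, the table-driven matcher above implements simple prefix-glob semantics: a '*' in the table entry matches any suffix, while anything else must match exactly, case sensitively, and consume the whole candidate name. A self-contained sketch of the same rules (illustrative, not the kernel function):

#include <stdbool.h>

static bool glob_matches(const char *name, const char *pattern)
{
	for (int i = 0; ; i++) {
		if (pattern[i] == '*')
			return true;		/* wildcard: any suffix */
		if (pattern[i] != name[i])
			return false;		/* case-sensitive mismatch */
		if (!pattern[i])
			return true;		/* both strings ended together */
	}
}

/* glob_matches("Boot0000", "Boot*") -> true
 * glob_matches("BootNext", "Boot")  -> false */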
diff --git a/drivers/gpio/gpio-altera.c b/drivers/gpio/gpio-altera.c
index 2aeaebd..3f87a03 100644
--- a/drivers/gpio/gpio-altera.c
+++ b/drivers/gpio/gpio-altera.c
@@ -312,8 +312,8 @@
handle_simple_irq, IRQ_TYPE_NONE);
if (ret) {
- dev_info(&pdev->dev, "could not add irqchip\n");
- return ret;
+ dev_err(&pdev->dev, "could not add irqchip\n");
+ goto teardown;
}
gpiochip_set_chained_irqchip(&altera_gc->mmchip.gc,
@@ -326,6 +326,7 @@
skip_irq:
return 0;
teardown:
+ of_mm_gpiochip_remove(&altera_gc->mmchip);
pr_err("%s: registration failed with status %d\n",
node->full_name, ret);
diff --git a/drivers/gpio/gpio-davinci.c b/drivers/gpio/gpio-davinci.c
index ec58f42..cd007a6 100644
--- a/drivers/gpio/gpio-davinci.c
+++ b/drivers/gpio/gpio-davinci.c
@@ -195,7 +195,7 @@
static int davinci_gpio_probe(struct platform_device *pdev)
{
int i, base;
- unsigned ngpio;
+ unsigned ngpio, nbank;
struct davinci_gpio_controller *chips;
struct davinci_gpio_platform_data *pdata;
struct davinci_gpio_regs __iomem *regs;
@@ -224,8 +224,9 @@
if (WARN_ON(ARCH_NR_GPIOS < ngpio))
ngpio = ARCH_NR_GPIOS;
+ nbank = DIV_ROUND_UP(ngpio, 32);
chips = devm_kzalloc(dev,
- ngpio * sizeof(struct davinci_gpio_controller),
+ nbank * sizeof(struct davinci_gpio_controller),
GFP_KERNEL);
if (!chips)
return -ENOMEM;
@@ -511,7 +512,7 @@
return irq;
}
- irq_domain = irq_domain_add_legacy(NULL, ngpio, irq, 0,
+ irq_domain = irq_domain_add_legacy(dev->of_node, ngpio, irq, 0,
&davinci_gpio_irq_ops,
chips);
if (!irq_domain) {
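
The allocation fix in this file is worth spelling out: the driver needs one controller struct per 32-GPIO bank, not one per GPIO. DIV_ROUND_UP is plain ceiling division:

/* Ceiling division, equivalent to the kernel's DIV_ROUND_UP(). */
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

/* Example: 144 GPIOs -> DIV_ROUND_UP(144, 32) == 5 bank structs,
 * instead of the 144 the old code over-allocated. */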
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 82edf95..5e7770f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -87,6 +87,8 @@
extern int amdgpu_sched_hw_submission;
extern int amdgpu_enable_semaphores;
extern int amdgpu_powerplay;
+extern unsigned amdgpu_pcie_gen_cap;
+extern unsigned amdgpu_pcie_lane_cap;
#define AMDGPU_WAIT_IDLE_TIMEOUT_IN_MS 3000
#define AMDGPU_MAX_USEC_TIMEOUT 100000 /* 100 ms */
@@ -132,47 +134,6 @@
#define AMDGPU_RESET_VCE (1 << 13)
#define AMDGPU_RESET_VCE1 (1 << 14)
-/* CG block flags */
-#define AMDGPU_CG_BLOCK_GFX (1 << 0)
-#define AMDGPU_CG_BLOCK_MC (1 << 1)
-#define AMDGPU_CG_BLOCK_SDMA (1 << 2)
-#define AMDGPU_CG_BLOCK_UVD (1 << 3)
-#define AMDGPU_CG_BLOCK_VCE (1 << 4)
-#define AMDGPU_CG_BLOCK_HDP (1 << 5)
-#define AMDGPU_CG_BLOCK_BIF (1 << 6)
-
-/* CG flags */
-#define AMDGPU_CG_SUPPORT_GFX_MGCG (1 << 0)
-#define AMDGPU_CG_SUPPORT_GFX_MGLS (1 << 1)
-#define AMDGPU_CG_SUPPORT_GFX_CGCG (1 << 2)
-#define AMDGPU_CG_SUPPORT_GFX_CGLS (1 << 3)
-#define AMDGPU_CG_SUPPORT_GFX_CGTS (1 << 4)
-#define AMDGPU_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
-#define AMDGPU_CG_SUPPORT_GFX_CP_LS (1 << 6)
-#define AMDGPU_CG_SUPPORT_GFX_RLC_LS (1 << 7)
-#define AMDGPU_CG_SUPPORT_MC_LS (1 << 8)
-#define AMDGPU_CG_SUPPORT_MC_MGCG (1 << 9)
-#define AMDGPU_CG_SUPPORT_SDMA_LS (1 << 10)
-#define AMDGPU_CG_SUPPORT_SDMA_MGCG (1 << 11)
-#define AMDGPU_CG_SUPPORT_BIF_LS (1 << 12)
-#define AMDGPU_CG_SUPPORT_UVD_MGCG (1 << 13)
-#define AMDGPU_CG_SUPPORT_VCE_MGCG (1 << 14)
-#define AMDGPU_CG_SUPPORT_HDP_LS (1 << 15)
-#define AMDGPU_CG_SUPPORT_HDP_MGCG (1 << 16)
-
-/* PG flags */
-#define AMDGPU_PG_SUPPORT_GFX_PG (1 << 0)
-#define AMDGPU_PG_SUPPORT_GFX_SMG (1 << 1)
-#define AMDGPU_PG_SUPPORT_GFX_DMG (1 << 2)
-#define AMDGPU_PG_SUPPORT_UVD (1 << 3)
-#define AMDGPU_PG_SUPPORT_VCE (1 << 4)
-#define AMDGPU_PG_SUPPORT_CP (1 << 5)
-#define AMDGPU_PG_SUPPORT_GDS (1 << 6)
-#define AMDGPU_PG_SUPPORT_RLC_SMU_HS (1 << 7)
-#define AMDGPU_PG_SUPPORT_SDMA (1 << 8)
-#define AMDGPU_PG_SUPPORT_ACP (1 << 9)
-#define AMDGPU_PG_SUPPORT_SAMU (1 << 10)
-
/* GFX current status */
#define AMDGPU_GFX_NORMAL_MODE 0x00000000L
#define AMDGPU_GFX_SAFE_MODE 0x00000001L
@@ -606,8 +567,6 @@
uint32_t align;
};
-struct amdgpu_sa_bo;
-
/* sub-allocation buffer */
struct amdgpu_sa_bo {
struct list_head olist;
@@ -2360,6 +2319,8 @@
int amdgpu_ttm_tt_set_userptr(struct ttm_tt *ttm, uint64_t addr,
uint32_t flags);
bool amdgpu_ttm_tt_has_userptr(struct ttm_tt *ttm);
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ unsigned long end);
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm);
uint32_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
struct ttm_mem_reg *mem);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
index a081dda..7a4b101 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cgs.c
@@ -795,6 +795,12 @@
case CGS_SYSTEM_INFO_PCIE_MLW:
sys_info->value = adev->pm.pcie_mlw_mask;
break;
+ case CGS_SYSTEM_INFO_CG_FLAGS:
+ sys_info->value = adev->cg_flags;
+ break;
+ case CGS_SYSTEM_INFO_PG_FLAGS:
+ sys_info->value = adev->pg_flags;
+ break;
default:
return -ENODEV;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 6553146..51bfc11 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1795,15 +1795,20 @@
}
/* post card */
- amdgpu_atom_asic_init(adev->mode_info.atom_context);
+ if (!amdgpu_card_posted(adev))
+ amdgpu_atom_asic_init(adev->mode_info.atom_context);
r = amdgpu_resume(adev);
+ if (r)
+ DRM_ERROR("amdgpu_resume failed (%d).\n", r);
amdgpu_fence_driver_resume(adev);
- r = amdgpu_ib_ring_tests(adev);
- if (r)
- DRM_ERROR("ib ring test failed (%d).\n", r);
+ if (resume) {
+ r = amdgpu_ib_ring_tests(adev);
+ if (r)
+ DRM_ERROR("ib ring test failed (%d).\n", r);
+ }
r = amdgpu_late_init(adev);
if (r)
@@ -1933,80 +1938,97 @@
return r;
}
+#define AMDGPU_DEFAULT_PCIE_GEN_MASK 0x30007 /* gen: chipset 1/2, asic 1/2/3 */
+#define AMDGPU_DEFAULT_PCIE_MLW_MASK 0x2f0000 /* 1/2/4/8/16 lanes */
+
void amdgpu_get_pcie_info(struct amdgpu_device *adev)
{
u32 mask;
int ret;
- if (pci_is_root_bus(adev->pdev->bus))
+ if (amdgpu_pcie_gen_cap)
+ adev->pm.pcie_gen_mask = amdgpu_pcie_gen_cap;
+
+ if (amdgpu_pcie_lane_cap)
+ adev->pm.pcie_mlw_mask = amdgpu_pcie_lane_cap;
+
+ /* covers APUs as well */
+ if (pci_is_root_bus(adev->pdev->bus)) {
+ if (adev->pm.pcie_gen_mask == 0)
+ adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+ if (adev->pm.pcie_mlw_mask == 0)
+ adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
return;
-
- if (amdgpu_pcie_gen2 == 0)
- return;
-
- if (adev->flags & AMD_IS_APU)
- return;
-
- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
- if (!ret) {
- adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
- CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
- CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
-
- if (mask & DRM_PCIE_SPEED_25)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
- if (mask & DRM_PCIE_SPEED_50)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
- if (mask & DRM_PCIE_SPEED_80)
- adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
}
- ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
- if (!ret) {
- switch (mask) {
- case 32:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
- break;
- case 16:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
- break;
- case 12:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
- break;
- case 8:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
- break;
- case 4:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
- break;
- case 2:
- adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
- CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
- break;
- case 1:
- adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
- break;
- default:
- break;
+
+ if (adev->pm.pcie_gen_mask == 0) {
+ ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
+ if (!ret) {
+ adev->pm.pcie_gen_mask = (CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN1 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN2 |
+ CAIL_ASIC_PCIE_LINK_SPEED_SUPPORT_GEN3);
+
+ if (mask & DRM_PCIE_SPEED_25)
+ adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN1;
+ if (mask & DRM_PCIE_SPEED_50)
+ adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN2;
+ if (mask & DRM_PCIE_SPEED_80)
+ adev->pm.pcie_gen_mask |= CAIL_PCIE_LINK_SPEED_SUPPORT_GEN3;
+ } else {
+ adev->pm.pcie_gen_mask = AMDGPU_DEFAULT_PCIE_GEN_MASK;
+ }
+ }
+ if (adev->pm.pcie_mlw_mask == 0) {
+ ret = drm_pcie_get_max_link_width(adev->ddev, &mask);
+ if (!ret) {
+ switch (mask) {
+ case 32:
+ adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X32 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case 16:
+ adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X16 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case 12:
+ adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X12 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case 8:
+ adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X8 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case 4:
+ adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X4 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case 2:
+ adev->pm.pcie_mlw_mask = (CAIL_PCIE_LINK_WIDTH_SUPPORT_X2 |
+ CAIL_PCIE_LINK_WIDTH_SUPPORT_X1);
+ break;
+ case 1:
+ adev->pm.pcie_mlw_mask = CAIL_PCIE_LINK_WIDTH_SUPPORT_X1;
+ break;
+ default:
+ break;
+ }
+ } else {
+ adev->pm.pcie_mlw_mask = AMDGPU_DEFAULT_PCIE_MLW_MASK;
}
}
}
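
The resulting precedence in amdgpu_get_pcie_info() is: an explicit module parameter wins, root-bus (APU) devices get the fixed defaults, otherwise the probed capability is used, with the defaults again as the probe-failure fallback. A compressed model of that ordering (names hypothetical):

/* Hypothetical condensation of the precedence implemented above. */
static unsigned int resolve_cap(unsigned int user_override, int on_root_bus,
				int probe_ok, unsigned int probed,
				unsigned int builtin_default)
{
	if (user_override)
		return user_override;	/* pcie_gen_cap= / pcie_lane_cap= */
	if (on_root_bus)
		return builtin_default;	/* APUs: nothing to probe */
	if (probe_ok)
		return probed;
	return builtin_default;		/* probe failed */
}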
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
index acd066d0..8297bc3 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c
@@ -72,8 +72,8 @@
struct drm_crtc *crtc = &amdgpuCrtc->base;
unsigned long flags;
- unsigned i;
- int vpos, hpos, stat, min_udelay;
+ unsigned i, repcnt = 4;
+ int vpos, hpos, stat, min_udelay = 0;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
amdgpu_flip_wait_fence(adev, &work->excl);
@@ -96,7 +96,7 @@
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
- for (;;) {
+ while (amdgpuCrtc->enabled && repcnt--) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@@ -114,10 +114,22 @@
/* Sleep at least until estimated real start of hw vblank */
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ if (min_udelay > vblank->framedur_ns / 2000) {
+ /* Don't wait ridiculously long - something is wrong */
+ repcnt = 0;
+ break;
+ }
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};
+ if (!repcnt)
+ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+ "framedur %d, linedur %d, stat %d, vpos %d, "
+ "hpos %d\n", work->crtc_id, min_udelay,
+ vblank->framedur_ns / 1000,
+ vblank->linedur_ns / 1000, stat, vpos, hpos);
+
/* do the flip (mmio) */
adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base);
/* set the flip status */
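
Two bounds replace the old unbounded loop: at most four retries (repcnt), and a bail-out when the computed sleep would exceed half a frame. In outline (a sketch of the shape, not the driver code; the driver recomputes the delay each iteration):

/* Bounded retry with a sanity cap on the per-iteration delay. */
static int wait_for_safe_flip(int (*in_vblank_window)(void),
			      unsigned int delay_us, unsigned int frame_us)
{
	int repcnt = 4;

	while (in_vblank_window() && repcnt--) {
		if (delay_us > frame_us / 2)
			return -1;	/* implausible delay: give up */
		/* kernel: usleep_range(delay_us, 2 * delay_us); */
	}
	return 0;
}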
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 9c1af89..9ef1db8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -83,6 +83,8 @@
int amdgpu_sched_hw_submission = 2;
int amdgpu_enable_semaphores = 0;
int amdgpu_powerplay = -1;
+unsigned amdgpu_pcie_gen_cap = 0;
+unsigned amdgpu_pcie_lane_cap = 0;
MODULE_PARM_DESC(vramlimit, "Restrict VRAM for testing, in megabytes");
module_param_named(vramlimit, amdgpu_vram_limit, int, 0600);
@@ -170,6 +172,12 @@
module_param_named(powerplay, amdgpu_powerplay, int, 0444);
#endif
+MODULE_PARM_DESC(pcie_gen_cap, "PCIE Gen Caps (0: autodetect (default))");
+module_param_named(pcie_gen_cap, amdgpu_pcie_gen_cap, uint, 0444);
+
+MODULE_PARM_DESC(pcie_lane_cap, "PCIE Lane Caps (0: autodetect (default))");
+module_param_named(pcie_lane_cap, amdgpu_pcie_lane_cap, uint, 0444);
+
static struct pci_device_id pciidlist[] = {
#ifdef CONFIG_DRM_AMDGPU_CIK
/* Kaveri */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index 7380f78..d20c2a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -596,7 +596,8 @@
break;
}
ttm_eu_backoff_reservation(&ticket, &list);
- if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE))
+ if (!r && !(args->flags & AMDGPU_VM_DELAY_UPDATE) &&
+ !amdgpu_vm_debug)
amdgpu_gem_va_update_vm(adev, bo_va, args->operation);
drm_gem_object_unreference_unlocked(gobj);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index b1969f2..d4e2780 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -142,7 +142,8 @@
list_for_each_entry(bo, &node->bos, mn_list) {
- if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+ if (!amdgpu_ttm_tt_affect_userptr(bo->tbo.ttm, start,
+ end))
continue;
r = amdgpu_bo_reserve(bo, true);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7d8d84e..66855b6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -113,6 +113,10 @@
struct drm_device *ddev = dev_get_drvdata(dev);
struct amdgpu_device *adev = ddev->dev_private;
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return snprintf(buf, PAGE_SIZE, "off\n");
+
if (adev->pp_enabled) {
enum amd_dpm_forced_level level;
@@ -140,6 +144,11 @@
enum amdgpu_dpm_forced_level level;
int ret = 0;
+ /* Can't force performance level when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
if (strncmp("low", buf, strlen("low")) == 0) {
level = AMDGPU_DPM_FORCED_LEVEL_LOW;
} else if (strncmp("high", buf, strlen("high")) == 0) {
@@ -157,6 +166,7 @@
mutex_lock(&adev->pm.mutex);
if (adev->pm.dpm.thermal_active) {
count = -EINVAL;
+ mutex_unlock(&adev->pm.mutex);
goto fail;
}
ret = amdgpu_dpm_force_performance_level(adev, level);
@@ -167,8 +177,6 @@
mutex_unlock(&adev->pm.mutex);
}
fail:
- mutex_unlock(&adev->pm.mutex);
-
return count;
}
@@ -182,8 +190,14 @@
char *buf)
{
struct amdgpu_device *adev = dev_get_drvdata(dev);
+ struct drm_device *ddev = adev->ddev;
int temp;
+ /* Can't get temperature when the card is off */
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON))
+ return -EINVAL;
+
if (!adev->pp_enabled && !adev->pm.funcs->get_temperature)
temp = 0;
else
@@ -634,8 +648,6 @@
/* update display watermarks based on new power state */
amdgpu_display_bandwidth_update(adev);
- /* update displays */
- amdgpu_dpm_display_configuration_changed(adev);
adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs;
adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count;
@@ -655,6 +667,9 @@
amdgpu_dpm_post_set_power_state(adev);
+ /* update displays */
+ amdgpu_dpm_display_configuration_changed(adev);
+
if (adev->pm.funcs->force_performance_level) {
if (adev->pm.dpm.thermal_active) {
enum amdgpu_dpm_forced_level level = adev->pm.dpm.forced_level;
@@ -847,12 +862,16 @@
struct drm_info_node *node = (struct drm_info_node *) m->private;
struct drm_device *dev = node->minor->dev;
struct amdgpu_device *adev = dev->dev_private;
+ struct drm_device *ddev = adev->ddev;
if (!adev->pm.dpm_enabled) {
seq_printf(m, "dpm not enabled\n");
return 0;
}
- if (adev->pp_enabled) {
+ if ((adev->flags & AMD_IS_PX) &&
+ (ddev->switch_power_state != DRM_SWITCH_POWER_ON)) {
+ seq_printf(m, "PX asic powered off\n");
+ } else if (adev->pp_enabled) {
amdgpu_dpm_debugfs_print_current_performance_level(adev, m);
} else {
mutex_lock(&adev->pm.mutex);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
index 8b88edb..ca72a2e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sa.c
@@ -354,12 +354,15 @@
for (i = 0, count = 0; i < AMDGPU_MAX_RINGS; ++i)
if (fences[i])
- fences[count++] = fences[i];
+ fences[count++] = fence_get(fences[i]);
if (count) {
spin_unlock(&sa_manager->wq.lock);
t = fence_wait_any_timeout(fences, count, false,
MAX_SCHEDULE_TIMEOUT);
+ for (i = 0; i < count; ++i)
+ fence_put(fences[i]);
+
r = (t > 0) ? 0 : t;
spin_lock(&sa_manager->wq.lock);
} else {
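
The fix above is the classic take-a-reference-before-dropping-the-lock pattern: each fence is pinned with fence_get() while the spinlock is still held, so it cannot be freed while fence_wait_any_timeout() runs unlocked, and the pins are dropped after the wait. A simplified, non-atomic model of the refcount discipline (the kernel's fence refcounting is kref-based):

#include <stdlib.h>

struct obj { int refs; };

static struct obj *obj_get(struct obj *o) { o->refs++; return o; }

static void obj_put(struct obj *o)
{
	if (--o->refs == 0)
		free(o);	/* last reference frees the object */
}

/* Pattern: pin under the lock, wait unlocked, then unpin. */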
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 55cf05e..1cbb16e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -712,7 +712,7 @@
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(adev->pdev, gtt->ttm.dma_address[i])) {
- while (--i) {
+ while (i--) {
pci_unmap_page(adev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
gtt->ttm.dma_address[i] = 0;
@@ -783,6 +783,25 @@
return !!gtt->userptr;
}
+bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
+ unsigned long end)
+{
+ struct amdgpu_ttm_tt *gtt = (void *)ttm;
+ unsigned long size;
+
+ if (gtt == NULL)
+ return false;
+
+ if (gtt->ttm.ttm.state != tt_bound || !gtt->userptr)
+ return false;
+
+ size = (unsigned long)gtt->ttm.ttm.num_pages * PAGE_SIZE;
+ if (gtt->userptr > end || gtt->userptr + size <= start)
+ return false;
+
+ return true;
+}
+
bool amdgpu_ttm_tt_is_readonly(struct ttm_tt *ttm)
{
struct amdgpu_ttm_tt *gtt = (void *)ttm;
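
The heart of amdgpu_ttm_tt_affect_userptr() is a half-open interval overlap check: the mapping [userptr, userptr + size) is unaffected only if it lies entirely after end or entirely before start. Reduced to its essentials:

#include <stdbool.h>

/* Does [base, base + size) intersect [start, end]? */
static bool range_affected(unsigned long base, unsigned long size,
			   unsigned long start, unsigned long end)
{
	return !(base > end || base + size <= start);
}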
diff --git a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
index 8b4731d..474ca02 100644
--- a/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/ci_dpm.c
@@ -31,6 +31,7 @@
#include "ci_dpm.h"
#include "gfx_v7_0.h"
#include "atom.h"
+#include "amd_pcie.h"
#include <linux/seq_file.h>
#include "smu/smu_7_0_1_d.h"
@@ -5835,18 +5836,16 @@
u8 frev, crev;
struct ci_power_info *pi;
int ret;
- u32 mask;
pi = kzalloc(sizeof(struct ci_power_info), GFP_KERNEL);
if (pi == NULL)
return -ENOMEM;
adev->pm.dpm.priv = pi;
- ret = drm_pcie_get_speed_cap_mask(adev->ddev, &mask);
- if (ret)
- pi->sys_pcie_mask = 0;
- else
- pi->sys_pcie_mask = mask;
+ pi->sys_pcie_mask =
+ (adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK) >>
+ CAIL_PCIE_LINK_SPEED_SUPPORT_SHIFT;
+
pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
pi->pcie_gen_performance.max = AMDGPU_PCIE_GEN1;
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index fd9c958..155965e 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1762,6 +1762,9 @@
if (amdgpu_aspm == 0)
return;
+ if (pci_is_root_bus(adev->pdev->bus))
+ return;
+
/* XXX double check APUs */
if (adev->flags & AMD_IS_APU)
return;
@@ -2332,72 +2335,72 @@
switch (adev->asic_type) {
case CHIP_BONAIRE:
adev->cg_flags =
- AMDGPU_CG_SUPPORT_GFX_MGCG |
- AMDGPU_CG_SUPPORT_GFX_MGLS |
- /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
- AMDGPU_CG_SUPPORT_GFX_CGLS |
- AMDGPU_CG_SUPPORT_GFX_CGTS |
- AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
- AMDGPU_CG_SUPPORT_GFX_CP_LS |
- AMDGPU_CG_SUPPORT_MC_LS |
- AMDGPU_CG_SUPPORT_MC_MGCG |
- AMDGPU_CG_SUPPORT_SDMA_MGCG |
- AMDGPU_CG_SUPPORT_SDMA_LS |
- AMDGPU_CG_SUPPORT_BIF_LS |
- AMDGPU_CG_SUPPORT_VCE_MGCG |
- AMDGPU_CG_SUPPORT_UVD_MGCG |
- AMDGPU_CG_SUPPORT_HDP_LS |
- AMDGPU_CG_SUPPORT_HDP_MGCG;
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ /*AMD_CG_SUPPORT_GFX_CGCG |*/
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CGTS_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_VCE_MGCG |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x14;
break;
case CHIP_HAWAII:
adev->cg_flags =
- AMDGPU_CG_SUPPORT_GFX_MGCG |
- AMDGPU_CG_SUPPORT_GFX_MGLS |
- /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
- AMDGPU_CG_SUPPORT_GFX_CGLS |
- AMDGPU_CG_SUPPORT_GFX_CGTS |
- AMDGPU_CG_SUPPORT_GFX_CP_LS |
- AMDGPU_CG_SUPPORT_MC_LS |
- AMDGPU_CG_SUPPORT_MC_MGCG |
- AMDGPU_CG_SUPPORT_SDMA_MGCG |
- AMDGPU_CG_SUPPORT_SDMA_LS |
- AMDGPU_CG_SUPPORT_BIF_LS |
- AMDGPU_CG_SUPPORT_VCE_MGCG |
- AMDGPU_CG_SUPPORT_UVD_MGCG |
- AMDGPU_CG_SUPPORT_HDP_LS |
- AMDGPU_CG_SUPPORT_HDP_MGCG;
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ /*AMD_CG_SUPPORT_GFX_CGCG |*/
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_MC_LS |
+ AMD_CG_SUPPORT_MC_MGCG |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_VCE_MGCG |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags = 0;
adev->external_rev_id = 0x28;
break;
case CHIP_KAVERI:
adev->cg_flags =
- AMDGPU_CG_SUPPORT_GFX_MGCG |
- AMDGPU_CG_SUPPORT_GFX_MGLS |
- /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
- AMDGPU_CG_SUPPORT_GFX_CGLS |
- AMDGPU_CG_SUPPORT_GFX_CGTS |
- AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
- AMDGPU_CG_SUPPORT_GFX_CP_LS |
- AMDGPU_CG_SUPPORT_SDMA_MGCG |
- AMDGPU_CG_SUPPORT_SDMA_LS |
- AMDGPU_CG_SUPPORT_BIF_LS |
- AMDGPU_CG_SUPPORT_VCE_MGCG |
- AMDGPU_CG_SUPPORT_UVD_MGCG |
- AMDGPU_CG_SUPPORT_HDP_LS |
- AMDGPU_CG_SUPPORT_HDP_MGCG;
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ /*AMD_CG_SUPPORT_GFX_CGCG |*/
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CGTS_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_VCE_MGCG |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags =
- /*AMDGPU_PG_SUPPORT_GFX_PG |
- AMDGPU_PG_SUPPORT_GFX_SMG |
- AMDGPU_PG_SUPPORT_GFX_DMG |*/
- AMDGPU_PG_SUPPORT_UVD |
- /*AMDGPU_PG_SUPPORT_VCE |
- AMDGPU_PG_SUPPORT_CP |
- AMDGPU_PG_SUPPORT_GDS |
- AMDGPU_PG_SUPPORT_RLC_SMU_HS |
- AMDGPU_PG_SUPPORT_ACP |
- AMDGPU_PG_SUPPORT_SAMU |*/
+ /*AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |*/
+ AMD_PG_SUPPORT_UVD |
+ /*AMD_PG_SUPPORT_VCE |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_ACP |
+ AMD_PG_SUPPORT_SAMU |*/
0;
if (adev->pdev->device == 0x1312 ||
adev->pdev->device == 0x1316 ||
@@ -2409,29 +2412,29 @@
case CHIP_KABINI:
case CHIP_MULLINS:
adev->cg_flags =
- AMDGPU_CG_SUPPORT_GFX_MGCG |
- AMDGPU_CG_SUPPORT_GFX_MGLS |
- /*AMDGPU_CG_SUPPORT_GFX_CGCG |*/
- AMDGPU_CG_SUPPORT_GFX_CGLS |
- AMDGPU_CG_SUPPORT_GFX_CGTS |
- AMDGPU_CG_SUPPORT_GFX_CGTS_LS |
- AMDGPU_CG_SUPPORT_GFX_CP_LS |
- AMDGPU_CG_SUPPORT_SDMA_MGCG |
- AMDGPU_CG_SUPPORT_SDMA_LS |
- AMDGPU_CG_SUPPORT_BIF_LS |
- AMDGPU_CG_SUPPORT_VCE_MGCG |
- AMDGPU_CG_SUPPORT_UVD_MGCG |
- AMDGPU_CG_SUPPORT_HDP_LS |
- AMDGPU_CG_SUPPORT_HDP_MGCG;
+ AMD_CG_SUPPORT_GFX_MGCG |
+ AMD_CG_SUPPORT_GFX_MGLS |
+ /*AMD_CG_SUPPORT_GFX_CGCG |*/
+ AMD_CG_SUPPORT_GFX_CGLS |
+ AMD_CG_SUPPORT_GFX_CGTS |
+ AMD_CG_SUPPORT_GFX_CGTS_LS |
+ AMD_CG_SUPPORT_GFX_CP_LS |
+ AMD_CG_SUPPORT_SDMA_MGCG |
+ AMD_CG_SUPPORT_SDMA_LS |
+ AMD_CG_SUPPORT_BIF_LS |
+ AMD_CG_SUPPORT_VCE_MGCG |
+ AMD_CG_SUPPORT_UVD_MGCG |
+ AMD_CG_SUPPORT_HDP_LS |
+ AMD_CG_SUPPORT_HDP_MGCG;
adev->pg_flags =
- /*AMDGPU_PG_SUPPORT_GFX_PG |
- AMDGPU_PG_SUPPORT_GFX_SMG | */
- AMDGPU_PG_SUPPORT_UVD |
- /*AMDGPU_PG_SUPPORT_VCE |
- AMDGPU_PG_SUPPORT_CP |
- AMDGPU_PG_SUPPORT_GDS |
- AMDGPU_PG_SUPPORT_RLC_SMU_HS |
- AMDGPU_PG_SUPPORT_SAMU |*/
+ /*AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG | */
+ AMD_PG_SUPPORT_UVD |
+ /*AMD_PG_SUPPORT_VCE |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_SAMU |*/
0;
if (adev->asic_type == CHIP_KABINI) {
if (adev->rev_id == 0)
diff --git a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
index 5f712ce..c55ecf0 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik_sdma.c
@@ -885,7 +885,7 @@
{
u32 orig, data;
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_MGCG)) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_MGCG)) {
WREG32(mmSDMA0_CLK_CTRL + SDMA0_REGISTER_OFFSET, 0x00000100);
WREG32(mmSDMA0_CLK_CTRL + SDMA1_REGISTER_OFFSET, 0x00000100);
} else {
@@ -906,7 +906,7 @@
{
u32 orig, data;
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_SDMA_LS)) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_SDMA_LS)) {
orig = data = RREG32(mmSDMA0_POWER_CNTL + SDMA0_REGISTER_OFFSET);
data |= 0x100;
if (orig != data)
diff --git a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
index 4dd17f2..9056355 100644
--- a/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/cz_dpm.c
@@ -445,13 +445,13 @@
pi->gfx_pg_threshold = 500;
pi->caps_fps = true;
/* uvd */
- pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
+ pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
pi->caps_uvd_dpm = true;
/* vce */
- pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
+ pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
pi->caps_vce_dpm = true;
/* acp */
- pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
+ pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
pi->caps_acp_dpm = true;
pi->caps_stable_power_state = false;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
index 6c76139..7732059 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v7_0.c
@@ -4109,7 +4109,7 @@
orig = data = RREG32(mmRLC_CGCG_CGLS_CTRL);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGCG)) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGCG)) {
gfx_v7_0_enable_gui_idle_interrupt(adev, true);
tmp = gfx_v7_0_halt_rlc(adev);
@@ -4147,9 +4147,9 @@
{
u32 data, orig, tmp = 0;
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGCG)) {
- if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) {
- if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CP_LS) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGCG)) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CP_LS) {
orig = data = RREG32(mmCP_MEM_SLP_CNTL);
data |= CP_MEM_SLP_CNTL__CP_MEM_LS_EN_MASK;
if (orig != data)
@@ -4176,14 +4176,14 @@
gfx_v7_0_update_rlc(adev, tmp);
- if (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS) {
orig = data = RREG32(mmCGTS_SM_CTRL_REG);
data &= ~CGTS_SM_CTRL_REG__SM_MODE_MASK;
data |= (0x2 << CGTS_SM_CTRL_REG__SM_MODE__SHIFT);
data |= CGTS_SM_CTRL_REG__SM_MODE_ENABLE_MASK;
data &= ~CGTS_SM_CTRL_REG__OVERRIDE_MASK;
- if ((adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_MGLS) &&
- (adev->cg_flags & AMDGPU_CG_SUPPORT_GFX_CGTS_LS))
+ if ((adev->cg_flags & AMD_CG_SUPPORT_GFX_MGLS) &&
+ (adev->cg_flags & AMD_CG_SUPPORT_GFX_CGTS_LS))
data &= ~CGTS_SM_CTRL_REG__LS_OVERRIDE_MASK;
data &= ~CGTS_SM_CTRL_REG__ON_MONITOR_ADD_MASK;
data |= CGTS_SM_CTRL_REG__ON_MONITOR_ADD_EN_MASK;
@@ -4249,7 +4249,7 @@
u32 data, orig;
orig = data = RREG32(mmRLC_PG_CNTL);
- if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
else
data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PU_ENABLE_MASK;
@@ -4263,7 +4263,7 @@
u32 data, orig;
orig = data = RREG32(mmRLC_PG_CNTL);
- if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_RLC_SMU_HS))
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_RLC_SMU_HS))
data |= RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
else
data &= ~RLC_PG_CNTL__SMU_CLK_SLOWDOWN_ON_PD_ENABLE_MASK;
@@ -4276,7 +4276,7 @@
u32 data, orig;
orig = data = RREG32(mmRLC_PG_CNTL);
- if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_CP))
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_CP))
data &= ~0x8000;
else
data |= 0x8000;
@@ -4289,7 +4289,7 @@
u32 data, orig;
orig = data = RREG32(mmRLC_PG_CNTL);
- if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GDS))
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GDS))
data &= ~0x2000;
else
data |= 0x2000;
@@ -4370,7 +4370,7 @@
{
u32 data, orig;
- if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG)) {
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG)) {
orig = data = RREG32(mmRLC_PG_CNTL);
data |= RLC_PG_CNTL__GFX_POWER_GATING_ENABLE_MASK;
if (orig != data)
@@ -4442,7 +4442,7 @@
u32 data, orig;
orig = data = RREG32(mmRLC_PG_CNTL);
- if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_SMG))
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_SMG))
data |= RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
else
data &= ~RLC_PG_CNTL__STATIC_PER_CU_PG_ENABLE_MASK;
@@ -4456,7 +4456,7 @@
u32 data, orig;
orig = data = RREG32(mmRLC_PG_CNTL);
- if (enable && (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_DMG))
+ if (enable && (adev->pg_flags & AMD_PG_SUPPORT_GFX_DMG))
data |= RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
else
data &= ~RLC_PG_CNTL__DYN_PER_CU_PG_ENABLE_MASK;
@@ -4623,15 +4623,15 @@
static void gfx_v7_0_init_pg(struct amdgpu_device *adev)
{
- if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
- AMDGPU_PG_SUPPORT_GFX_SMG |
- AMDGPU_PG_SUPPORT_GFX_DMG |
- AMDGPU_PG_SUPPORT_CP |
- AMDGPU_PG_SUPPORT_GDS |
- AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
gfx_v7_0_enable_sclk_slowdown_on_pu(adev, true);
gfx_v7_0_enable_sclk_slowdown_on_pd(adev, true);
- if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
gfx_v7_0_init_gfx_cgpg(adev);
gfx_v7_0_enable_cp_pg(adev, true);
gfx_v7_0_enable_gds_pg(adev, true);
@@ -4643,14 +4643,14 @@
static void gfx_v7_0_fini_pg(struct amdgpu_device *adev)
{
- if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
- AMDGPU_PG_SUPPORT_GFX_SMG |
- AMDGPU_PG_SUPPORT_GFX_DMG |
- AMDGPU_PG_SUPPORT_CP |
- AMDGPU_PG_SUPPORT_GDS |
- AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
gfx_v7_0_update_gfx_pg(adev, false);
- if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
gfx_v7_0_enable_cp_pg(adev, false);
gfx_v7_0_enable_gds_pg(adev, false);
}
@@ -5527,14 +5527,14 @@
if (state == AMD_PG_STATE_GATE)
gate = true;
- if (adev->pg_flags & (AMDGPU_PG_SUPPORT_GFX_PG |
- AMDGPU_PG_SUPPORT_GFX_SMG |
- AMDGPU_PG_SUPPORT_GFX_DMG |
- AMDGPU_PG_SUPPORT_CP |
- AMDGPU_PG_SUPPORT_GDS |
- AMDGPU_PG_SUPPORT_RLC_SMU_HS)) {
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_PG |
+ AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_GFX_DMG |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GDS |
+ AMD_PG_SUPPORT_RLC_SMU_HS)) {
gfx_v7_0_update_gfx_pg(adev, gate);
- if (adev->pg_flags & AMDGPU_PG_SUPPORT_GFX_PG) {
+ if (adev->pg_flags & AMD_PG_SUPPORT_GFX_PG) {
gfx_v7_0_enable_cp_pg(adev, gate);
gfx_v7_0_enable_gds_pg(adev, gate);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 8f8ec37..1c40bd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -4995,7 +4995,7 @@
case AMDGPU_IRQ_STATE_ENABLE:
cp_int_cntl = RREG32(mmCP_INT_CNTL_RING0);
cp_int_cntl = REG_SET_FIELD(cp_int_cntl, CP_INT_CNTL_RING0,
- PRIV_REG_INT_ENABLE, 0);
+ PRIV_REG_INT_ENABLE, 1);
WREG32(mmCP_INT_CNTL_RING0, cp_int_cntl);
break;
default:
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 8aa2991..b806079 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -792,7 +792,7 @@
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_LS))
data |= mc_cg_ls_en[i];
else
data &= ~mc_cg_ls_en[i];
@@ -809,7 +809,7 @@
for (i = 0; i < ARRAY_SIZE(mc_cg_registers); i++) {
orig = data = RREG32(mc_cg_registers[i]);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_MC_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_MC_MGCG))
data |= mc_cg_en[i];
else
data &= ~mc_cg_en[i];
@@ -825,7 +825,7 @@
orig = data = RREG32_PCIE(ixPCIE_CNTL2);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_BIF_LS)) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
data = REG_SET_FIELD(data, PCIE_CNTL2, SLV_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, MST_MEM_LS_EN, 1);
data = REG_SET_FIELD(data, PCIE_CNTL2, REPLAY_MEM_LS_EN, 1);
@@ -848,7 +848,7 @@
orig = data = RREG32(mmHDP_HOST_PATH_CNTL);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_MGCG))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_MGCG))
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 0);
else
data = REG_SET_FIELD(data, HDP_HOST_PATH_CNTL, CLOCK_GATING_DIS, 1);
@@ -864,7 +864,7 @@
orig = data = RREG32(mmHDP_MEM_POWER_LS);
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_HDP_LS))
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_HDP_LS))
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 1);
else
data = REG_SET_FIELD(data, HDP_MEM_POWER_LS, LS_ENABLE, 0);
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 7e9154c..654d767 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -2859,11 +2859,11 @@
pi->voltage_drop_t = 0;
pi->caps_sclk_throttle_low_notification = false;
pi->caps_fps = false; /* true? */
- pi->caps_uvd_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_UVD) ? true : false;
+ pi->caps_uvd_pg = (adev->pg_flags & AMD_PG_SUPPORT_UVD) ? true : false;
pi->caps_uvd_dpm = true;
- pi->caps_vce_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_VCE) ? true : false;
- pi->caps_samu_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_SAMU) ? true : false;
- pi->caps_acp_pg = (adev->pg_flags & AMDGPU_PG_SUPPORT_ACP) ? true : false;
+ pi->caps_vce_pg = (adev->pg_flags & AMD_PG_SUPPORT_VCE) ? true : false;
+ pi->caps_samu_pg = (adev->pg_flags & AMD_PG_SUPPORT_SAMU) ? true : false;
+ pi->caps_acp_pg = (adev->pg_flags & AMD_PG_SUPPORT_ACP) ? true : false;
pi->caps_stable_p_state = false;
ret = kv_parse_sys_info_table(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 5e9f73a..fbd3767 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -611,7 +611,7 @@
{
u32 orig, data;
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG)) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
data = 0xfff;
WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);
@@ -830,6 +830,9 @@
bool gate = false;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+ return 0;
+
if (state == AMD_CG_STATE_GATE)
gate = true;
@@ -848,7 +851,10 @@
* revisit this when there is a cleaner line between
* the smc and the hw blocks
*/
- struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+ return 0;
if (state == AMD_PG_STATE_GATE) {
uvd_v4_2_stop(adev);
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index 38864f56..57f1c5b 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -774,6 +774,11 @@
static int uvd_v5_0_set_clockgating_state(void *handle,
enum amd_clockgating_state state)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
+ return 0;
+
return 0;
}
@@ -789,6 +794,9 @@
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+ return 0;
+
if (state == AMD_PG_STATE_GATE) {
uvd_v5_0_stop(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 3d59139..0b365b7 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -532,7 +532,7 @@
uvd_v6_0_mc_resume(adev);
/* Set dynamic clock gating in S/W control mode */
- if (adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG) {
+ if (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG) {
if (adev->flags & AMD_IS_APU)
cz_set_uvd_clock_gating_branches(adev, false);
else
@@ -1000,7 +1000,7 @@
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
- if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_UVD_MGCG))
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG))
return 0;
if (enable) {
@@ -1030,6 +1030,9 @@
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_UVD))
+ return 0;
+
if (state == AMD_PG_STATE_GATE) {
uvd_v6_0_stop(adev);
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index 52ac7a8..a822eda 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -373,7 +373,7 @@
{
bool sw_cg = false;
- if (enable && (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)) {
+ if (enable && (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)) {
if (sw_cg)
vce_v2_0_set_sw_cg(adev, true);
else
@@ -608,6 +608,9 @@
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
+ return 0;
+
if (state == AMD_PG_STATE_GATE)
/* XXX do we need a vce_v2_0_stop()? */
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index e99af81..d662fa9 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -277,7 +277,7 @@
WREG32_P(mmVCE_STATUS, 0, ~1);
/* Set Clock-Gating off */
- if (adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG)
+ if (adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG)
vce_v3_0_set_vce_sw_clock_gating(adev, false);
if (r) {
@@ -676,7 +676,7 @@
bool enable = (state == AMD_CG_STATE_GATE) ? true : false;
int i;
- if (!(adev->cg_flags & AMDGPU_CG_SUPPORT_VCE_MGCG))
+ if (!(adev->cg_flags & AMD_CG_SUPPORT_VCE_MGCG))
return 0;
mutex_lock(&adev->grbm_idx_mutex);
@@ -728,6 +728,9 @@
*/
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
+ if (!(adev->pg_flags & AMD_PG_SUPPORT_VCE))
+ return 0;
+
if (state == AMD_PG_STATE_GATE)
/* XXX do we need a vce_v3_0_stop()? */
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 89f5a1f..0d14d10 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -1457,8 +1457,7 @@
case CHIP_STONEY:
adev->has_uvd = true;
adev->cg_flags = 0;
- /* Disable UVD pg */
- adev->pg_flags = /* AMDGPU_PG_SUPPORT_UVD | */AMDGPU_PG_SUPPORT_VCE;
+ adev->pg_flags = 0;
adev->external_rev_id = adev->rev_id + 0x1;
break;
default:
diff --git a/drivers/gpu/drm/amd/include/amd_shared.h b/drivers/gpu/drm/amd/include/amd_shared.h
index 1195d06f..dbf7e64 100644
--- a/drivers/gpu/drm/amd/include/amd_shared.h
+++ b/drivers/gpu/drm/amd/include/amd_shared.h
@@ -85,6 +85,38 @@
AMD_PG_STATE_UNGATE,
};
+/* CG flags */
+#define AMD_CG_SUPPORT_GFX_MGCG (1 << 0)
+#define AMD_CG_SUPPORT_GFX_MGLS (1 << 1)
+#define AMD_CG_SUPPORT_GFX_CGCG (1 << 2)
+#define AMD_CG_SUPPORT_GFX_CGLS (1 << 3)
+#define AMD_CG_SUPPORT_GFX_CGTS (1 << 4)
+#define AMD_CG_SUPPORT_GFX_CGTS_LS (1 << 5)
+#define AMD_CG_SUPPORT_GFX_CP_LS (1 << 6)
+#define AMD_CG_SUPPORT_GFX_RLC_LS (1 << 7)
+#define AMD_CG_SUPPORT_MC_LS (1 << 8)
+#define AMD_CG_SUPPORT_MC_MGCG (1 << 9)
+#define AMD_CG_SUPPORT_SDMA_LS (1 << 10)
+#define AMD_CG_SUPPORT_SDMA_MGCG (1 << 11)
+#define AMD_CG_SUPPORT_BIF_LS (1 << 12)
+#define AMD_CG_SUPPORT_UVD_MGCG (1 << 13)
+#define AMD_CG_SUPPORT_VCE_MGCG (1 << 14)
+#define AMD_CG_SUPPORT_HDP_LS (1 << 15)
+#define AMD_CG_SUPPORT_HDP_MGCG (1 << 16)
+
+/* PG flags */
+#define AMD_PG_SUPPORT_GFX_PG (1 << 0)
+#define AMD_PG_SUPPORT_GFX_SMG (1 << 1)
+#define AMD_PG_SUPPORT_GFX_DMG (1 << 2)
+#define AMD_PG_SUPPORT_UVD (1 << 3)
+#define AMD_PG_SUPPORT_VCE (1 << 4)
+#define AMD_PG_SUPPORT_CP (1 << 5)
+#define AMD_PG_SUPPORT_GDS (1 << 6)
+#define AMD_PG_SUPPORT_RLC_SMU_HS (1 << 7)
+#define AMD_PG_SUPPORT_SDMA (1 << 8)
+#define AMD_PG_SUPPORT_ACP (1 << 9)
+#define AMD_PG_SUPPORT_SAMU (1 << 10)
+
enum amd_pm_state_type {
/* not used for dpm */
POWER_STATE_TYPE_DEFAULT,
diff --git a/drivers/gpu/drm/amd/include/cgs_common.h b/drivers/gpu/drm/amd/include/cgs_common.h
index 713aec9..aec38fc 100644
--- a/drivers/gpu/drm/amd/include/cgs_common.h
+++ b/drivers/gpu/drm/amd/include/cgs_common.h
@@ -109,6 +109,8 @@
CGS_SYSTEM_INFO_ADAPTER_BDF_ID = 1,
CGS_SYSTEM_INFO_PCIE_GEN_INFO,
CGS_SYSTEM_INFO_PCIE_MLW,
+ CGS_SYSTEM_INFO_CG_FLAGS,
+ CGS_SYSTEM_INFO_PG_FLAGS,
CGS_SYSTEM_INFO_ID_MAXIMUM,
};
diff --git a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
index 52a3efc..46410e3 100644
--- a/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/eventmgr/eventmgr.c
@@ -31,7 +31,7 @@
static int pem_init(struct pp_eventmgr *eventmgr)
{
int result = 0;
- struct pem_event_data event_data;
+ struct pem_event_data event_data = { {0} };
/* Initialize PowerPlay feature info */
pem_init_feature_info(eventmgr);
@@ -52,7 +52,7 @@
static void pem_fini(struct pp_eventmgr *eventmgr)
{
- struct pem_event_data event_data;
+ struct pem_event_data event_data = { {0} };
pem_uninit_featureInfo(eventmgr);
pem_unregister_interrupts(eventmgr);
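
The `= { {0} }` initializers above zero the whole pem_event_data instead of leaving stack garbage; the inner braces exist because the first member is itself an aggregate. The same idiom in isolation:

struct inner { int a, b; };
struct outer { struct inner first; int flags; };

/* { {0} } zero-initializes every member, recursively; the nested
 * braces initialize the aggregate first member explicitly. */
static struct outer zeroed(void)
{
	struct outer o = { {0} };
	return o;
}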
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
index 0874ab4..cf01177 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/cz_hwmgr.c
@@ -174,6 +174,8 @@
{
struct cz_hwmgr *cz_hwmgr = (struct cz_hwmgr *)(hwmgr->backend);
uint32_t i;
+ struct cgs_system_info sys_info = {0};
+ int result;
cz_hwmgr->gfx_ramp_step = 256*25/100;
@@ -247,6 +249,22 @@
phm_cap_set(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_DisableVoltageIsland);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (!result) {
+ if (sys_info.value & AMD_PG_SUPPORT_UVD)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ if (sys_info.value & AMD_PG_SUPPORT_VCE)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+ }
+
return 0;
}
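
Both hwmgr changes follow a default-off gating pattern: clear the power-gating caps unconditionally, query the PG flags through CGS, and re-enable only what the mask reports. Stripped to the logic (flag values mirror the amd_shared.h definitions added later in this series):

#define AMD_PG_SUPPORT_UVD	(1 << 3)
#define AMD_PG_SUPPORT_VCE	(1 << 4)

struct pg_caps { int uvd; int vce; };

static void apply_pg_flags(struct pg_caps *caps, unsigned long pg_flags)
{
	caps->uvd = 0;			/* default: gating off */
	caps->vce = 0;
	if (pg_flags & AMD_PG_SUPPORT_UVD)
		caps->uvd = 1;
	if (pg_flags & AMD_PG_SUPPORT_VCE)
		caps->vce = 1;
}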
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
index 44a9250..980d3bf 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/tonga_hwmgr.c
@@ -4451,6 +4451,7 @@
pp_atomctrl_gpio_pin_assignment gpio_pin_assignment;
struct phm_ppt_v1_information *pptable_info = (struct phm_ppt_v1_information *)(hwmgr->pptable);
phw_tonga_ulv_parm *ulv;
+ struct cgs_system_info sys_info = {0};
PP_ASSERT_WITH_CODE((NULL != hwmgr),
"Invalid Parameter!", return -1;);
@@ -4615,9 +4616,23 @@
data->vddc_phase_shed_control = 0;
- if (0 == result) {
- struct cgs_system_info sys_info = {0};
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+ sys_info.size = sizeof(struct cgs_system_info);
+ sys_info.info_id = CGS_SYSTEM_INFO_PG_FLAGS;
+ result = cgs_query_system_info(hwmgr->device, &sys_info);
+ if (!result) {
+ if (sys_info.value & AMD_PG_SUPPORT_UVD)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_UVDPowerGating);
+ if (sys_info.value & AMD_PG_SUPPORT_VCE)
+ phm_cap_set(hwmgr->platform_descriptor.platformCaps,
+ PHM_PlatformCaps_VCEPowerGating);
+ }
+ if (0 == result) {
data->is_tlu_enabled = 0;
hwmgr->platform_descriptor.hardwareActivityPerformanceLevels =
TONGA_MAX_HARDWARE_POWERLEVELS;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3f74193..9a7b446 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -65,8 +65,6 @@
*/
state->allow_modeset = true;
- state->num_connector = ACCESS_ONCE(dev->mode_config.num_connector);
-
state->crtcs = kcalloc(dev->mode_config.num_crtc,
sizeof(*state->crtcs), GFP_KERNEL);
if (!state->crtcs)
@@ -83,16 +81,6 @@
sizeof(*state->plane_states), GFP_KERNEL);
if (!state->plane_states)
goto fail;
- state->connectors = kcalloc(state->num_connector,
- sizeof(*state->connectors),
- GFP_KERNEL);
- if (!state->connectors)
- goto fail;
- state->connector_states = kcalloc(state->num_connector,
- sizeof(*state->connector_states),
- GFP_KERNEL);
- if (!state->connector_states)
- goto fail;
state->dev = dev;
@@ -823,19 +811,27 @@
index = drm_connector_index(connector);
- /*
- * Construction of atomic state updates can race with a connector
- * hot-add which might overflow. In this case flip the table and just
- * restart the entire ioctl - no one is fast enough to livelock a cpu
- * with physical hotplug events anyway.
- *
- * Note that we only grab the indexes once we have the right lock to
- * prevent hotplug/unplugging of connectors. So removal is no problem,
- * at most the array is a bit too large.
- */
if (index >= state->num_connector) {
- DRM_DEBUG_ATOMIC("Hot-added connector would overflow state array, restarting\n");
- return ERR_PTR(-EAGAIN);
+ struct drm_connector **c;
+ struct drm_connector_state **cs;
+ int alloc = max(index + 1, config->num_connector);
+
+ c = krealloc(state->connectors, alloc * sizeof(*state->connectors), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ state->connectors = c;
+ memset(&state->connectors[state->num_connector], 0,
+ sizeof(*state->connectors) * (alloc - state->num_connector));
+
+ cs = krealloc(state->connector_states, alloc * sizeof(*state->connector_states), GFP_KERNEL);
+ if (!cs)
+ return ERR_PTR(-ENOMEM);
+
+ state->connector_states = cs;
+ memset(&state->connector_states[state->num_connector], 0,
+ sizeof(*state->connector_states) * (alloc - state->num_connector));
+ state->num_connector = alloc;
}
if (state->connector_states[index])
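
One detail of the krealloc dance above is deliberate: the grown connectors pointer is written back into the state before the second allocation is attempted, so if the connector_states krealloc fails nothing leaks and the state stays consistent; only the new tail of each array is zeroed. The same pattern in userspace terms:

#include <stdlib.h>
#include <string.h>

/* Grow *arr from old_n to new_n elements, zeroing the new tail.
 * On failure the old array is untouched and still owned by *arr. */
static int grow_array(void ***arr, size_t old_n, size_t new_n)
{
	void **p = realloc(*arr, new_n * sizeof(*p));

	if (!p)
		return -1;
	memset(p + old_n, 0, (new_n - old_n) * sizeof(*p));
	*arr = p;	/* publish before any later failure point */
	return 0;
}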
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 7c52306..4f2d3e1 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1493,7 +1493,7 @@
{
int i;
- for (i = 0; i < dev->mode_config.num_connector; i++) {
+ for (i = 0; i < state->num_connector; i++) {
struct drm_connector *connector = state->connectors[i];
if (!connector)
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index d40bab2..f619121 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -918,12 +918,19 @@
connector->base.properties = &connector->properties;
connector->dev = dev;
connector->funcs = funcs;
+
+ connector->connector_id = ida_simple_get(&config->connector_ida, 0, 0, GFP_KERNEL);
+ if (connector->connector_id < 0) {
+ ret = connector->connector_id;
+ goto out_put;
+ }
+
connector->connector_type = connector_type;
connector->connector_type_id =
ida_simple_get(connector_ida, 1, 0, GFP_KERNEL);
if (connector->connector_type_id < 0) {
ret = connector->connector_type_id;
- goto out_put;
+ goto out_put_id;
}
connector->name =
kasprintf(GFP_KERNEL, "%s-%d",
@@ -931,7 +938,7 @@
connector->connector_type_id);
if (!connector->name) {
ret = -ENOMEM;
- goto out_put;
+ goto out_put_type_id;
}
INIT_LIST_HEAD(&connector->probed_modes);
@@ -959,7 +966,12 @@
}
connector->debugfs_entry = NULL;
-
+out_put_type_id:
+ if (ret)
+ ida_remove(connector_ida, connector->connector_type_id);
+out_put_id:
+ if (ret)
+ ida_remove(&config->connector_ida, connector->connector_id);
out_put:
if (ret)
drm_mode_object_put(dev, &connector->base);
@@ -996,6 +1008,9 @@
ida_remove(&drm_connector_enum_list[connector->connector_type].ida,
connector->connector_type_id);
+ ida_remove(&dev->mode_config.connector_ida,
+ connector->connector_id);
+
kfree(connector->display_info.bus_formats);
drm_mode_object_put(dev, &connector->base);
kfree(connector->name);
@@ -1013,32 +1028,6 @@
EXPORT_SYMBOL(drm_connector_cleanup);
/**
- * drm_connector_index - find the index of a registered connector
- * @connector: connector to find index for
- *
- * Given a registered connector, return the index of that connector within a DRM
- * device's list of connectors.
- */
-unsigned int drm_connector_index(struct drm_connector *connector)
-{
- unsigned int index = 0;
- struct drm_connector *tmp;
- struct drm_mode_config *config = &connector->dev->mode_config;
-
- WARN_ON(!drm_modeset_is_locked(&config->connection_mutex));
-
- drm_for_each_connector(tmp, connector->dev) {
- if (tmp == connector)
- return index;
-
- index++;
- }
-
- BUG();
-}
-EXPORT_SYMBOL(drm_connector_index);
-
-/**
* drm_connector_register - register a connector
* @connector: the connector to register
*
@@ -5789,6 +5778,7 @@
INIT_LIST_HEAD(&dev->mode_config.plane_list);
idr_init(&dev->mode_config.crtc_idr);
idr_init(&dev->mode_config.tile_idr);
+ ida_init(&dev->mode_config.connector_ida);
drm_modeset_lock_all(dev);
drm_mode_create_standard_properties(dev);
@@ -5869,6 +5859,7 @@
crtc->funcs->destroy(crtc);
}
+ ida_destroy(&dev->mode_config.connector_ida);
idr_destroy(&dev->mode_config.tile_idr);
idr_destroy(&dev->mode_config.crtc_idr);
drm_modeset_lock_fini(&dev->mode_config.connection_mutex);
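
With the IDA-backed connector_id, the drm_crtc.c init path also grows a second resource to unwind, hence the extra goto labels. The idiom, with stubs so it stands alone: each acquisition gets a label that releases exactly what was obtained before the failure.

#include <stdio.h>

static int get_a(void) { return 0; }	/* stubs for illustration */
static int get_b(void) { return 0; }
static int get_c(void) { return -1; }	/* pretend the last step fails */
static void put_a(void) { puts("put_a"); }
static void put_b(void) { puts("put_b"); }

static int init(void)
{
	int ret;

	ret = get_a();
	if (ret)
		goto out;
	ret = get_b();
	if (ret)
		goto out_put_a;
	ret = get_c();
	if (ret)
		goto out_put_b;
	return 0;

out_put_b:
	put_b();
out_put_a:
	put_a();
out:
	return ret;
}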
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 8ae13de..27fbd79 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -1159,11 +1159,13 @@
drm_dp_put_port(port);
goto out;
}
-
- drm_mode_connector_set_tile_property(port->connector);
-
+ if (port->port_num >= DP_MST_LOGICAL_PORT_0) {
+ port->cached_edid = drm_get_edid(port->connector, &port->aux.ddc);
+ drm_mode_connector_set_tile_property(port->connector);
+ }
(*mstb->mgr->cbs->register_connector)(port->connector);
}
+
out:
/* put reference to this port */
drm_dp_put_port(port);
@@ -1188,8 +1190,8 @@
port->ddps = conn_stat->displayport_device_plug_status;
if (old_ddps != port->ddps) {
- dowork = true;
if (port->ddps) {
+ dowork = true;
} else {
port->available_pbn = 0;
}
@@ -1294,13 +1296,8 @@
if (port->input)
continue;
- if (!port->ddps) {
- if (port->cached_edid) {
- kfree(port->cached_edid);
- port->cached_edid = NULL;
- }
+ if (!port->ddps)
continue;
- }
if (!port->available_pbn)
drm_dp_send_enum_path_resources(mgr, mstb, port);
@@ -1311,12 +1308,6 @@
drm_dp_check_and_send_link_address(mgr, mstb_child);
drm_dp_put_mst_branch_device(mstb_child);
}
- } else if (port->pdt == DP_PEER_DEVICE_SST_SINK ||
- port->pdt == DP_PEER_DEVICE_DP_LEGACY_CONV) {
- if (!port->cached_edid) {
- port->cached_edid =
- drm_get_edid(port->connector, &port->aux.ddc);
- }
}
}
}
@@ -1336,8 +1327,6 @@
drm_dp_check_and_send_link_address(mgr, mstb);
drm_dp_put_mst_branch_device(mstb);
}
-
- (*mgr->cbs->hotplug)(mgr);
}
static bool drm_dp_validate_guid(struct drm_dp_mst_topology_mgr *mgr,
@@ -1597,6 +1586,7 @@
for (i = 0; i < txmsg->reply.u.link_addr.nports; i++) {
drm_dp_add_port(mstb, mgr->dev, &txmsg->reply.u.link_addr.ports[i]);
}
+ (*mgr->cbs->hotplug)(mgr);
}
} else {
mstb->link_address_sent = false;
@@ -2293,6 +2283,8 @@
drm_dp_update_port(mstb, &msg.u.conn_stat);
DRM_DEBUG_KMS("Got CSN: pn: %d ldps:%d ddps: %d mcs: %d ip: %d pdt: %d\n", msg.u.conn_stat.port_number, msg.u.conn_stat.legacy_device_plug_status, msg.u.conn_stat.displayport_device_plug_status, msg.u.conn_stat.message_capability_status, msg.u.conn_stat.input_port, msg.u.conn_stat.peer_device_type);
+ (*mgr->cbs->hotplug)(mgr);
+
} else if (msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, msg.req_type, seqno, false);
if (!mstb)
@@ -2379,6 +2371,10 @@
case DP_PEER_DEVICE_SST_SINK:
status = connector_status_connected;
+ /* for logical ports - cache the EDID */
+ if (port->port_num >= 8 && !port->cached_edid) {
+ port->cached_edid = drm_get_edid(connector, &port->aux.ddc);
+ }
break;
case DP_PEER_DEVICE_DP_LEGACY_CONV:
if (port->ldps)
@@ -2433,7 +2429,10 @@
if (port->cached_edid)
edid = drm_edid_duplicate(port->cached_edid);
-
+ else {
+ edid = drm_get_edid(connector, &port->aux.ddc);
+ drm_mode_connector_set_tile_property(connector);
+ }
port->has_audio = drm_detect_monitor_audio(edid);
drm_dp_put_port(port);
return edid;
diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c
index d12a4ef..1fe1457 100644
--- a/drivers/gpu/drm/drm_irq.c
+++ b/drivers/gpu/drm/drm_irq.c
@@ -224,6 +224,64 @@
diff = (flags & DRM_CALLED_FROM_VBLIRQ) != 0;
}
+ /*
+ * Within a drm_vblank_pre_modeset - drm_vblank_post_modeset
+ * interval? If so then vblank irqs keep running and it will likely
+ * happen that the hardware vblank counter is not trustworthy as it
+ * might reset at some point in that interval and vblank timestamps
+ * are not trustworthy either in that interval. IOW, this can result
+ * in a bogus diff >> 1 which must be avoided as it would cause
+ * random large forward jumps of the software vblank counter.
+ */
+ if (diff > 1 && (vblank->inmodeset & 0x2)) {
+ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u"
+ " due to pre-modeset.\n", pipe, diff);
+ diff = 1;
+ }
+
+ /*
+ * FIXME: Need to replace this hack with proper seqlocks.
+ *
+ * Restrict the bump of the software vblank counter to a safe maximum
+ * value of +1 whenever there is the possibility that concurrent readers
+ * of vblank timestamps could be active at the moment, as the current
+ * implementation of the timestamp caching and updating is not safe
+ * against concurrent readers for calls to store_vblank() with a bump
+ * of anything but +1. A bump != 1 would very likely return corrupted
+ * timestamps to userspace, because the same slot in the cache could
+ * be concurrently written by store_vblank() and read by one of those
+ * readers without the read-retry logic detecting the collision.
+ *
+ * Concurrent readers can exist when we are called from the
+ * drm_vblank_off() or drm_vblank_on() functions and other non-vblank-
+ * irq callers. However, all those calls to us are happening with the
+ * vbl_lock locked to prevent drm_vblank_get(), so the vblank refcount
+ * can't increase while we are executing. Therefore a zero refcount at
+ * this point is safe for arbitrary counter bumps if we are called
+ * outside vblank irq, a non-zero count is not 100% safe. Unfortunately
+ * we must also accept a refcount of 1, as whenever we are called from
+ * drm_vblank_get() -> drm_vblank_enable() the refcount will be 1 and
+ * we must let that one pass through in order to not lose vblank counts
+ * during vblank irq off - which would completely defeat the whole
+ * point of this routine.
+ *
+ * Whenever we are called from vblank irq, we have to assume concurrent
+ * readers exist or can show up any time during our execution, even if
+ * the refcount is currently zero, as vblank irqs are usually only
+ * enabled due to the presence of readers, and because when we are called
+ * from vblank irq we can't hold the vbl_lock to protect us from sudden
+ * bumps in vblank refcount. Therefore also restrict bumps to +1 when
+ * called from vblank irq.
+ */
+ if ((diff > 1) && (atomic_read(&vblank->refcount) > 1 ||
+ (flags & DRM_CALLED_FROM_VBLIRQ))) {
+ DRM_DEBUG_VBL("clamping vblank bump to 1 on crtc %u: diffr=%u "
+ "refcount %u, vblirq %u\n", pipe, diff,
+ atomic_read(&vblank->refcount),
+ (flags & DRM_CALLED_FROM_VBLIRQ) != 0);
+ diff = 1;
+ }
+
DRM_DEBUG_VBL("updating vblank count on crtc %u:"
" current=%u, diff=%u, hw=%u hw_last=%u\n",
pipe, vblank->count, diff, cur_vblank, vblank->last);
@@ -1316,7 +1374,13 @@
spin_lock_irqsave(&dev->event_lock, irqflags);
spin_lock(&dev->vbl_lock);
- vblank_disable_and_save(dev, pipe);
+ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
+ pipe, vblank->enabled, vblank->inmodeset);
+
+ /* Avoid redundant vblank disables without previous drm_vblank_on(). */
+ if (drm_core_check_feature(dev, DRIVER_ATOMIC) || !vblank->inmodeset)
+ vblank_disable_and_save(dev, pipe);
+
wake_up(&vblank->queue);
/*
@@ -1418,6 +1482,9 @@
return;
spin_lock_irqsave(&dev->vbl_lock, irqflags);
+ DRM_DEBUG_VBL("crtc %d, vblank enabled %d, inmodeset %d\n",
+ pipe, vblank->enabled, vblank->inmodeset);
+
/* Drop our private "prevent drm_vblank_get" refcount */
if (vblank->inmodeset) {
atomic_dec(&vblank->refcount);
@@ -1430,8 +1497,7 @@
* re-enable interrupts if there are users left, or the
* user wishes vblank interrupts to be enabled all the time.
*/
- if (atomic_read(&vblank->refcount) != 0 ||
- (!dev->vblank_disable_immediate && drm_vblank_offdelay == 0))
+ if (atomic_read(&vblank->refcount) != 0 || drm_vblank_offdelay == 0)
WARN_ON(drm_vblank_enable(dev, pipe));
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
}
@@ -1526,6 +1592,7 @@
if (vblank->inmodeset) {
spin_lock_irqsave(&dev->vbl_lock, irqflags);
dev->vblank_disable_allowed = true;
+ drm_reset_vblank_timestamp(dev, pipe);
spin_unlock_irqrestore(&dev->vbl_lock, irqflags);
if (vblank->inmodeset & 0x2)
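
The two clamps added above boil down to one decision: bump the software vblank counter by the full hardware diff only when no concurrent timestamp reader can exist, otherwise restrict the bump to +1. A hedged sketch of just that decision in plain C; the names are illustrative, not the DRM API.

/*
 * Editorial sketch of the vblank-bump clamping above. The two conditions
 * mirror the patch: clamp inside a pre-/post-modeset interval, and clamp
 * whenever concurrent timestamp readers may exist (refcount > 1 or when
 * called from the vblank irq). All my_* names are hypothetical.
 */
#include <stdio.h>

#define MY_CALLED_FROM_VBLIRQ		0x1
#define MY_INMODESET_POST_PENDING	0x2

static unsigned int my_clamp_vblank_diff(unsigned int diff,
					 unsigned int inmodeset,
					 unsigned int refcount,
					 unsigned int flags)
{
	/* hw counter may reset mid-modeset: a large diff is bogus */
	if (diff > 1 && (inmodeset & MY_INMODESET_POST_PENDING))
		return 1;

	/* concurrent readers of the timestamp cache: only +1 is race-free */
	if (diff > 1 &&
	    (refcount > 1 || (flags & MY_CALLED_FROM_VBLIRQ)))
		return 1;

	return diff;
}

int main(void)
{
	printf("%u %u %u\n",
	       my_clamp_vblank_diff(5, 0, 0, 0),			/* 5 */
	       my_clamp_vblank_diff(5, MY_INMODESET_POST_PENDING, 0, 0),/* 1 */
	       my_clamp_vblank_diff(5, 0, 2, 0));			/* 1 */
	return 0;
}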
diff --git a/drivers/gpu/drm/exynos/Kconfig b/drivers/gpu/drm/exynos/Kconfig
index 83efca9..f17d392 100644
--- a/drivers/gpu/drm/exynos/Kconfig
+++ b/drivers/gpu/drm/exynos/Kconfig
@@ -1,6 +1,6 @@
config DRM_EXYNOS
tristate "DRM Support for Samsung SoC EXYNOS Series"
- depends on OF && DRM && (PLAT_SAMSUNG || ARCH_MULTIPLATFORM)
+ depends on OF && DRM && (ARCH_S3C64XX || ARCH_EXYNOS || ARCH_MULTIPLATFORM)
select DRM_KMS_HELPER
select DRM_KMS_FB_HELPER
select FB_CFB_FILLRECT
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 1bf6a21..162ab93 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -93,7 +93,7 @@
if (test_bit(BIT_SUSPENDED, &ctx->flags))
return -EPERM;
- if (test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
+ if (!test_and_set_bit(BIT_IRQS_ENABLED, &ctx->flags)) {
val = VIDINTCON0_INTEN;
if (ctx->out_type == IFTYPE_I80)
val |= VIDINTCON0_FRAMEDONE;
@@ -402,8 +402,6 @@
decon_enable_vblank(ctx->crtc);
decon_commit(ctx->crtc);
-
- set_bit(BIT_SUSPENDED, &ctx->flags);
}
static void decon_disable(struct exynos_drm_crtc *crtc)
@@ -582,9 +580,9 @@
static int exynos5433_decon_suspend(struct device *dev)
{
struct decon_context *ctx = dev_get_drvdata(dev);
- int i;
+ int i = ARRAY_SIZE(decon_clks_name);
- for (i = 0; i < ARRAY_SIZE(decon_clks_name); i++)
+ while (--i >= 0)
clk_disable_unprepare(ctx->clks[i]);
return 0;
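
The rewritten suspend loop walks the clock array backwards, following the usual teardown convention: release resources in the reverse order of acquisition. A small compilable illustration with hypothetical names; my_clk_off() stands in for clk_disable_unprepare().

/* Editorial sketch of reverse-order teardown as in the reworked
 * exynos5433_decon_suspend() above. */
#include <stdio.h>

#define NCLKS 4

static void my_clk_off(int idx)
{
	printf("disable clk %d\n", idx);
}

static void my_suspend(void)
{
	int i = NCLKS;

	while (--i >= 0)	/* last-enabled clock is disabled first */
		my_clk_off(i);	/* 3, 2, 1, 0 */
}

int main(void)
{
	my_suspend();
	return 0;
}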
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index e977a81..26e81d19 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1782,6 +1782,7 @@
bridge = of_drm_find_bridge(dsi->bridge_node);
if (bridge) {
+ encoder->bridge = bridge;
drm_bridge_attach(drm_dev, bridge);
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
index f6118ba..8baabd8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fbdev.c
@@ -50,7 +50,7 @@
if (vm_size > exynos_gem->size)
return -EINVAL;
- ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->pages,
+ ret = dma_mmap_attrs(helper->dev->dev, vma, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
&exynos_gem->dma_attrs);
if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index c747824..8a4f4a0 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -1723,7 +1723,7 @@
goto err_put_clk;
}
- DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv);
+ DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
spin_lock_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index c17efdb..8dfe6e1 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -1166,7 +1166,7 @@
goto err_free_event;
}
- cmd = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd;
+ cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
if (copy_from_user(cmdlist->data + cmdlist->last,
(void __user *)cmd,
@@ -1184,7 +1184,8 @@
if (req->cmd_buf_nr) {
struct drm_exynos_g2d_cmd *cmd_buf;
- cmd_buf = (struct drm_exynos_g2d_cmd *)(uint32_t)req->cmd_buf;
+ cmd_buf = (struct drm_exynos_g2d_cmd *)
+ (unsigned long)req->cmd_buf;
if (copy_from_user(cmdlist->data + cmdlist->last,
(void __user *)cmd_buf,
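
Both g2d hunks replace a truncating cast through uint32_t with a cast through unsigned long, which is pointer-sized on Linux targets, so a 64-bit userspace address survives the round trip. A minimal demonstration in plain C, illustrative only:

/* Editorial demo of the cast fix above: a u64 carrying a userspace
 * address must go through a pointer-sized integer type. On 64-bit
 * builds, (uint32_t) would chop the upper half; (unsigned long) keeps
 * every bit. */
#include <stdint.h>
#include <stdio.h>

static void *u64_to_ptr(uint64_t v)
{
	return (void *)(unsigned long)v;	/* no truncation */
}

int main(void)
{
	uint64_t addr = 0x12345678abcdull;	/* needs > 32 bits */

	printf("round-trip ok: %d\n",
	       (uint64_t)(unsigned long)u64_to_ptr(addr) == addr);
	return 0;
}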
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 32358c5..26b5e4b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -218,7 +218,7 @@
return ERR_PTR(ret);
}
- DRM_DEBUG_KMS("created file object = 0x%x\n", (unsigned int)obj->filp);
+ DRM_DEBUG_KMS("created file object = %p\n", obj->filp);
return exynos_gem;
}
@@ -335,7 +335,7 @@
if (vm_size > exynos_gem->size)
return -EINVAL;
- ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->pages,
+ ret = dma_mmap_attrs(drm_dev->dev, vma, exynos_gem->cookie,
exynos_gem->dma_addr, exynos_gem->size,
&exynos_gem->dma_attrs);
if (ret < 0) {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 7aecd23..5d20da8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -1723,7 +1723,7 @@
return ret;
}
- DRM_DEBUG_KMS("id[%d]ippdrv[0x%x]\n", ctx->id, (int)ippdrv);
+ DRM_DEBUG_KMS("id[%d]ippdrv[%p]\n", ctx->id, ippdrv);
mutex_init(&ctx->lock);
platform_set_drvdata(pdev, ctx);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 67d2423..95eeb91 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -208,7 +208,7 @@
* e.g. PAUSE state, queue buf, command control.
*/
list_for_each_entry(ippdrv, &exynos_drm_ippdrv_list, drv_list) {
- DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n", count++, (int)ippdrv);
+ DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n", count++, ippdrv);
mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry(c_node, &ippdrv->cmd_list, list) {
@@ -388,8 +388,8 @@
}
property->prop_id = ret;
- DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[0x%x]\n",
- property->prop_id, property->cmd, (int)ippdrv);
+ DRM_DEBUG_KMS("created prop_id[%d]cmd[%d]ippdrv[%p]\n",
+ property->prop_id, property->cmd, ippdrv);
/* stored property information and ippdrv in private data */
c_node->property = *property;
@@ -518,7 +518,7 @@
{
int i;
- DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
+ DRM_DEBUG_KMS("node[%p]\n", m_node);
if (!m_node) {
DRM_ERROR("invalid dequeue node.\n");
@@ -562,7 +562,7 @@
m_node->buf_id = qbuf->buf_id;
INIT_LIST_HEAD(&m_node->list);
- DRM_DEBUG_KMS("m_node[0x%x]ops_id[%d]\n", (int)m_node, qbuf->ops_id);
+ DRM_DEBUG_KMS("m_node[%p]ops_id[%d]\n", m_node, qbuf->ops_id);
DRM_DEBUG_KMS("prop_id[%d]buf_id[%d]\n", qbuf->prop_id, m_node->buf_id);
for_each_ipp_planar(i) {
@@ -582,8 +582,8 @@
buf_info->handles[i] = qbuf->handle[i];
buf_info->base[i] = *addr;
- DRM_DEBUG_KMS("i[%d]base[0x%x]hd[0x%lx]\n", i,
- buf_info->base[i], buf_info->handles[i]);
+ DRM_DEBUG_KMS("i[%d]base[%pad]hd[0x%lx]\n", i,
+ &buf_info->base[i], buf_info->handles[i]);
}
}
@@ -664,7 +664,7 @@
mutex_lock(&c_node->event_lock);
list_for_each_entry_safe(e, te, &c_node->event_list, base.link) {
- DRM_DEBUG_KMS("count[%d]e[0x%x]\n", count++, (int)e);
+ DRM_DEBUG_KMS("count[%d]e[%p]\n", count++, e);
/*
* qbuf == NULL condition means all event deletion.
@@ -755,7 +755,7 @@
/* find memory node from memory list */
list_for_each_entry(m_node, head, list) {
- DRM_DEBUG_KMS("count[%d]m_node[0x%x]\n", count++, (int)m_node);
+ DRM_DEBUG_KMS("count[%d]m_node[%p]\n", count++, m_node);
/* compare buffer id */
if (m_node->buf_id == qbuf->buf_id)
@@ -772,7 +772,7 @@
struct exynos_drm_ipp_ops *ops = NULL;
int ret = 0;
- DRM_DEBUG_KMS("node[0x%x]\n", (int)m_node);
+ DRM_DEBUG_KMS("node[%p]\n", m_node);
if (!m_node) {
DRM_ERROR("invalid queue node.\n");
@@ -1237,7 +1237,7 @@
m_node = list_first_entry(head,
struct drm_exynos_ipp_mem_node, list);
- DRM_DEBUG_KMS("m_node[0x%x]\n", (int)m_node);
+ DRM_DEBUG_KMS("m_node[%p]\n", m_node);
ret = ipp_set_mem_node(ippdrv, c_node, m_node);
if (ret) {
@@ -1610,8 +1610,8 @@
}
ippdrv->prop_list.ipp_id = ret;
- DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]ipp_id[%d]\n",
- count++, (int)ippdrv, ret);
+ DRM_DEBUG_KMS("count[%d]ippdrv[%p]ipp_id[%d]\n",
+ count++, ippdrv, ret);
/* store parent device for node */
ippdrv->parent_dev = dev;
@@ -1668,7 +1668,7 @@
file_priv->ipp_dev = dev;
- DRM_DEBUG_KMS("done priv[0x%x]\n", (int)dev);
+ DRM_DEBUG_KMS("done priv[%p]\n", dev);
return 0;
}
@@ -1685,8 +1685,8 @@
mutex_lock(&ippdrv->cmd_lock);
list_for_each_entry_safe(c_node, tc_node,
&ippdrv->cmd_list, list) {
- DRM_DEBUG_KMS("count[%d]ippdrv[0x%x]\n",
- count++, (int)ippdrv);
+ DRM_DEBUG_KMS("count[%d]ippdrv[%p]\n",
+ count++, ippdrv);
if (c_node->filp == file) {
/*
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 4eaef36..9869d70 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -18,6 +18,7 @@
#include <linux/of.h>
#include <linux/of_graph.h>
#include <linux/clk.h>
+#include <linux/component.h>
#include <drm/drmP.h>
#include <linux/mfd/syscon.h>
#include <linux/regmap.h>
@@ -306,9 +307,9 @@
return ret;
}
-void mic_disable(struct drm_bridge *bridge) { }
+static void mic_disable(struct drm_bridge *bridge) { }
-void mic_post_disable(struct drm_bridge *bridge)
+static void mic_post_disable(struct drm_bridge *bridge)
{
struct exynos_mic *mic = bridge->driver_private;
int i;
@@ -328,7 +329,7 @@
mutex_unlock(&mic_mutex);
}
-void mic_pre_enable(struct drm_bridge *bridge)
+static void mic_pre_enable(struct drm_bridge *bridge)
{
struct exynos_mic *mic = bridge->driver_private;
int ret, i;
@@ -371,11 +372,35 @@
mutex_unlock(&mic_mutex);
}
-void mic_enable(struct drm_bridge *bridge) { }
+static void mic_enable(struct drm_bridge *bridge) { }
-void mic_destroy(struct drm_bridge *bridge)
+static const struct drm_bridge_funcs mic_bridge_funcs = {
+ .disable = mic_disable,
+ .post_disable = mic_post_disable,
+ .pre_enable = mic_pre_enable,
+ .enable = mic_enable,
+};
+
+static int exynos_mic_bind(struct device *dev, struct device *master,
+ void *data)
{
- struct exynos_mic *mic = bridge->driver_private;
+ struct exynos_mic *mic = dev_get_drvdata(dev);
+ int ret;
+
+ mic->bridge.funcs = &mic_bridge_funcs;
+ mic->bridge.of_node = dev->of_node;
+ mic->bridge.driver_private = mic;
+ ret = drm_bridge_add(&mic->bridge);
+ if (ret)
+ DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
+
+ return ret;
+}
+
+static void exynos_mic_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct exynos_mic *mic = dev_get_drvdata(dev);
int i;
mutex_lock(&mic_mutex);
@@ -387,16 +412,16 @@
already_disabled:
mutex_unlock(&mic_mutex);
+
+ drm_bridge_remove(&mic->bridge);
}
-static const struct drm_bridge_funcs mic_bridge_funcs = {
- .disable = mic_disable,
- .post_disable = mic_post_disable,
- .pre_enable = mic_pre_enable,
- .enable = mic_enable,
+static const struct component_ops exynos_mic_component_ops = {
+ .bind = exynos_mic_bind,
+ .unbind = exynos_mic_unbind,
};
-int exynos_mic_probe(struct platform_device *pdev)
+static int exynos_mic_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct exynos_mic *mic;
@@ -435,17 +460,8 @@
goto err;
}
- mic->bridge.funcs = &mic_bridge_funcs;
- mic->bridge.of_node = dev->of_node;
- mic->bridge.driver_private = mic;
- ret = drm_bridge_add(&mic->bridge);
- if (ret) {
- DRM_ERROR("mic: Failed to add MIC to the global bridge list\n");
- goto err;
- }
-
for (i = 0; i < NUM_CLKS; i++) {
- mic->clks[i] = of_clk_get_by_name(dev->of_node, clk_names[i]);
+ mic->clks[i] = devm_clk_get(dev, clk_names[i]);
if (IS_ERR(mic->clks[i])) {
DRM_ERROR("mic: Failed to get clock (%s)\n",
clk_names[i]);
@@ -454,7 +470,10 @@
}
}
+ platform_set_drvdata(pdev, mic);
+
DRM_DEBUG_KMS("MIC has been probed\n");
+ return component_add(dev, &exynos_mic_component_ops);
err:
return ret;
@@ -462,14 +481,7 @@
static int exynos_mic_remove(struct platform_device *pdev)
{
- struct exynos_mic *mic = platform_get_drvdata(pdev);
- int i;
-
- drm_bridge_remove(&mic->bridge);
-
- for (i = NUM_CLKS - 1; i > -1; i--)
- clk_put(mic->clks[i]);
-
+ component_del(&pdev->dev, &exynos_mic_component_ops);
return 0;
}
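
The MIC conversion splits the driver along the component framework's seam: probe() only gathers device resources and calls component_add(), while the DRM-facing bridge registration moves into .bind so it runs when the master DRM device assembles. A hedged skeleton of that split, with a toy one-slot component list and hypothetical my_* names:

/* Editorial skeleton of the component split used above. The toy
 * my_component_add()/my_master_assemble() pair stands in for the kernel's
 * component framework; drm_bridge_add()/remove() appear only in comments. */
#include <stdio.h>

struct my_component {
	const char *name;
	int (*bind)(struct my_component *);
	void (*unbind)(struct my_component *);
};

static struct my_component *registered;	/* toy one-slot component list */

static void my_component_add(struct my_component *c)
{
	registered = c;
}

/* Master side: bind every registered piece once all have probed. */
static int my_master_assemble(void)
{
	return registered ? registered->bind(registered) : -1;
}

static int mic_bind(struct my_component *c)
{
	printf("%s: drm_bridge_add() would run here\n", c->name);
	return 0;
}

static void mic_unbind(struct my_component *c)
{
	printf("%s: drm_bridge_remove() would run here\n", c->name);
}

static int mic_probe(struct my_component *c)
{
	/* probe only acquires resources, then defers to the master */
	my_component_add(c);
	return 0;
}

int main(void)
{
	struct my_component mic = { "mic", mic_bind, mic_unbind };

	mic_probe(&mic);
	my_master_assemble();	/* the DRM master triggers bind */
	mic.unbind(&mic);
	return 0;
}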
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index bea0f78..ce59f44 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -754,7 +754,7 @@
goto err_ippdrv_register;
}
- DRM_DEBUG_KMS("ippdrv[0x%x]\n", (int)ippdrv);
+ DRM_DEBUG_KMS("ippdrv[%p]\n", ippdrv);
platform_set_drvdata(pdev, rot);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_vidi.c b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
index 62ac4e5..b605bd7 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_vidi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_vidi.c
@@ -223,7 +223,7 @@
}
}
-static int vidi_show_connection(struct device *dev,
+static ssize_t vidi_show_connection(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct vidi_context *ctx = dev_get_drvdata(dev);
@@ -238,7 +238,7 @@
return rc;
}
-static int vidi_store_connection(struct device *dev,
+static ssize_t vidi_store_connection(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t len)
{
@@ -294,7 +294,9 @@
}
if (vidi->connection) {
- struct edid *raw_edid = (struct edid *)(uint32_t)vidi->edid;
+ struct edid *raw_edid;
+
+ raw_edid = (struct edid *)(unsigned long)vidi->edid;
if (!drm_edid_is_valid(raw_edid)) {
DRM_DEBUG_KMS("edid data is invalid.\n");
return -EINVAL;
diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
index 0fc38bb..cf39ed3 100644
--- a/drivers/gpu/drm/i915/i915_debugfs.c
+++ b/drivers/gpu/drm/i915/i915_debugfs.c
@@ -825,8 +825,11 @@
}
for_each_pipe(dev_priv, pipe) {
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(pipe))) {
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv,
+ power_domain)) {
seq_printf(m, "Pipe %c power disabled\n",
pipe_name(pipe));
continue;
@@ -840,6 +843,8 @@
seq_printf(m, "Pipe %c IER:\t%08x\n",
pipe_name(pipe),
I915_READ(GEN8_DE_PIPE_IER(pipe)));
+
+ intel_display_power_put(dev_priv, power_domain);
}
seq_printf(m, "Display Engine port interrupt mask:\t%08x\n",
@@ -3985,6 +3990,7 @@
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[pipe];
struct intel_crtc *crtc = to_intel_crtc(intel_get_crtc_for_pipe(dev,
pipe));
+ enum intel_display_power_domain power_domain;
u32 val = 0; /* shut up gcc */
int ret;
@@ -3995,7 +4001,8 @@
if (pipe_crc->source && source)
return -EINVAL;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe))) {
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain)) {
DRM_DEBUG_KMS("Trying to capture CRC while pipe is off\n");
return -EIO;
}
@@ -4012,7 +4019,7 @@
ret = ivb_pipe_crc_ctl_reg(dev, pipe, &source, &val);
if (ret != 0)
- return ret;
+ goto out;
/* none -> real source transition */
if (source) {
@@ -4024,8 +4031,10 @@
entries = kcalloc(INTEL_PIPE_CRC_ENTRIES_NR,
sizeof(pipe_crc->entries[0]),
GFP_KERNEL);
- if (!entries)
- return -ENOMEM;
+ if (!entries) {
+ ret = -ENOMEM;
+ goto out;
+ }
/*
* When IPS gets enabled, the pipe CRC changes. Since IPS gets
@@ -4081,7 +4090,12 @@
hsw_enable_ips(crtc);
}
- return 0;
+ ret = 0;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
/*
diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
index f0f75d7..b0847b9 100644
--- a/drivers/gpu/drm/i915/i915_drv.h
+++ b/drivers/gpu/drm/i915/i915_drv.h
@@ -751,6 +751,7 @@
uint32_t mmio_count;
i915_reg_t mmioaddr[8];
uint32_t mmiodata[8];
+ uint32_t dc_state;
};
#define DEV_INFO_FOR_EACH_FLAG(func, sep) \
@@ -1988,6 +1989,9 @@
#define I915_GTT_OFFSET_NONE ((u32)-1)
struct drm_i915_gem_object_ops {
+ unsigned int flags;
+#define I915_GEM_OBJECT_HAS_STRUCT_PAGE 0x1
+
/* Interface between the GEM object and its backing storage.
* get_pages() is called once prior to the use of the associated set
* of pages before to binding them into the GTT, and put_pages() is
@@ -2003,6 +2007,7 @@
*/
int (*get_pages)(struct drm_i915_gem_object *);
void (*put_pages)(struct drm_i915_gem_object *);
+
int (*dmabuf_export)(struct drm_i915_gem_object *);
void (*release)(struct drm_i915_gem_object *);
};
diff --git a/drivers/gpu/drm/i915/i915_gem.c b/drivers/gpu/drm/i915/i915_gem.c
index ddc21d4..bb44bad 100644
--- a/drivers/gpu/drm/i915/i915_gem.c
+++ b/drivers/gpu/drm/i915/i915_gem.c
@@ -4425,6 +4425,7 @@
}
static const struct drm_i915_gem_object_ops i915_gem_object_ops = {
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
.get_pages = i915_gem_object_get_pages_gtt,
.put_pages = i915_gem_object_put_pages_gtt,
};
@@ -5261,7 +5262,7 @@
struct page *page;
/* Only default objects have per-page dirty tracking */
- if (WARN_ON(obj->ops != &i915_gem_object_ops))
+ if (WARN_ON((obj->ops->flags & I915_GEM_OBJECT_HAS_STRUCT_PAGE) == 0))
return NULL;
page = i915_gem_object_get_page(obj, n);
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index 19fb0bdd..59e45b3 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -789,9 +789,10 @@
}
static const struct drm_i915_gem_object_ops i915_gem_userptr_ops = {
- .dmabuf_export = i915_gem_userptr_dmabuf_export,
+ .flags = I915_GEM_OBJECT_HAS_STRUCT_PAGE,
.get_pages = i915_gem_userptr_get_pages,
.put_pages = i915_gem_userptr_put_pages,
+ .dmabuf_export = i915_gem_userptr_dmabuf_export,
.release = i915_gem_userptr_release,
};
diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h
index 007ae83..4897728 100644
--- a/drivers/gpu/drm/i915/i915_reg.h
+++ b/drivers/gpu/drm/i915/i915_reg.h
@@ -3287,19 +3287,20 @@
#define PORT_HOTPLUG_STAT _MMIO(dev_priv->info.display_mmio_offset + 0x61114)
/*
- * HDMI/DP bits are gen4+
+ * HDMI/DP bits are g4x+
*
* WARNING: Bspec for hpd status bits on gen4 seems to be completely confused.
* Please check the detailed lore in the commit message for for experimental
* evidence.
*/
-#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
+/* Bspec says GM45 should match G4X/VLV/CHV, but reality disagrees */
+#define PORTD_HOTPLUG_LIVE_STATUS_GM45 (1 << 29)
+#define PORTC_HOTPLUG_LIVE_STATUS_GM45 (1 << 28)
+#define PORTB_HOTPLUG_LIVE_STATUS_GM45 (1 << 27)
+/* G4X/VLV/CHV DP/HDMI bits again match Bspec */
+#define PORTD_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
#define PORTC_HOTPLUG_LIVE_STATUS_G4X (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 27)
-/* VLV DP/HDMI bits again match Bspec */
-#define PORTD_HOTPLUG_LIVE_STATUS_VLV (1 << 27)
-#define PORTC_HOTPLUG_LIVE_STATUS_VLV (1 << 28)
-#define PORTB_HOTPLUG_LIVE_STATUS_VLV (1 << 29)
+#define PORTB_HOTPLUG_LIVE_STATUS_G4X (1 << 29)
#define PORTD_HOTPLUG_INT_STATUS (3 << 21)
#define PORTD_HOTPLUG_INT_LONG_PULSE (2 << 21)
#define PORTD_HOTPLUG_INT_SHORT_PULSE (1 << 21)
@@ -7514,7 +7515,7 @@
#define DPLL_CFGCR2_PDIV_7 (4<<2)
#define DPLL_CFGCR2_CENTRAL_FREQ_MASK (3)
-#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR2)
+#define DPLL_CFGCR1(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR1, _DPLL2_CFGCR1)
#define DPLL_CFGCR2(id) _MMIO_PIPE((id) - SKL_DPLL1, _DPLL1_CFGCR2, _DPLL2_CFGCR2)
/* BXT display engine PLL */
diff --git a/drivers/gpu/drm/i915/i915_suspend.c b/drivers/gpu/drm/i915/i915_suspend.c
index a2aa09c..a8af594 100644
--- a/drivers/gpu/drm/i915/i915_suspend.c
+++ b/drivers/gpu/drm/i915/i915_suspend.c
@@ -49,7 +49,7 @@
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PCH_PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PCH_PP_OFF_DELAYS);
dev_priv->regfile.savePP_DIVISOR = I915_READ(PCH_PP_DIVISOR);
- } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ } else if (INTEL_INFO(dev)->gen <= 4) {
dev_priv->regfile.savePP_CONTROL = I915_READ(PP_CONTROL);
dev_priv->regfile.savePP_ON_DELAYS = I915_READ(PP_ON_DELAYS);
dev_priv->regfile.savePP_OFF_DELAYS = I915_READ(PP_OFF_DELAYS);
@@ -84,7 +84,7 @@
I915_WRITE(PCH_PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PCH_PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
I915_WRITE(PCH_PP_CONTROL, dev_priv->regfile.savePP_CONTROL);
- } else if (!IS_VALLEYVIEW(dev) && !IS_CHERRYVIEW(dev)) {
+ } else if (INTEL_INFO(dev)->gen <= 4) {
I915_WRITE(PP_ON_DELAYS, dev_priv->regfile.savePP_ON_DELAYS);
I915_WRITE(PP_OFF_DELAYS, dev_priv->regfile.savePP_OFF_DELAYS);
I915_WRITE(PP_DIVISOR, dev_priv->regfile.savePP_DIVISOR);
diff --git a/drivers/gpu/drm/i915/intel_crt.c b/drivers/gpu/drm/i915/intel_crt.c
index 9c89df1..a7b4a524 100644
--- a/drivers/gpu/drm/i915/intel_crt.c
+++ b/drivers/gpu/drm/i915/intel_crt.c
@@ -71,22 +71,29 @@
struct intel_crt *crt = intel_encoder_to_crt(encoder);
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(crt->adpa_reg);
if (!(tmp & ADPA_DAC_ENABLE))
- return false;
+ goto out;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
- return true;
+ ret = true;
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static unsigned int intel_crt_get_flags(struct intel_encoder *encoder)
diff --git a/drivers/gpu/drm/i915/intel_csr.c b/drivers/gpu/drm/i915/intel_csr.c
index 9bb63a8..647d85e 100644
--- a/drivers/gpu/drm/i915/intel_csr.c
+++ b/drivers/gpu/drm/i915/intel_csr.c
@@ -240,6 +240,8 @@
I915_WRITE(dev_priv->csr.mmioaddr[i],
dev_priv->csr.mmiodata[i]);
}
+
+ dev_priv->csr.dc_state = 0;
}
static uint32_t *parse_csr_fw(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_ddi.c b/drivers/gpu/drm/i915/intel_ddi.c
index e6408e5..0f3df2c 100644
--- a/drivers/gpu/drm/i915/intel_ddi.c
+++ b/drivers/gpu/drm/i915/intel_ddi.c
@@ -1589,7 +1589,8 @@
DPLL_CFGCR2_KDIV(wrpll_params.kdiv) |
DPLL_CFGCR2_PDIV(wrpll_params.pdiv) |
wrpll_params.central_freq;
- } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT) {
+ } else if (intel_encoder->type == INTEL_OUTPUT_DISPLAYPORT ||
+ intel_encoder->type == INTEL_OUTPUT_DP_MST) {
switch (crtc_state->port_clock / 2) {
case 81000:
ctrl1 |= DPLL_CTRL1_LINK_RATE(DPLL_CTRL1_LINK_RATE_810, 0);
@@ -1968,13 +1969,16 @@
enum transcoder cpu_transcoder;
enum intel_display_power_domain power_domain;
uint32_t tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(intel_encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
- if (!intel_encoder->get_hw_state(intel_encoder, &pipe))
- return false;
+ if (!intel_encoder->get_hw_state(intel_encoder, &pipe)) {
+ ret = false;
+ goto out;
+ }
if (port == PORT_A)
cpu_transcoder = TRANSCODER_EDP;
@@ -1986,23 +1990,33 @@
switch (tmp & TRANS_DDI_MODE_SELECT_MASK) {
case TRANS_DDI_MODE_SELECT_HDMI:
case TRANS_DDI_MODE_SELECT_DVI:
- return (type == DRM_MODE_CONNECTOR_HDMIA);
+ ret = type == DRM_MODE_CONNECTOR_HDMIA;
+ break;
case TRANS_DDI_MODE_SELECT_DP_SST:
- if (type == DRM_MODE_CONNECTOR_eDP)
- return true;
- return (type == DRM_MODE_CONNECTOR_DisplayPort);
+ ret = type == DRM_MODE_CONNECTOR_eDP ||
+ type == DRM_MODE_CONNECTOR_DisplayPort;
+ break;
+
case TRANS_DDI_MODE_SELECT_DP_MST:
/* if the transcoder is in MST state then the
* connector isn't connected */
- return false;
+ ret = false;
+ break;
case TRANS_DDI_MODE_SELECT_FDI:
- return (type == DRM_MODE_CONNECTOR_VGA);
+ ret = type == DRM_MODE_CONNECTOR_VGA;
+ break;
default:
- return false;
+ ret = false;
+ break;
}
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
bool intel_ddi_get_hw_state(struct intel_encoder *encoder,
@@ -2014,15 +2028,18 @@
enum intel_display_power_domain power_domain;
u32 tmp;
int i;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(DDI_BUF_CTL(port));
if (!(tmp & DDI_BUF_CTL_ENABLE))
- return false;
+ goto out;
if (port == PORT_A) {
tmp = I915_READ(TRANS_DDI_FUNC_CTL(TRANSCODER_EDP));
@@ -2040,25 +2057,32 @@
break;
}
- return true;
- } else {
- for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
- tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+ ret = true;
- if ((tmp & TRANS_DDI_PORT_MASK)
- == TRANS_DDI_SELECT_PORT(port)) {
- if ((tmp & TRANS_DDI_MODE_SELECT_MASK) == TRANS_DDI_MODE_SELECT_DP_MST)
- return false;
+ goto out;
+ }
- *pipe = i;
- return true;
- }
+ for (i = TRANSCODER_A; i <= TRANSCODER_C; i++) {
+ tmp = I915_READ(TRANS_DDI_FUNC_CTL(i));
+
+ if ((tmp & TRANS_DDI_PORT_MASK) == TRANS_DDI_SELECT_PORT(port)) {
+ if ((tmp & TRANS_DDI_MODE_SELECT_MASK) ==
+ TRANS_DDI_MODE_SELECT_DP_MST)
+ goto out;
+
+ *pipe = i;
+ ret = true;
+
+ goto out;
}
}
DRM_DEBUG_KMS("No pipe for ddi port %c found\n", port_name(port));
- return false;
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
void intel_ddi_enable_pipe_clock(struct intel_crtc *intel_crtc)
@@ -2507,12 +2531,14 @@
{
uint32_t val;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(WRPLL_CTL(pll->id));
hw_state->wrpll = val;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
return val & WRPLL_PLL_ENABLE;
}
@@ -2522,12 +2548,14 @@
{
uint32_t val;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(SPLL_CTL);
hw_state->spll = val;
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
return val & SPLL_PLL_ENABLE;
}
@@ -2644,16 +2672,19 @@
uint32_t val;
unsigned int dpll;
const struct skl_dpll_regs *regs = skl_dpll_regs;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
+ ret = false;
+
/* DPLL0 is not part of the shared DPLLs, so pll->id is 0 for DPLL1 */
dpll = pll->id + 1;
val = I915_READ(regs[pll->id].ctl);
if (!(val & LCPLL_PLL_ENABLE))
- return false;
+ goto out;
val = I915_READ(DPLL_CTRL1);
hw_state->ctrl1 = (val >> (dpll * 6)) & 0x3f;
@@ -2663,8 +2694,12 @@
hw_state->cfgcr1 = I915_READ(regs[pll->id].cfgcr1);
hw_state->cfgcr2 = I915_READ(regs[pll->id].cfgcr2);
}
+ ret = true;
- return true;
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
}
static void skl_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -2931,13 +2966,16 @@
{
enum port port = (enum port)pll->id; /* 1:1 port->PLL mapping */
uint32_t val;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
+ ret = false;
+
val = I915_READ(BXT_PORT_PLL_ENABLE(port));
if (!(val & PORT_PLL_ENABLE))
- return false;
+ goto out;
hw_state->ebb0 = I915_READ(BXT_PORT_PLL_EBB_0(port));
hw_state->ebb0 &= PORT_PLL_P1_MASK | PORT_PLL_P2_MASK;
@@ -2984,7 +3022,12 @@
I915_READ(BXT_PORT_PCS_DW12_LN23(port)));
hw_state->pcsdw12 &= LANE_STAGGER_MASK | LANESTAGGER_STRAP_OVRD;
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
+ return ret;
}
static void bxt_shared_dplls_init(struct drm_i915_private *dev_priv)
@@ -3119,11 +3162,15 @@
{
u32 temp;
- if (intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
+ if (intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_AUDIO)) {
temp = I915_READ(HSW_AUD_PIN_ELD_CP_VLD);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_AUDIO);
+
if (temp & AUDIO_OUTPUT_ENABLE(intel_crtc->pipe))
return true;
}
+
return false;
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 5feb657..46947ff 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -1351,18 +1351,21 @@
bool cur_state;
enum transcoder cpu_transcoder = intel_pipe_to_cpu_transcoder(dev_priv,
pipe);
+ enum intel_display_power_domain power_domain;
/* if we need the pipe quirk it must be always on */
if ((pipe == PIPE_A && dev_priv->quirks & QUIRK_PIPEA_FORCE) ||
(pipe == PIPE_B && dev_priv->quirks & QUIRK_PIPEB_FORCE))
state = true;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_TRANSCODER(cpu_transcoder))) {
- cur_state = false;
- } else {
+ power_domain = POWER_DOMAIN_TRANSCODER(cpu_transcoder);
+ if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
u32 val = I915_READ(PIPECONF(cpu_transcoder));
cur_state = !!(val & PIPECONF_ENABLE);
+
+ intel_display_power_put(dev_priv, power_domain);
+ } else {
+ cur_state = false;
}
I915_STATE_WARN(cur_state != state,
@@ -8171,18 +8174,22 @@
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
uint32_t tmp;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ ret = false;
+
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
- return false;
+ goto out;
if (IS_G4X(dev) || IS_VALLEYVIEW(dev) || IS_CHERRYVIEW(dev)) {
switch (tmp & PIPECONF_BPC_MASK) {
@@ -8262,7 +8269,12 @@
pipe_config->base.adjusted_mode.crtc_clock =
pipe_config->port_clock / pipe_config->pixel_multiplier;
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void ironlake_init_pch_refclk(struct drm_device *dev)
@@ -9366,18 +9378,21 @@
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ enum intel_display_power_domain power_domain;
uint32_t tmp;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
+ ret = false;
tmp = I915_READ(PIPECONF(crtc->pipe));
if (!(tmp & PIPECONF_ENABLE))
- return false;
+ goto out;
switch (tmp & PIPECONF_BPC_MASK) {
case PIPECONF_6BPC:
@@ -9440,7 +9455,12 @@
ironlake_get_pfit_config(crtc, pipe_config);
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
@@ -9950,12 +9970,17 @@
{
struct drm_device *dev = crtc->base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
- enum intel_display_power_domain pfit_domain;
+ enum intel_display_power_domain power_domain;
+ unsigned long power_domain_mask;
uint32_t tmp;
+ bool ret;
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_PIPE(crtc->pipe)))
+ power_domain = POWER_DOMAIN_PIPE(crtc->pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ power_domain_mask = BIT(power_domain);
+
+ ret = false;
pipe_config->cpu_transcoder = (enum transcoder) crtc->pipe;
pipe_config->shared_dpll = DPLL_ID_PRIVATE;
@@ -9982,13 +10007,14 @@
pipe_config->cpu_transcoder = TRANSCODER_EDP;
}
- if (!intel_display_power_is_enabled(dev_priv,
- POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder)))
- return false;
+ power_domain = POWER_DOMAIN_TRANSCODER(pipe_config->cpu_transcoder);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
+ goto out;
+ power_domain_mask |= BIT(power_domain);
tmp = I915_READ(PIPECONF(pipe_config->cpu_transcoder));
if (!(tmp & PIPECONF_ENABLE))
- return false;
+ goto out;
haswell_get_ddi_port_state(crtc, pipe_config);
@@ -9998,14 +10024,14 @@
skl_init_scalers(dev, crtc, pipe_config);
}
- pfit_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
-
if (INTEL_INFO(dev)->gen >= 9) {
pipe_config->scaler_state.scaler_id = -1;
pipe_config->scaler_state.scaler_users &= ~(1 << SKL_CRTC_INDEX);
}
- if (intel_display_power_is_enabled(dev_priv, pfit_domain)) {
+ power_domain = POWER_DOMAIN_PIPE_PANEL_FITTER(crtc->pipe);
+ if (intel_display_power_get_if_enabled(dev_priv, power_domain)) {
+ power_domain_mask |= BIT(power_domain);
if (INTEL_INFO(dev)->gen >= 9)
skylake_get_pfit_config(crtc, pipe_config);
else
@@ -10023,7 +10049,13 @@
pipe_config->pixel_multiplier = 1;
}
- return true;
+ ret = true;
+
+out:
+ for_each_power_domain(power_domain, power_domain_mask)
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void i845_update_cursor(struct drm_crtc *crtc, u32 base, bool on)
@@ -13630,7 +13662,7 @@
{
uint32_t val;
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PLLS))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_PLLS))
return false;
val = I915_READ(PCH_DPLL(pll->id));
@@ -13638,6 +13670,8 @@
hw_state->fp0 = I915_READ(PCH_FP0(pll->id));
hw_state->fp1 = I915_READ(PCH_FP1(pll->id));
+ intel_display_power_put(dev_priv, POWER_DOMAIN_PLLS);
+
return val & DPLL_VCO_ENABLE;
}
@@ -15568,10 +15602,12 @@
* level, just check if the power well is enabled instead of trying to
* follow the "don't touch the power well if we don't need it" policy
* the rest of the driver uses. */
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_VGA))
+ if (!intel_display_power_get_if_enabled(dev_priv, POWER_DOMAIN_VGA))
return;
i915_redisable_vga_power_on(dev);
+
+ intel_display_power_put(dev_priv, POWER_DOMAIN_VGA);
}
static bool primary_get_hw_state(struct intel_plane *plane)
diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c
index 796e3d3..1d8de43 100644
--- a/drivers/gpu/drm/i915/intel_dp.c
+++ b/drivers/gpu/drm/i915/intel_dp.c
@@ -2362,15 +2362,18 @@
struct drm_i915_private *dev_priv = dev->dev_private;
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(intel_dp->output_reg);
if (!(tmp & DP_PORT_EN))
- return false;
+ goto out;
if (IS_GEN7(dev) && port == PORT_A) {
*pipe = PORT_TO_PIPE_CPT(tmp);
@@ -2381,7 +2384,9 @@
u32 trans_dp = I915_READ(TRANS_DP_CTL(p));
if (TRANS_DP_PIPE_TO_PORT(trans_dp) == port) {
*pipe = p;
- return true;
+ ret = true;
+
+ goto out;
}
}
@@ -2393,7 +2398,12 @@
*pipe = PORT_TO_PIPE(tmp);
}
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void intel_dp_get_config(struct intel_encoder *encoder,
@@ -4493,20 +4503,20 @@
return I915_READ(PORT_HOTPLUG_STAT) & bit;
}
-static bool vlv_digital_port_connected(struct drm_i915_private *dev_priv,
- struct intel_digital_port *port)
+static bool gm45_digital_port_connected(struct drm_i915_private *dev_priv,
+ struct intel_digital_port *port)
{
u32 bit;
switch (port->port) {
case PORT_B:
- bit = PORTB_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTB_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_C:
- bit = PORTC_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTC_HOTPLUG_LIVE_STATUS_GM45;
break;
case PORT_D:
- bit = PORTD_HOTPLUG_LIVE_STATUS_VLV;
+ bit = PORTD_HOTPLUG_LIVE_STATUS_GM45;
break;
default:
MISSING_CASE(port->port);
@@ -4558,8 +4568,8 @@
return cpt_digital_port_connected(dev_priv, port);
else if (IS_BROXTON(dev_priv))
return bxt_digital_port_connected(dev_priv, port);
- else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
- return vlv_digital_port_connected(dev_priv, port);
+ else if (IS_GM45(dev_priv))
+ return gm45_digital_port_connected(dev_priv, port);
else
return g4x_digital_port_connected(dev_priv, port);
}
diff --git a/drivers/gpu/drm/i915/intel_dp_link_training.c b/drivers/gpu/drm/i915/intel_dp_link_training.c
index 8888793..0b8eefc 100644
--- a/drivers/gpu/drm/i915/intel_dp_link_training.c
+++ b/drivers/gpu/drm/i915/intel_dp_link_training.c
@@ -215,27 +215,46 @@
}
}
+/*
+ * Pick the training pattern for channel equalization: Training Pattern 3 for
+ * HBR2 or DP 1.2 devices that support it, Training Pattern 2 otherwise.
+ */
+static u32 intel_dp_training_pattern(struct intel_dp *intel_dp)
+{
+ u32 training_pattern = DP_TRAINING_PATTERN_2;
+ bool source_tps3, sink_tps3;
+
+ /*
+ * Intel platforms that support HBR2 also support TPS3. TPS3 support is
+ * also mandatory for downstream devices that support HBR2. However, not
+ * all sinks follow the spec.
+ *
+ * Due to WaDisableHBR2, SKL < B0 is the only exception where TPS3 is
+ * supported by the source but still not enabled.
+ */
+ source_tps3 = intel_dp_source_supports_hbr2(intel_dp);
+ sink_tps3 = drm_dp_tps3_supported(intel_dp->dpcd);
+
+ if (source_tps3 && sink_tps3) {
+ training_pattern = DP_TRAINING_PATTERN_3;
+ } else if (intel_dp->link_rate == 540000) {
+ if (!source_tps3)
+ DRM_DEBUG_KMS("5.4 Gbps link rate without source HBR2/TPS3 support\n");
+ if (!sink_tps3)
+ DRM_DEBUG_KMS("5.4 Gbps link rate without sink TPS3 support\n");
+ }
+
+ return training_pattern;
+}
+
static void
intel_dp_link_training_channel_equalization(struct intel_dp *intel_dp)
{
bool channel_eq = false;
int tries, cr_tries;
- uint32_t training_pattern = DP_TRAINING_PATTERN_2;
+ u32 training_pattern;
- /*
- * Training Pattern 3 for HBR2 or 1.2 devices that support it.
- *
- * Intel platforms that support HBR2 also support TPS3. TPS3 support is
- * also mandatory for downstream devices that support HBR2.
- *
- * Due to WaDisableHBR2 SKL < B0 is the only exception where TPS3 is
- * supported but still not enabled.
- */
- if (intel_dp_source_supports_hbr2(intel_dp) &&
- drm_dp_tps3_supported(intel_dp->dpcd))
- training_pattern = DP_TRAINING_PATTERN_3;
- else if (intel_dp->link_rate == 540000)
- DRM_ERROR("5.4 Gbps link rate without HBR2/TPS3 support\n");
+ training_pattern = intel_dp_training_pattern(intel_dp);
/* channel equalization */
if (!intel_dp_set_link_train(intel_dp,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index ea54158..df7f3cb 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -1428,6 +1428,8 @@
enum intel_display_power_domain domain);
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
+bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain);
void intel_display_power_put(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain);
@@ -1514,6 +1516,7 @@
enable_rpm_wakeref_asserts(dev_priv)
void intel_runtime_pm_get(struct drm_i915_private *dev_priv);
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv);
void intel_runtime_pm_get_noresume(struct drm_i915_private *dev_priv);
void intel_runtime_pm_put(struct drm_i915_private *dev_priv);
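
Every get_hw_state conversion in this series follows the same bracket around the newly declared intel_display_power_get_if_enabled(): take the domain only if it is already enabled, bail out with false otherwise, and release the reference on every exit path. A hedged caller-side sketch of the pattern in plain C; the my_* names are stand-ins, not the i915 API.

/* Editorial sketch of the conditional-get / single-put pattern this
 * series applies throughout i915. The toy my_power_* pair stands in for
 * intel_display_power_get_if_enabled()/intel_display_power_put(). */
#include <stdbool.h>
#include <stdio.h>

static bool domain_enabled = true;	/* toy hardware state */

static bool my_power_get_if_enabled(void)
{
	return domain_enabled;	/* the real call also takes a wakeref */
}

static void my_power_put(void)
{
	/* releases the reference taken by a successful get */
}

static bool my_get_hw_state(void)
{
	bool ret = false;

	if (!my_power_get_if_enabled())
		return false;	/* domain off: no reference, no put */

	/* ... registers can be read safely here ... */
	ret = true;

	my_power_put();		/* every path after a successful get */
	return ret;
}

int main(void)
{
	printf("hw state: %d\n", my_get_hw_state());
	domain_enabled = false;
	printf("hw state: %d\n", my_get_hw_state());
	return 0;
}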
diff --git a/drivers/gpu/drm/i915/intel_dsi.c b/drivers/gpu/drm/i915/intel_dsi.c
index 44742fa..0193c62a 100644
--- a/drivers/gpu/drm/i915/intel_dsi.c
+++ b/drivers/gpu/drm/i915/intel_dsi.c
@@ -664,13 +664,16 @@
struct drm_device *dev = encoder->base.dev;
enum intel_display_power_domain power_domain;
enum port port;
+ bool ret;
DRM_DEBUG_KMS("\n");
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
/* XXX: this only works for one DSI output */
for_each_dsi_port(port, intel_dsi->ports) {
i915_reg_t ctrl_reg = IS_BROXTON(dev) ?
@@ -691,12 +694,16 @@
if (dpi_enabled || (func & CMD_MODE_DATA_WIDTH_MASK)) {
if (I915_READ(MIPI_DEVICE_READY(port)) & DEVICE_READY) {
*pipe = port == PORT_A ? PIPE_A : PIPE_B;
- return true;
+ ret = true;
+
+ goto out;
}
}
}
+out:
+ intel_display_power_put(dev_priv, power_domain);
- return false;
+ return ret;
}
static void intel_dsi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
index a5e99ac..e8113ad 100644
--- a/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
+++ b/drivers/gpu/drm/i915/intel_dsi_panel_vbt.c
@@ -204,10 +204,28 @@
struct drm_device *dev = intel_dsi->base.base.dev;
struct drm_i915_private *dev_priv = dev->dev_private;
+ if (dev_priv->vbt.dsi.seq_version >= 3)
+ data++;
+
gpio = *data++;
/* pull up/down */
- action = *data++;
+ action = *data++ & 1;
+
+ if (gpio >= ARRAY_SIZE(gtable)) {
+ DRM_DEBUG_KMS("unknown gpio %u\n", gpio);
+ goto out;
+ }
+
+ if (!IS_VALLEYVIEW(dev_priv)) {
+ DRM_DEBUG_KMS("GPIO element not supported on this platform\n");
+ goto out;
+ }
+
+ if (dev_priv->vbt.dsi.seq_version >= 3) {
+ DRM_DEBUG_KMS("GPIO element v3 not supported\n");
+ goto out;
+ }
function = gtable[gpio].function_reg;
pad = gtable[gpio].pad_reg;
@@ -226,6 +244,7 @@
vlv_gpio_nc_write(dev_priv, pad, val);
mutex_unlock(&dev_priv->sb_lock);
+out:
return data;
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 4a77639..cb5d1b1 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -880,15 +880,18 @@
struct intel_hdmi *intel_hdmi = enc_to_intel_hdmi(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(intel_hdmi->hdmi_reg);
if (!(tmp & SDVO_ENABLE))
- return false;
+ goto out;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
@@ -897,7 +900,12 @@
else
*pipe = PORT_TO_PIPE(tmp);
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void intel_hdmi_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_i2c.c b/drivers/gpu/drm/i915/intel_i2c.c
index 25254b5..deb8282 100644
--- a/drivers/gpu/drm/i915/intel_i2c.c
+++ b/drivers/gpu/drm/i915/intel_i2c.c
@@ -683,7 +683,7 @@
return 0;
err:
- while (--pin) {
+ while (pin--) {
if (!intel_gmbus_is_valid_pin(dev_priv, pin))
continue;
diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c
index 0da0240..bc04d8d 100644
--- a/drivers/gpu/drm/i915/intel_lvds.c
+++ b/drivers/gpu/drm/i915/intel_lvds.c
@@ -75,22 +75,30 @@
struct intel_lvds_encoder *lvds_encoder = to_lvds_encoder(&encoder->base);
enum intel_display_power_domain power_domain;
u32 tmp;
+ bool ret;
power_domain = intel_display_port_power_domain(encoder);
- if (!intel_display_power_is_enabled(dev_priv, power_domain))
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
return false;
+ ret = false;
+
tmp = I915_READ(lvds_encoder->reg);
if (!(tmp & LVDS_PORT_EN))
- return false;
+ goto out;
if (HAS_PCH_CPT(dev))
*pipe = PORT_TO_PIPE_CPT(tmp);
else
*pipe = PORT_TO_PIPE(tmp);
- return true;
+ ret = true;
+
+out:
+ intel_display_power_put(dev_priv, power_domain);
+
+ return ret;
}
static void intel_lvds_get_config(struct intel_encoder *encoder,
diff --git a/drivers/gpu/drm/i915/intel_pm.c b/drivers/gpu/drm/i915/intel_pm.c
index eb5fa05..b28c29f 100644
--- a/drivers/gpu/drm/i915/intel_pm.c
+++ b/drivers/gpu/drm/i915/intel_pm.c
@@ -1783,16 +1783,20 @@
const struct intel_plane_state *pstate,
uint32_t mem_value)
{
- int bpp = pstate->base.fb ? pstate->base.fb->bits_per_pixel / 8 : 0;
+ /*
+ * We treat the cursor plane as always-on for the purposes of watermark
+ * calculation. Until we have two-stage watermark programming merged,
+ * this is necessary to avoid flickering.
+ */
+ int cpp = 4;
+ int width = pstate->visible ? pstate->base.crtc_w : 64;
- if (!cstate->base.active || !pstate->visible)
+ if (!cstate->base.active)
return 0;
return ilk_wm_method2(ilk_pipe_pixel_rate(cstate),
cstate->base.adjusted_mode.crtc_htotal,
- drm_rect_width(&pstate->dst),
- bpp,
- mem_value);
+ width, cpp, mem_value);
}
/* Only for WM_LP. */
@@ -2825,7 +2829,10 @@
memset(ddb, 0, sizeof(*ddb));
for_each_pipe(dev_priv, pipe) {
- if (!intel_display_power_is_enabled(dev_priv, POWER_DOMAIN_PIPE(pipe)))
+ enum intel_display_power_domain power_domain;
+
+ power_domain = POWER_DOMAIN_PIPE(pipe);
+ if (!intel_display_power_get_if_enabled(dev_priv, power_domain))
continue;
for_each_plane(dev_priv, pipe, plane) {
@@ -2837,6 +2844,8 @@
val = I915_READ(CUR_BUF_CFG(pipe));
skl_ddb_entry_init_from_hw(&ddb->plane[pipe][PLANE_CURSOR],
val);
+
+ intel_display_power_put(dev_priv, power_domain);
}
}
diff --git a/drivers/gpu/drm/i915/intel_runtime_pm.c b/drivers/gpu/drm/i915/intel_runtime_pm.c
index ddbdbff..678ed34 100644
--- a/drivers/gpu/drm/i915/intel_runtime_pm.c
+++ b/drivers/gpu/drm/i915/intel_runtime_pm.c
@@ -470,6 +470,43 @@
}
}
+static void gen9_write_dc_state(struct drm_i915_private *dev_priv,
+ u32 state)
+{
+ int rewrites = 0;
+ int rereads = 0;
+ u32 v;
+
+ I915_WRITE(DC_STATE_EN, state);
+
+ /* It has been observed that disabling the dc6 state sometimes
+ * doesn't stick and the DMC keeps returning the old value. Make sure
+ * the write really sticks by re-reading it enough times, and force a
+ * rewrite until we are confident the state is exactly what we want.
+ */
+ do {
+ v = I915_READ(DC_STATE_EN);
+
+ if (v != state) {
+ I915_WRITE(DC_STATE_EN, state);
+ rewrites++;
+ rereads = 0;
+ } else if (rereads++ > 5) {
+ break;
+ }
+
+ } while (rewrites < 100);
+
+ if (v != state)
+ DRM_ERROR("Writing dc state to 0x%x failed, now 0x%x\n",
+ state, v);
+
+ /* Most of the time we need at most one retry; avoid log spam */
+ if (rewrites > 1)
+ DRM_DEBUG_KMS("Rewrote dc state to 0x%x %d times\n",
+ state, rewrites);
+}
+
static void gen9_set_dc_state(struct drm_i915_private *dev_priv, uint32_t state)
{
uint32_t val;
@@ -494,10 +531,18 @@
val = I915_READ(DC_STATE_EN);
DRM_DEBUG_KMS("Setting DC state from %02x to %02x\n",
val & mask, state);
+
+ /* Check if DMC is ignoring our DC state requests */
+ if ((val & mask) != dev_priv->csr.dc_state)
+ DRM_ERROR("DC state mismatch (0x%x -> 0x%x)\n",
+ dev_priv->csr.dc_state, val & mask);
+
val &= ~mask;
val |= state;
- I915_WRITE(DC_STATE_EN, val);
- POSTING_READ(DC_STATE_EN);
+
+ gen9_write_dc_state(dev_priv, val);
+
+ dev_priv->csr.dc_state = val & mask;
}
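
gen9_write_dc_state() above is a bounded write-verify-retry loop: post the value, require several consecutive matching re-reads, and rewrite whenever the readback disagrees, giving up after a hard cap. The shape generalizes to any register firmware may race with; a hedged standalone sketch, with my_reg standing in for DC_STATE_EN:

/* Editorial sketch of the bounded write-verify-retry loop from
 * gen9_write_dc_state(). The constants (5 stable re-reads, 100 rewrites)
 * mirror the patch. */
#include <stdint.h>
#include <stdio.h>

static volatile uint32_t my_reg;	/* toy register firmware may clobber */

static void my_write_stable(uint32_t state)
{
	int rewrites = 0, rereads = 0;
	uint32_t v;

	my_reg = state;
	do {
		v = my_reg;
		if (v != state) {
			my_reg = state;	/* clobbered: rewrite */
			rewrites++;
			rereads = 0;	/* stability count restarts */
		} else if (rereads++ > 5) {
			break;		/* value held long enough */
		}
	} while (rewrites < 100);	/* hard bound: never loop forever */

	if (v != state)
		fprintf(stderr, "write of 0x%x failed, reg now 0x%x\n",
			(unsigned)state, (unsigned)v);
}

int main(void)
{
	my_write_stable(0x2);
	printf("reg = 0x%x\n", (unsigned)my_reg);
	return 0;
}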
void bxt_enable_dc9(struct drm_i915_private *dev_priv)
@@ -1442,6 +1487,22 @@
chv_set_pipe_power_well(dev_priv, power_well, false);
}
+static void
+__intel_display_power_get_domain(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ struct i915_power_well *power_well;
+ int i;
+
+ for_each_power_well(i, power_well, BIT(domain), power_domains) {
+ if (!power_well->count++)
+ intel_power_well_enable(dev_priv, power_well);
+ }
+
+ power_domains->domain_use_count[domain]++;
+}
+
/**
* intel_display_power_get - grab a power domain reference
* @dev_priv: i915 device instance
@@ -1457,24 +1518,53 @@
void intel_display_power_get(struct drm_i915_private *dev_priv,
enum intel_display_power_domain domain)
{
- struct i915_power_domains *power_domains;
- struct i915_power_well *power_well;
- int i;
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
intel_runtime_pm_get(dev_priv);
- power_domains = &dev_priv->power_domains;
+ mutex_lock(&power_domains->lock);
+
+ __intel_display_power_get_domain(dev_priv, domain);
+
+ mutex_unlock(&power_domains->lock);
+}
+
+/**
+ * intel_display_power_get_if_enabled - grab a reference for an enabled display power domain
+ * @dev_priv: i915 device instance
+ * @domain: power domain to reference
+ *
+ * This function grabs a power domain reference for @domain and ensures that the
+ * power domain and all its parents are powered up. Therefore users should only
+ * grab a reference to the innermost power domain they need.
+ *
+ * Any power domain reference obtained by this function must have a symmetric
+ * call to intel_display_power_put() to release the reference again.
+ */
+bool intel_display_power_get_if_enabled(struct drm_i915_private *dev_priv,
+ enum intel_display_power_domain domain)
+{
+ struct i915_power_domains *power_domains = &dev_priv->power_domains;
+ bool is_enabled;
+
+ if (!intel_runtime_pm_get_if_in_use(dev_priv))
+ return false;
mutex_lock(&power_domains->lock);
- for_each_power_well(i, power_well, BIT(domain), power_domains) {
- if (!power_well->count++)
- intel_power_well_enable(dev_priv, power_well);
+ if (__intel_display_power_is_enabled(dev_priv, domain)) {
+ __intel_display_power_get_domain(dev_priv, domain);
+ is_enabled = true;
+ } else {
+ is_enabled = false;
}
- power_domains->domain_use_count[domain]++;
-
mutex_unlock(&power_domains->lock);
+
+ if (!is_enabled)
+ intel_runtime_pm_put(dev_priv);
+
+ return is_enabled;
}
/**
@@ -2246,6 +2336,43 @@
}
/**
+ * intel_runtime_pm_get_if_in_use - grab a runtime pm reference if device in use
+ * @dev_priv: i915 device instance
+ *
+ * This function grabs a device-level runtime pm reference if the device is
+ * already in use and ensures that it is powered up.
+ *
+ * Any runtime pm reference obtained by this function must have a symmetric
+ * call to intel_runtime_pm_put() to release the reference again.
+ */
+bool intel_runtime_pm_get_if_in_use(struct drm_i915_private *dev_priv)
+{
+ struct drm_device *dev = dev_priv->dev;
+ struct device *device = &dev->pdev->dev;
+ int ret;
+
+ if (!IS_ENABLED(CONFIG_PM))
+ return true;
+
+ ret = pm_runtime_get_if_in_use(device);
+
+ /*
+ * In cases where runtime PM is disabled by the RPM core and we get an
+ * -EINVAL return value, we are not supposed to call this function,
+ * since the power state is undefined. At the moment this applies to
+ * the late/early system suspend/resume handlers.
+ */
+ WARN_ON_ONCE(ret < 0);
+ if (ret <= 0)
+ return false;
+
+ atomic_inc(&dev_priv->pm.wakeref_count);
+ assert_rpm_wakelock_held(dev_priv);
+
+ return true;
+}
+
+/**
* intel_runtime_pm_get_noresume - grab a runtime pm reference
* @dev_priv: i915 device instance
*
diff --git a/drivers/gpu/drm/nouveau/nouveau_bo.c b/drivers/gpu/drm/nouveau/nouveau_bo.c
index 78f520d..e3acc35 100644
--- a/drivers/gpu/drm/nouveau/nouveau_bo.c
+++ b/drivers/gpu/drm/nouveau/nouveau_bo.c
@@ -1520,7 +1520,7 @@
DMA_BIDIRECTIONAL);
if (dma_mapping_error(pdev, addr)) {
- while (--i) {
+ while (i--) {
dma_unmap_page(pdev, ttm_dma->dma_address[i],
PAGE_SIZE, DMA_BIDIRECTIONAL);
ttm_dma->dma_address[i] = 0;
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 24be27d..20935eb 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -635,10 +635,6 @@
nv_crtc->lut.depth = 0;
}
- /* Make sure that drm and hw vblank irqs get resumed if needed. */
- for (head = 0; head < dev->mode_config.num_crtc; head++)
- drm_vblank_on(dev, head);
-
/* This should ensure we don't hit a locking problem when someone
* wakes us up via a connector. We should never go into suspend
* while the display is on anyways.
@@ -648,6 +644,10 @@
drm_helper_resume_force_mode(dev);
+ /* Make sure that drm and hw vblank irqs get resumed if needed. */
+ for (head = 0; head < dev->mode_config.num_crtc; head++)
+ drm_vblank_on(dev, head);
+
list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
struct nouveau_crtc *nv_crtc = nouveau_crtc(crtc);
diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c
index 8a70cec..2dfe58a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_platform.c
+++ b/drivers/gpu/drm/nouveau/nouveau_platform.c
@@ -24,7 +24,7 @@
static int nouveau_platform_probe(struct platform_device *pdev)
{
const struct nvkm_device_tegra_func *func;
- struct nvkm_device *device;
+ struct nvkm_device *device = NULL;
struct drm_device *drm;
int ret;
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
index 7f8a427..e7e581d 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/device/tegra.c
@@ -252,32 +252,40 @@
if (!(tdev = kzalloc(sizeof(*tdev), GFP_KERNEL)))
return -ENOMEM;
- *pdevice = &tdev->device;
+
tdev->func = func;
tdev->pdev = pdev;
tdev->irq = -1;
tdev->vdd = devm_regulator_get(&pdev->dev, "vdd");
- if (IS_ERR(tdev->vdd))
- return PTR_ERR(tdev->vdd);
+ if (IS_ERR(tdev->vdd)) {
+ ret = PTR_ERR(tdev->vdd);
+ goto free;
+ }
tdev->rst = devm_reset_control_get(&pdev->dev, "gpu");
- if (IS_ERR(tdev->rst))
- return PTR_ERR(tdev->rst);
+ if (IS_ERR(tdev->rst)) {
+ ret = PTR_ERR(tdev->rst);
+ goto free;
+ }
tdev->clk = devm_clk_get(&pdev->dev, "gpu");
- if (IS_ERR(tdev->clk))
- return PTR_ERR(tdev->clk);
+ if (IS_ERR(tdev->clk)) {
+ ret = PTR_ERR(tdev->clk);
+ goto free;
+ }
tdev->clk_pwr = devm_clk_get(&pdev->dev, "pwr");
- if (IS_ERR(tdev->clk_pwr))
- return PTR_ERR(tdev->clk_pwr);
+ if (IS_ERR(tdev->clk_pwr)) {
+ ret = PTR_ERR(tdev->clk_pwr);
+ goto free;
+ }
nvkm_device_tegra_probe_iommu(tdev);
ret = nvkm_device_tegra_power_up(tdev);
if (ret)
- return ret;
+ goto remove;
tdev->gpu_speedo = tegra_sku_info.gpu_speedo_value;
ret = nvkm_device_ctor(&nvkm_device_tegra_func, NULL, &pdev->dev,
@@ -285,9 +293,19 @@
cfg, dbg, detect, mmio, subdev_mask,
&tdev->device);
if (ret)
- return ret;
+ goto powerdown;
+
+ *pdevice = &tdev->device;
return 0;
+
+powerdown:
+ nvkm_device_tegra_power_down(tdev);
+remove:
+ nvkm_device_tegra_remove_iommu(tdev);
+free:
+ kfree(tdev);
+ return ret;
}
#else
int
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
index 74e2f7c..9688970 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
@@ -328,6 +328,7 @@
.outp = outp,
}, *dp = &_dp;
u32 datarate = 0;
+ u8 pwr;
int ret;
if (!outp->base.info.location && disp->func->sor.magic)
@@ -355,6 +356,15 @@
/* disable link interrupt handling during link training */
nvkm_notify_put(&outp->irq);
+ /* ensure sink is not in a low-power state */
+ if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) {
+ if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) {
+ pwr &= ~DPCD_SC00_SET_POWER;
+ pwr |= DPCD_SC00_SET_POWER_D0;
+ nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1);
+ }
+ }
+
/* enable down-spreading and execute pre-train script from vbios */
dp_link_train_init(dp, outp->dpcd[3] & 0x01);
diff --git a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
index 9596290..6e10c5e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
+++ b/drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.h
@@ -71,5 +71,11 @@
#define DPCD_LS0C_LANE1_POST_CURSOR2 0x0c
#define DPCD_LS0C_LANE0_POST_CURSOR2 0x03
+/* DPCD Sink Control */
+#define DPCD_SC00 0x00600
+#define DPCD_SC00_SET_POWER 0x03
+#define DPCD_SC00_SET_POWER_D0 0x01
+#define DPCD_SC00_SET_POWER_D3 0x03
+
void nvkm_dp_train(struct work_struct *);
#endif
diff --git a/drivers/gpu/drm/qxl/qxl_ioctl.c b/drivers/gpu/drm/qxl/qxl_ioctl.c
index 2ae8577..7c2e782 100644
--- a/drivers/gpu/drm/qxl/qxl_ioctl.c
+++ b/drivers/gpu/drm/qxl/qxl_ioctl.c
@@ -168,7 +168,8 @@
cmd->command_size))
return -EFAULT;
- reloc_info = kmalloc(sizeof(struct qxl_reloc_info) * cmd->relocs_num, GFP_KERNEL);
+ reloc_info = kmalloc_array(cmd->relocs_num,
+ sizeof(struct qxl_reloc_info), GFP_KERNEL);
if (!reloc_info)
return -ENOMEM;
diff --git a/drivers/gpu/drm/qxl/qxl_prime.c b/drivers/gpu/drm/qxl/qxl_prime.c
index 3d031b5..9f029dd 100644
--- a/drivers/gpu/drm/qxl/qxl_prime.c
+++ b/drivers/gpu/drm/qxl/qxl_prime.c
@@ -68,5 +68,5 @@
struct vm_area_struct *area)
{
WARN_ONCE(1, "not implemented");
- return ENOSYS;
+ return -ENOSYS;
}
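Kernel convention is to return a negative errno on failure; a bare ENOSYS is a positive number that any caller checking "ret < 0" would silently treat as success (hypothetical caller, names assumed):

	ret = qxl_gem_prime_mmap(obj, area);
	if (ret < 0)			/* only negative values are errors */
		return ret;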
diff --git a/drivers/gpu/drm/radeon/radeon_display.c b/drivers/gpu/drm/radeon/radeon_display.c
index 298ea1c..2b9ba03 100644
--- a/drivers/gpu/drm/radeon/radeon_display.c
+++ b/drivers/gpu/drm/radeon/radeon_display.c
@@ -403,7 +403,8 @@
struct drm_crtc *crtc = &radeon_crtc->base;
unsigned long flags;
int r;
- int vpos, hpos, stat, min_udelay;
+ int vpos, hpos, stat, min_udelay = 0;
+ unsigned repcnt = 4;
struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
down_read(&rdev->exclusive_lock);
@@ -454,7 +455,7 @@
* In practice this won't execute very often unless on very fast
* machines because the time window for this to happen is very small.
*/
- for (;;) {
+ while (radeon_crtc->enabled && repcnt--) {
/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
* start in hpos, and to the "fudged earlier" vblank start in
* vpos.
@@ -472,10 +473,22 @@
/* Sleep at least until estimated real start of hw vblank */
spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
+ if (min_udelay > vblank->framedur_ns / 2000) {
+ /* Don't wait ridiculously long - something is wrong */
+ repcnt = 0;
+ break;
+ }
usleep_range(min_udelay, 2 * min_udelay);
spin_lock_irqsave(&crtc->dev->event_lock, flags);
};
+ if (!repcnt)
+ DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
+ "framedur %d, linedur %d, stat %d, vpos %d, "
+ "hpos %d\n", work->crtc_id, min_udelay,
+ vblank->framedur_ns / 1000,
+ vblank->linedur_ns / 1000, stat, vpos, hpos);
+
/* do the flip (mmio) */
radeon_page_flip(rdev, radeon_crtc->crtc_id, work->base);
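Rough numbers behind the new half-frame bound (illustrative values, not from the patch):

	u32 framedur_ns = 16666667;	/* one frame at 60 Hz */
	u32 linedur_ns = 14815;		/* 1125 total lines per frame */
	/* hpos == -4: wait (4 + 1) lines, at least 5 us each */
	int min_udelay = (4 + 1) * max(linedur_ns / 1000, 5u);	/* 70 us */
	/* bound: framedur_ns / 2000 == 8333 us; a computed delay above
	 * half a frame indicates a bogus scanout position, not a wait */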
diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c
index 460c8f2..ca3be90 100644
--- a/drivers/gpu/drm/radeon/radeon_pm.c
+++ b/drivers/gpu/drm/radeon/radeon_pm.c
@@ -276,8 +276,12 @@
if (rdev->irq.installed) {
for (i = 0; i < rdev->num_crtc; i++) {
if (rdev->pm.active_crtcs & (1 << i)) {
- rdev->pm.req_vblank |= (1 << i);
- drm_vblank_get(rdev->ddev, i);
+ /* This can fail if a modeset is in progress */
+ if (drm_vblank_get(rdev->ddev, i) == 0)
+ rdev->pm.req_vblank |= (1 << i);
+ else
+ DRM_DEBUG_DRIVER("crtc %d no vblank, can glitch\n",
+ i);
}
}
}
@@ -1075,8 +1079,6 @@
/* update display watermarks based on new power state */
radeon_bandwidth_update(rdev);
- /* update displays */
- radeon_dpm_display_configuration_changed(rdev);
rdev->pm.dpm.current_active_crtcs = rdev->pm.dpm.new_active_crtcs;
rdev->pm.dpm.current_active_crtc_count = rdev->pm.dpm.new_active_crtc_count;
@@ -1097,6 +1099,9 @@
radeon_dpm_post_set_power_state(rdev);
+ /* update displays */
+ radeon_dpm_display_configuration_changed(rdev);
+
if (rdev->asic->dpm.force_performance_level) {
if (rdev->pm.dpm.thermal_active) {
enum radeon_dpm_forced_level level = rdev->pm.dpm.forced_level;
diff --git a/drivers/gpu/drm/radeon/radeon_sa.c b/drivers/gpu/drm/radeon/radeon_sa.c
index c507896..197b157 100644
--- a/drivers/gpu/drm/radeon/radeon_sa.c
+++ b/drivers/gpu/drm/radeon/radeon_sa.c
@@ -349,8 +349,13 @@
/* see if we can skip over some allocations */
} while (radeon_sa_bo_next_hole(sa_manager, fences, tries));
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ radeon_fence_ref(fences[i]);
+
spin_unlock(&sa_manager->wq.lock);
r = radeon_fence_wait_any(rdev, fences, false);
+ for (i = 0; i < RADEON_NUM_RINGS; ++i)
+ radeon_fence_unref(&fences[i]);
spin_lock(&sa_manager->wq.lock);
/* if we have nothing to wait for block */
if (r == -ENOENT) {
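The added ref/unref pair pins every candidate fence before the lock is dropped, so none of them can be freed while radeon_fence_wait_any() sleeps (shape of the pattern, names from the hunk above):

	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		radeon_fence_ref(fences[i]);	/* pin while unlocked */
	spin_unlock(&sa_manager->wq.lock);
	r = radeon_fence_wait_any(rdev, fences, false);	/* may sleep */
	for (i = 0; i < RADEON_NUM_RINGS; ++i)
		radeon_fence_unref(&fences[i]);	/* drop the pins */
	spin_lock(&sa_manager->wq.lock);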
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index e343074..e06ac54 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -758,7 +758,7 @@
0, PAGE_SIZE,
PCI_DMA_BIDIRECTIONAL);
if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
- while (--i) {
+ while (i--) {
pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
gtt->ttm.dma_address[i] = 0;
diff --git a/drivers/gpu/drm/vc4/vc4_bo.c b/drivers/gpu/drm/vc4/vc4_bo.c
index 18dfe3e..22278bc 100644
--- a/drivers/gpu/drm/vc4/vc4_bo.c
+++ b/drivers/gpu/drm/vc4/vc4_bo.c
@@ -215,7 +215,7 @@
struct drm_gem_cma_object *cma_obj;
if (size == 0)
- return NULL;
+ return ERR_PTR(-EINVAL);
/* First, try to get a vc4_bo from the kernel BO cache. */
if (from_cache) {
@@ -237,7 +237,7 @@
if (IS_ERR(cma_obj)) {
DRM_ERROR("Failed to allocate from CMA:\n");
vc4_bo_stats_dump(vc4);
- return NULL;
+ return ERR_PTR(-ENOMEM);
}
}
@@ -259,8 +259,8 @@
args->size = args->pitch * args->height;
bo = vc4_bo_create(dev, args->size, false);
- if (!bo)
- return -ENOMEM;
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -443,8 +443,8 @@
* get zeroed, and that might leak data between users.
*/
bo = vc4_bo_create(dev, args->size, false);
- if (!bo)
- return -ENOMEM;
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
ret = drm_gem_handle_create(file_priv, &bo->base.base, &args->handle);
drm_gem_object_unreference_unlocked(&bo->base.base);
@@ -496,8 +496,8 @@
}
bo = vc4_bo_create(dev, args->size, true);
- if (!bo)
- return -ENOMEM;
+ if (IS_ERR(bo))
+ return PTR_ERR(bo);
ret = copy_from_user(bo->base.vaddr,
(void __user *)(uintptr_t)args->data,
diff --git a/drivers/gpu/drm/vc4/vc4_drv.h b/drivers/gpu/drm/vc4/vc4_drv.h
index 080865e..51a6333 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.h
+++ b/drivers/gpu/drm/vc4/vc4_drv.h
@@ -91,8 +91,12 @@
struct vc4_bo *overflow_mem;
struct work_struct overflow_mem_work;
+ int power_refcount;
+
+ /* Mutex controlling the power refcount. */
+ struct mutex power_lock;
+
struct {
- uint32_t last_ct0ca, last_ct1ca;
struct timer_list timer;
struct work_struct reset_work;
} hangcheck;
@@ -142,6 +146,7 @@
};
struct vc4_v3d {
+ struct vc4_dev *vc4;
struct platform_device *pdev;
void __iomem *regs;
};
@@ -192,6 +197,11 @@
/* Sequence number for this bin/render job. */
uint64_t seqno;
+ /* Last current addresses the hardware was processing when the
+ * hangcheck timer checked on us.
+ */
+ uint32_t last_ct0ca, last_ct1ca;
+
/* Kernel-space copy of the ioctl arguments */
struct drm_vc4_submit_cl *args;
@@ -434,7 +444,6 @@
extern struct platform_driver vc4_v3d_driver;
int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused);
int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused);
-int vc4_v3d_set_power(struct vc4_dev *vc4, bool on);
/* vc4_validate.c */
int
diff --git a/drivers/gpu/drm/vc4/vc4_gem.c b/drivers/gpu/drm/vc4/vc4_gem.c
index 48ce30a..202aa15 100644
--- a/drivers/gpu/drm/vc4/vc4_gem.c
+++ b/drivers/gpu/drm/vc4/vc4_gem.c
@@ -23,6 +23,7 @@
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
#include <linux/device.h>
#include <linux/io.h>
@@ -228,8 +229,16 @@
struct vc4_dev *vc4 = to_vc4_dev(dev);
DRM_INFO("Resetting GPU.\n");
- vc4_v3d_set_power(vc4, false);
- vc4_v3d_set_power(vc4, true);
+
+ mutex_lock(&vc4->power_lock);
+ if (vc4->power_refcount) {
+ /* Power the device off and back on by dropping the
+ * reference on runtime PM.
+ */
+ pm_runtime_put_sync_suspend(&vc4->v3d->pdev->dev);
+ pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ }
+ mutex_unlock(&vc4->power_lock);
vc4_irq_reset(dev);
@@ -257,10 +266,17 @@
struct drm_device *dev = (struct drm_device *)data;
struct vc4_dev *vc4 = to_vc4_dev(dev);
uint32_t ct0ca, ct1ca;
+ unsigned long irqflags;
+ struct vc4_exec_info *exec;
+
+ spin_lock_irqsave(&vc4->job_lock, irqflags);
+ exec = vc4_first_job(vc4);
/* If idle, we can stop watching for hangs. */
- if (list_empty(&vc4->job_list))
+ if (!exec) {
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
return;
+ }
ct0ca = V3D_READ(V3D_CTNCA(0));
ct1ca = V3D_READ(V3D_CTNCA(1));
@@ -268,14 +284,16 @@
/* If we've made any progress in execution, rearm the timer
* and wait.
*/
- if (ct0ca != vc4->hangcheck.last_ct0ca ||
- ct1ca != vc4->hangcheck.last_ct1ca) {
- vc4->hangcheck.last_ct0ca = ct0ca;
- vc4->hangcheck.last_ct1ca = ct1ca;
+ if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
+ exec->last_ct0ca = ct0ca;
+ exec->last_ct1ca = ct1ca;
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
vc4_queue_hangcheck(dev);
return;
}
+ spin_unlock_irqrestore(&vc4->job_lock, irqflags);
+
/* We've gone too long with no progress, reset. This has to
* be done from a work struct, since resetting can sleep and
* this timer hook isn't allowed to.
@@ -340,12 +358,7 @@
finish_wait(&vc4->job_wait_queue, &wait);
trace_vc4_wait_for_seqno_end(dev, seqno);
- if (ret && ret != -ERESTARTSYS) {
- DRM_ERROR("timeout waiting for render thread idle\n");
- return ret;
- }
-
- return 0;
+ return ret;
}
static void
@@ -578,9 +591,9 @@
}
bo = vc4_bo_create(dev, exec_size, true);
- if (!bo) {
+ if (IS_ERR(bo)) {
DRM_ERROR("Couldn't allocate BO for binning\n");
- ret = -ENOMEM;
+ ret = PTR_ERR(bo);
goto fail;
}
exec->exec_bo = &bo->base;
@@ -617,6 +630,7 @@
static void
vc4_complete_exec(struct drm_device *dev, struct vc4_exec_info *exec)
{
+ struct vc4_dev *vc4 = to_vc4_dev(dev);
unsigned i;
/* Need the struct lock for drm_gem_object_unreference(). */
@@ -635,6 +649,11 @@
}
mutex_unlock(&dev->struct_mutex);
+ mutex_lock(&vc4->power_lock);
+ if (--vc4->power_refcount == 0)
+ pm_runtime_put(&vc4->v3d->pdev->dev);
+ mutex_unlock(&vc4->power_lock);
+
kfree(exec);
}
@@ -746,6 +765,9 @@
struct drm_gem_object *gem_obj;
struct vc4_bo *bo;
+ if (args->pad != 0)
+ return -EINVAL;
+
gem_obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (!gem_obj) {
DRM_ERROR("Failed to look up GEM BO %d\n", args->handle);
@@ -772,7 +794,7 @@
struct vc4_dev *vc4 = to_vc4_dev(dev);
struct drm_vc4_submit_cl *args = data;
struct vc4_exec_info *exec;
- int ret;
+ int ret = 0;
if ((args->flags & ~VC4_SUBMIT_CL_USE_CLEAR_COLOR) != 0) {
DRM_ERROR("Unknown flags: 0x%02x\n", args->flags);
@@ -785,6 +807,15 @@
return -ENOMEM;
}
+ mutex_lock(&vc4->power_lock);
+ if (vc4->power_refcount++ == 0)
+ ret = pm_runtime_get_sync(&vc4->v3d->pdev->dev);
+ mutex_unlock(&vc4->power_lock);
+ if (ret < 0) {
+ kfree(exec);
+ return ret;
+ }
+
exec->args = args;
INIT_LIST_HEAD(&exec->unref_list);
@@ -839,6 +870,8 @@
(unsigned long)dev);
INIT_WORK(&vc4->job_done_work, vc4_job_done_work);
+
+ mutex_init(&vc4->power_lock);
}
void
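The power_refcount scheme introduced here maps any number of in-flight jobs onto a single runtime-PM reference (outline; error handling elided):

	mutex_lock(&vc4->power_lock);
	if (vc4->power_refcount++ == 0)		/* first job powers up */
		pm_runtime_get_sync(&vc4->v3d->pdev->dev);
	mutex_unlock(&vc4->power_lock);

	/* ... job runs on the V3D ... */

	mutex_lock(&vc4->power_lock);
	if (--vc4->power_refcount == 0)		/* last job powers down */
		pm_runtime_put(&vc4->v3d->pdev->dev);
	mutex_unlock(&vc4->power_lock);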
diff --git a/drivers/gpu/drm/vc4/vc4_irq.c b/drivers/gpu/drm/vc4/vc4_irq.c
index b68060e..78a2135 100644
--- a/drivers/gpu/drm/vc4/vc4_irq.c
+++ b/drivers/gpu/drm/vc4/vc4_irq.c
@@ -57,7 +57,7 @@
struct vc4_bo *bo;
bo = vc4_bo_create(dev, 256 * 1024, true);
- if (!bo) {
+ if (IS_ERR(bo)) {
DRM_ERROR("Couldn't allocate binner overflow mem\n");
return;
}
diff --git a/drivers/gpu/drm/vc4/vc4_render_cl.c b/drivers/gpu/drm/vc4/vc4_render_cl.c
index 8a2a312..0f12418 100644
--- a/drivers/gpu/drm/vc4/vc4_render_cl.c
+++ b/drivers/gpu/drm/vc4/vc4_render_cl.c
@@ -316,20 +316,11 @@
size += xtiles * ytiles * loop_body_size;
setup->rcl = &vc4_bo_create(dev, size, true)->base;
- if (!setup->rcl)
- return -ENOMEM;
+ if (IS_ERR(setup->rcl))
+ return PTR_ERR(setup->rcl);
list_add_tail(&to_vc4_bo(&setup->rcl->base)->unref_head,
&exec->unref_list);
- rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
- rcl_u32(setup,
- (setup->color_write ? (setup->color_write->paddr +
- args->color_write.offset) :
- 0));
- rcl_u16(setup, args->width);
- rcl_u16(setup, args->height);
- rcl_u16(setup, args->color_write.bits);
-
/* The tile buffer gets cleared when the previous tile is stored. If
* the clear values changed between frames, then the tile buffer has
* stale clear values in it, so we have to do a store in None mode (no
@@ -349,6 +340,15 @@
rcl_u32(setup, 0); /* no address, since we're in None mode */
}
+ rcl_u8(setup, VC4_PACKET_TILE_RENDERING_MODE_CONFIG);
+ rcl_u32(setup,
+ (setup->color_write ? (setup->color_write->paddr +
+ args->color_write.offset) :
+ 0));
+ rcl_u16(setup, args->width);
+ rcl_u16(setup, args->height);
+ rcl_u16(setup, args->color_write.bits);
+
for (y = min_y_tile; y <= max_y_tile; y++) {
for (x = min_x_tile; x <= max_x_tile; x++) {
bool first = (x == min_x_tile && y == min_y_tile);
diff --git a/drivers/gpu/drm/vc4/vc4_v3d.c b/drivers/gpu/drm/vc4/vc4_v3d.c
index 314ff71..31de5d1 100644
--- a/drivers/gpu/drm/vc4/vc4_v3d.c
+++ b/drivers/gpu/drm/vc4/vc4_v3d.c
@@ -17,6 +17,7 @@
*/
#include "linux/component.h"
+#include "linux/pm_runtime.h"
#include "vc4_drv.h"
#include "vc4_regs.h"
@@ -144,18 +145,6 @@
}
#endif /* CONFIG_DEBUG_FS */
-int
-vc4_v3d_set_power(struct vc4_dev *vc4, bool on)
-{
- /* XXX: This interface is needed for GPU reset, and the way to
- * do it is to turn our power domain off and back on. We
- * can't just reset from within the driver, because the reset
- * bits are in the power domain's register area, and get set
- * during the poweron process.
- */
- return 0;
-}
-
static void vc4_v3d_init_hw(struct drm_device *dev)
{
struct vc4_dev *vc4 = to_vc4_dev(dev);
@@ -167,6 +156,29 @@
V3D_WRITE(V3D_VPMBASE, 0);
}
+#ifdef CONFIG_PM
+static int vc4_v3d_runtime_suspend(struct device *dev)
+{
+ struct vc4_v3d *v3d = dev_get_drvdata(dev);
+ struct vc4_dev *vc4 = v3d->vc4;
+
+ vc4_irq_uninstall(vc4->dev);
+
+ return 0;
+}
+
+static int vc4_v3d_runtime_resume(struct device *dev)
+{
+ struct vc4_v3d *v3d = dev_get_drvdata(dev);
+ struct vc4_dev *vc4 = v3d->vc4;
+
+ vc4_v3d_init_hw(vc4->dev);
+ vc4_irq_postinstall(vc4->dev);
+
+ return 0;
+}
+#endif
+
static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -179,6 +191,8 @@
if (!v3d)
return -ENOMEM;
+ dev_set_drvdata(dev, v3d);
+
v3d->pdev = pdev;
v3d->regs = vc4_ioremap_regs(pdev, 0);
@@ -186,6 +200,7 @@
return PTR_ERR(v3d->regs);
vc4->v3d = v3d;
+ v3d->vc4 = vc4;
if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
@@ -207,6 +222,8 @@
return ret;
}
+ pm_runtime_enable(dev);
+
return 0;
}
@@ -216,6 +233,8 @@
struct drm_device *drm = dev_get_drvdata(master);
struct vc4_dev *vc4 = to_vc4_dev(drm);
+ pm_runtime_disable(dev);
+
drm_irq_uninstall(drm);
/* Disable the binner's overflow memory address, so the next
@@ -228,6 +247,10 @@
vc4->v3d = NULL;
}
+static const struct dev_pm_ops vc4_v3d_pm_ops = {
+ SET_RUNTIME_PM_OPS(vc4_v3d_runtime_suspend, vc4_v3d_runtime_resume, NULL)
+};
+
static const struct component_ops vc4_v3d_ops = {
.bind = vc4_v3d_bind,
.unbind = vc4_v3d_unbind,
@@ -255,5 +278,6 @@
.driver = {
.name = "vc4_v3d",
.of_match_table = vc4_v3d_dt_match,
+ .pm = &vc4_v3d_pm_ops,
},
};
diff --git a/drivers/gpu/drm/vc4/vc4_validate.c b/drivers/gpu/drm/vc4/vc4_validate.c
index e26d9f6..24c2c74 100644
--- a/drivers/gpu/drm/vc4/vc4_validate.c
+++ b/drivers/gpu/drm/vc4/vc4_validate.c
@@ -401,8 +401,8 @@
tile_bo = vc4_bo_create(dev, exec->tile_alloc_offset + tile_alloc_size,
true);
exec->tile_bo = &tile_bo->base;
- if (!exec->tile_bo)
- return -ENOMEM;
+ if (IS_ERR(exec->tile_bo))
+ return PTR_ERR(exec->tile_bo);
list_add_tail(&tile_bo->unref_head, &exec->unref_list);
/* tile alloc address. */
diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
index 4ebc796..2f8c0f4 100644
--- a/drivers/hv/hyperv_vmbus.h
+++ b/drivers/hv/hyperv_vmbus.h
@@ -256,12 +256,6 @@
u8 rsvdz4[1984];
};
-/* Declare the various hypercall operations. */
-enum hv_call_code {
- HVCALL_POST_MESSAGE = 0x005c,
- HVCALL_SIGNAL_EVENT = 0x005d,
-};
-
/* Definition of the hv_post_message hypercall input structure. */
struct hv_input_post_message {
union hv_connection_id connectionid;
diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c
index f155b83..2b3105c 100644
--- a/drivers/hwmon/ads1015.c
+++ b/drivers/hwmon/ads1015.c
@@ -126,7 +126,7 @@
struct ads1015_data *data = i2c_get_clientdata(client);
unsigned int pga = data->channel_data[channel].pga;
int fullscale = fullscale_table[pga];
- const unsigned mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
+ const int mask = data->id == ads1115 ? 0x7fff : 0x7ff0;
return DIV_ROUND_CLOSEST(reg * fullscale, mask);
}
diff --git a/drivers/hwmon/gpio-fan.c b/drivers/hwmon/gpio-fan.c
index 82de3de..685568b 100644
--- a/drivers/hwmon/gpio-fan.c
+++ b/drivers/hwmon/gpio-fan.c
@@ -406,16 +406,11 @@
unsigned long *state)
{
struct gpio_fan_data *fan_data = cdev->devdata;
- int r;
if (!fan_data)
return -EINVAL;
- r = get_fan_speed_index(fan_data);
- if (r < 0)
- return r;
-
- *state = r;
+ *state = fan_data->speed_index;
return 0;
}
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index f62d697..27fa0cb 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1271,6 +1271,8 @@
switch (dev->device) {
case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_SMBUS:
case PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_SMBUS:
+ case PCI_DEVICE_ID_INTEL_LEWISBURG_SMBUS:
+ case PCI_DEVICE_ID_INTEL_LEWISBURG_SSKU_SMBUS:
case PCI_DEVICE_ID_INTEL_DNV_SMBUS:
priv->features |= FEATURE_I2C_BLOCK_READ;
priv->features |= FEATURE_IRQ;
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 08d26ba..13c4529 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1450,7 +1450,8 @@
err_unuse_clocks:
omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
- pm_runtime_put(omap->dev);
+ pm_runtime_dont_use_autosuspend(omap->dev);
+ pm_runtime_put_sync(omap->dev);
pm_runtime_disable(&pdev->dev);
err_free_mem:
@@ -1468,6 +1469,7 @@
return ret;
omap_i2c_write_reg(omap, OMAP_I2C_CON_REG, 0);
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
return 0;
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index f3e5ff8..213ba55 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -467,7 +467,7 @@
bus_speed = UNIPHIER_FI2C_DEFAULT_SPEED;
if (!bus_speed) {
- dev_err(dev, "clock-freqyency should not be zero\n");
+ dev_err(dev, "clock-frequency should not be zero\n");
return -EINVAL;
}
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 1f4f3f5..89eaa8a 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -328,7 +328,7 @@
bus_speed = UNIPHIER_I2C_DEFAULT_SPEED;
if (!bus_speed) {
- dev_err(dev, "clock-freqyency should not be zero\n");
+ dev_err(dev, "clock-frequency should not be zero\n");
return -EINVAL;
}
diff --git a/drivers/infiniband/core/sysfs.c b/drivers/infiniband/core/sysfs.c
index 3de9351..14606af 100644
--- a/drivers/infiniband/core/sysfs.c
+++ b/drivers/infiniband/core/sysfs.c
@@ -336,7 +336,6 @@
union ib_gid gid;
struct ib_gid_attr gid_attr = {};
ssize_t ret;
- va_list args;
ret = ib_query_gid(p->ibdev, p->port_num, tab_attr->index, &gid,
&gid_attr);
@@ -348,7 +347,6 @@
err:
if (gid_attr.ndev)
dev_put(gid_attr.ndev);
- va_end(args);
return ret;
}
@@ -722,12 +720,11 @@
if (get_perf_mad(dev, port_num, IB_PMA_CLASS_PORT_INFO,
&cpi, 40, sizeof(cpi)) >= 0) {
-
- if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH)
+ if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH)
/* We have extended counters */
return &pma_group_ext;
- if (cpi.capability_mask && IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
+ if (cpi.capability_mask & IB_PMA_CLASS_CAP_EXT_WIDTH_NOIETF)
/* But not the IETF ones */
return &pma_group_noietf;
}
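The distinction in miniature: "&&" reduces both operands to 0/1 before combining, so any non-zero capability_mask used to select the extended counter group no matter which bits were set; a bit test needs the bitwise operator (hypothetical mask values, invented for illustration):

	#define CAP_EXT_WIDTH	0x0200	/* invented bit for illustration */
	u16 mask = 0x0100;		/* some unrelated capability */
	/* old: (mask && CAP_EXT_WIDTH) == 1 -> branch wrongly taken */
	/* new: (mask &  CAP_EXT_WIDTH) == 0 -> branch correctly skipped */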
diff --git a/drivers/infiniband/hw/mlx4/mad.c b/drivers/infiniband/hw/mlx4/mad.c
index 26833bf..d68f506 100644
--- a/drivers/infiniband/hw/mlx4/mad.c
+++ b/drivers/infiniband/hw/mlx4/mad.c
@@ -817,17 +817,48 @@
return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
-static void edit_counter(struct mlx4_counter *cnt,
- struct ib_pma_portcounters *pma_cnt)
+static void edit_counter(struct mlx4_counter *cnt, void *counters,
+ __be16 attr_id)
{
- ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
- (be64_to_cpu(cnt->tx_bytes) >> 2));
- ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
- (be64_to_cpu(cnt->rx_bytes) >> 2));
- ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
- be64_to_cpu(cnt->tx_frames));
- ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
- be64_to_cpu(cnt->rx_frames));
+ switch (attr_id) {
+ case IB_PMA_PORT_COUNTERS:
+ {
+ struct ib_pma_portcounters *pma_cnt =
+ (struct ib_pma_portcounters *)counters;
+
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_data,
+ (be64_to_cpu(cnt->tx_bytes) >> 2));
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_data,
+ (be64_to_cpu(cnt->rx_bytes) >> 2));
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_xmit_packets,
+ be64_to_cpu(cnt->tx_frames));
+ ASSIGN_32BIT_COUNTER(pma_cnt->port_rcv_packets,
+ be64_to_cpu(cnt->rx_frames));
+ break;
+ }
+ case IB_PMA_PORT_COUNTERS_EXT:
+ {
+ struct ib_pma_portcounters_ext *pma_cnt_ext =
+ (struct ib_pma_portcounters_ext *)counters;
+
+ pma_cnt_ext->port_xmit_data =
+ cpu_to_be64(be64_to_cpu(cnt->tx_bytes) >> 2);
+ pma_cnt_ext->port_rcv_data =
+ cpu_to_be64(be64_to_cpu(cnt->rx_bytes) >> 2);
+ pma_cnt_ext->port_xmit_packets = cnt->tx_frames;
+ pma_cnt_ext->port_rcv_packets = cnt->rx_frames;
+ break;
+ }
+ }
+}
+
+static int iboe_process_mad_port_info(void *out_mad)
+{
+ struct ib_class_port_info cpi = {};
+
+ cpi.capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH;
+ memcpy(out_mad, &cpi, sizeof(cpi));
+ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
}
static int iboe_process_mad(struct ib_device *ibdev, int mad_flags, u8 port_num,
@@ -842,6 +873,9 @@
if (in_mad->mad_hdr.mgmt_class != IB_MGMT_CLASS_PERF_MGMT)
return -EINVAL;
+ if (in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)
+ return iboe_process_mad_port_info((void *)(out_mad->data + 40));
+
memset(&counter_stats, 0, sizeof(counter_stats));
mutex_lock(&dev->counters_table[port_num - 1].mutex);
list_for_each_entry(tmp_counter,
@@ -863,7 +897,8 @@
switch (counter_stats.counter_mode & 0xf) {
case 0:
edit_counter(&counter_stats,
- (void *)(out_mad->data + 40));
+ (void *)(out_mad->data + 40),
+ in_mad->mad_hdr.attr_id);
err = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY;
break;
default:
@@ -894,8 +929,10 @@
*/
if (link == IB_LINK_LAYER_INFINIBAND) {
if (mlx4_is_slave(dev->dev) &&
- in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
- in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS)
+ (in_mad->mad_hdr.mgmt_class == IB_MGMT_CLASS_PERF_MGMT &&
+ (in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS ||
+ in_mad->mad_hdr.attr_id == IB_PMA_PORT_COUNTERS_EXT ||
+ in_mad->mad_hdr.attr_id == IB_PMA_CLASS_PORT_INFO)))
return iboe_process_mad(ibdev, mad_flags, port_num, in_wc,
in_grh, in_mad, out_mad);
diff --git a/drivers/infiniband/hw/mlx4/qp.c b/drivers/infiniband/hw/mlx4/qp.c
index bc5536f..fd97534 100644
--- a/drivers/infiniband/hw/mlx4/qp.c
+++ b/drivers/infiniband/hw/mlx4/qp.c
@@ -1681,9 +1681,12 @@
}
if (qp->ibqp.uobject)
- context->usr_page = cpu_to_be32(to_mucontext(ibqp->uobject->context)->uar.index);
+ context->usr_page = cpu_to_be32(
+ mlx4_to_hw_uar_index(dev->dev,
+ to_mucontext(ibqp->uobject->context)->uar.index));
else
- context->usr_page = cpu_to_be32(dev->priv_uar.index);
+ context->usr_page = cpu_to_be32(
+ mlx4_to_hw_uar_index(dev->dev, dev->priv_uar.index));
if (attr_mask & IB_QP_DEST_QPN)
context->remote_qpn = cpu_to_be32(attr->dest_qp_num);
diff --git a/drivers/infiniband/hw/mlx5/qp.c b/drivers/infiniband/hw/mlx5/qp.c
index 9116bc3..34cb8e8 100644
--- a/drivers/infiniband/hw/mlx5/qp.c
+++ b/drivers/infiniband/hw/mlx5/qp.c
@@ -270,8 +270,10 @@
/* fall through */
case IB_QPT_RC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_atomic_seg) +
- sizeof(struct mlx5_wqe_raddr_seg);
+ max(sizeof(struct mlx5_wqe_atomic_seg) +
+ sizeof(struct mlx5_wqe_raddr_seg),
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg));
break;
case IB_QPT_XRC_TGT:
@@ -279,9 +281,9 @@
case IB_QPT_UC:
size += sizeof(struct mlx5_wqe_ctrl_seg) +
- sizeof(struct mlx5_wqe_raddr_seg) +
- sizeof(struct mlx5_wqe_umr_ctrl_seg) +
- sizeof(struct mlx5_mkey_seg);
+ max(sizeof(struct mlx5_wqe_raddr_seg),
+ sizeof(struct mlx5_wqe_umr_ctrl_seg) +
+ sizeof(struct mlx5_mkey_seg));
break;
case IB_QPT_UD:
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma.h b/drivers/infiniband/hw/ocrdma/ocrdma.h
index 040bb8b..12503f1 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma.h
@@ -323,9 +323,6 @@
*/
u32 max_hw_cqe;
bool phase_change;
- bool deferred_arm, deferred_sol;
- bool first_arm;
-
spinlock_t cq_lock ____cacheline_aligned; /* provide synchronization
* to cq polling
*/
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_main.c b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
index 5738493..f387430 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_main.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_main.c
@@ -228,6 +228,11 @@
ocrdma_alloc_pd_pool(dev);
+ if (!ocrdma_alloc_stats_resources(dev)) {
+ pr_err("%s: stats resource allocation failed\n", __func__);
+ goto alloc_err;
+ }
+
spin_lock_init(&dev->av_tbl.lock);
spin_lock_init(&dev->flush_q_lock);
return 0;
@@ -238,6 +243,7 @@
static void ocrdma_free_resources(struct ocrdma_dev *dev)
{
+ ocrdma_release_stats_resources(dev);
kfree(dev->stag_arr);
kfree(dev->qp_tbl);
kfree(dev->cq_tbl);
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
index 86c303a..255f774 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.c
@@ -64,10 +64,11 @@
return cpy_len;
}
-static bool ocrdma_alloc_stats_mem(struct ocrdma_dev *dev)
+bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev)
{
struct stats_mem *mem = &dev->stats_mem;
+ mutex_init(&dev->stats_lock);
/* Alloc mbox command mem */
mem->size = max_t(u32, sizeof(struct ocrdma_rdma_stats_req),
sizeof(struct ocrdma_rdma_stats_resp));
@@ -91,13 +92,14 @@
return true;
}
-static void ocrdma_release_stats_mem(struct ocrdma_dev *dev)
+void ocrdma_release_stats_resources(struct ocrdma_dev *dev)
{
struct stats_mem *mem = &dev->stats_mem;
if (mem->va)
dma_free_coherent(&dev->nic_info.pdev->dev, mem->size,
mem->va, mem->pa);
+ mem->va = NULL;
kfree(mem->debugfs_mem);
}
@@ -838,15 +840,9 @@
&dev->reset_stats, &ocrdma_dbg_ops))
goto err;
- /* Now create dma_mem for stats mbx command */
- if (!ocrdma_alloc_stats_mem(dev))
- goto err;
-
- mutex_init(&dev->stats_lock);
return;
err:
- ocrdma_release_stats_mem(dev);
debugfs_remove_recursive(dev->dir);
dev->dir = NULL;
}
@@ -855,9 +851,7 @@
{
if (!dev->dir)
return;
- debugfs_remove(dev->dir);
- mutex_destroy(&dev->stats_lock);
- ocrdma_release_stats_mem(dev);
+ debugfs_remove_recursive(dev->dir);
}
void ocrdma_init_debugfs(void)
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
index c9e58d0..bba1fec 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_stats.h
@@ -65,6 +65,8 @@
void ocrdma_rem_debugfs(void);
void ocrdma_init_debugfs(void);
+bool ocrdma_alloc_stats_resources(struct ocrdma_dev *dev);
+void ocrdma_release_stats_resources(struct ocrdma_dev *dev);
void ocrdma_rem_port_stats(struct ocrdma_dev *dev);
void ocrdma_add_port_stats(struct ocrdma_dev *dev);
int ocrdma_pma_counters(struct ocrdma_dev *dev,
diff --git a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
index d4c687b..12420e4 100644
--- a/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
+++ b/drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
@@ -125,8 +125,8 @@
IB_DEVICE_SYS_IMAGE_GUID |
IB_DEVICE_LOCAL_DMA_LKEY |
IB_DEVICE_MEM_MGT_EXTENSIONS;
- attr->max_sge = min(dev->attr.max_send_sge, dev->attr.max_srq_sge);
- attr->max_sge_rd = 0;
+ attr->max_sge = dev->attr.max_send_sge;
+ attr->max_sge_rd = attr->max_sge;
attr->max_cq = dev->attr.max_cq;
attr->max_cqe = dev->attr.max_cqe;
attr->max_mr = dev->attr.max_mr;
@@ -1094,7 +1094,6 @@
spin_lock_init(&cq->comp_handler_lock);
INIT_LIST_HEAD(&cq->sq_head);
INIT_LIST_HEAD(&cq->rq_head);
- cq->first_arm = true;
if (ib_ctx) {
uctx = get_ocrdma_ucontext(ib_ctx);
@@ -2726,8 +2725,7 @@
OCRDMA_CQE_UD_STATUS_MASK) >> OCRDMA_CQE_UD_STATUS_SHIFT;
ibwc->src_qp = le32_to_cpu(cqe->flags_status_srcqpn) &
OCRDMA_CQE_SRCQP_MASK;
- ibwc->pkey_index = le32_to_cpu(cqe->ud.rxlen_pkey) &
- OCRDMA_CQE_PKEY_MASK;
+ ibwc->pkey_index = 0;
ibwc->wc_flags = IB_WC_GRH;
ibwc->byte_len = (le32_to_cpu(cqe->ud.rxlen_pkey) >>
OCRDMA_CQE_UD_XFER_LEN_SHIFT);
@@ -2911,12 +2909,9 @@
}
stop_cqe:
cq->getp = cur_getp;
- if (cq->deferred_arm || polled_hw_cqes) {
- ocrdma_ring_cq_db(dev, cq->id, cq->deferred_arm,
- cq->deferred_sol, polled_hw_cqes);
- cq->deferred_arm = false;
- cq->deferred_sol = false;
- }
+
+ if (polled_hw_cqes)
+ ocrdma_ring_cq_db(dev, cq->id, false, false, polled_hw_cqes);
return i;
}
@@ -3000,13 +2995,7 @@
if (cq_flags & IB_CQ_SOLICITED)
sol_needed = true;
- if (cq->first_arm) {
- ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
- cq->first_arm = false;
- }
-
- cq->deferred_arm = true;
- cq->deferred_sol = sol_needed;
+ ocrdma_ring_cq_db(dev, cq_id, arm_needed, sol_needed, 0);
spin_unlock_irqrestore(&cq->cq_lock, flags);
return 0;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_ib.c b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
index 5ea0c14..fa9c42f 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_ib.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_ib.c
@@ -245,8 +245,6 @@
skb_reset_mac_header(skb);
skb_pull(skb, IPOIB_ENCAP_LEN);
- skb->truesize = SKB_TRUESIZE(skb->len);
-
++dev->stats.rx_packets;
dev->stats.rx_bytes += skb->len;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 050dfa1..2588931 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -456,7 +456,10 @@
return status;
}
-static void ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
+/*
+ * Caller must hold 'priv->lock'
+ */
+static int ipoib_mcast_join(struct net_device *dev, struct ipoib_mcast *mcast)
{
struct ipoib_dev_priv *priv = netdev_priv(dev);
struct ib_sa_multicast *multicast;
@@ -466,6 +469,10 @@
ib_sa_comp_mask comp_mask;
int ret = 0;
+ if (!priv->broadcast ||
+ !test_bit(IPOIB_FLAG_OPER_UP, &priv->flags))
+ return -EINVAL;
+
ipoib_dbg_mcast(priv, "joining MGID %pI6\n", mcast->mcmember.mgid.raw);
rec.mgid = mcast->mcmember.mgid;
@@ -525,20 +532,23 @@
rec.join_state = 4;
#endif
}
+ spin_unlock_irq(&priv->lock);
multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
&rec, comp_mask, GFP_KERNEL,
ipoib_mcast_join_complete, mcast);
+ spin_lock_irq(&priv->lock);
if (IS_ERR(multicast)) {
ret = PTR_ERR(multicast);
ipoib_warn(priv, "ib_sa_join_multicast failed, status %d\n", ret);
- spin_lock_irq(&priv->lock);
/* Requeue this join task with a backoff delay */
__ipoib_mcast_schedule_join_thread(priv, mcast, 1);
clear_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
spin_unlock_irq(&priv->lock);
complete(&mcast->done);
+ spin_lock_irq(&priv->lock);
}
+ return 0;
}
void ipoib_mcast_join_task(struct work_struct *work)
@@ -620,9 +630,10 @@
/* Found the next unjoined group */
init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
- spin_unlock_irq(&priv->lock);
- ipoib_mcast_join(dev, mcast);
- spin_lock_irq(&priv->lock);
+ if (ipoib_mcast_join(dev, mcast)) {
+ spin_unlock_irq(&priv->lock);
+ return;
+ }
} else if (!delay_until ||
time_before(mcast->delay_until, delay_until))
delay_until = mcast->delay_until;
@@ -641,10 +652,9 @@
if (mcast) {
init_completion(&mcast->done);
set_bit(IPOIB_MCAST_FLAG_BUSY, &mcast->flags);
+ ipoib_mcast_join(dev, mcast);
}
spin_unlock_irq(&priv->lock);
- if (mcast)
- ipoib_mcast_join(dev, mcast);
}
int ipoib_mcast_start_thread(struct net_device *dev)
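The locking change in ipoib_mcast_join(), reduced to its shape: ib_sa_join_multicast() can sleep, so the spinlock the caller holds is dropped around it and retaken afterwards (names from the hunks above):

	spin_unlock_irq(&priv->lock);
	multicast = ib_sa_join_multicast(&ipoib_sa_client, priv->ca, priv->port,
					 &rec, comp_mask, GFP_KERNEL,
					 ipoib_mcast_join_complete, mcast);
	spin_lock_irq(&priv->lock);	/* shared state may have changed */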
diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
index 6727954..e8a84d1 100644
--- a/drivers/input/joystick/xpad.c
+++ b/drivers/input/joystick/xpad.c
@@ -1207,7 +1207,6 @@
#else
static int xpad_led_probe(struct usb_xpad *xpad) { return 0; }
static void xpad_led_disconnect(struct usb_xpad *xpad) { }
-static void xpad_identify_controller(struct usb_xpad *xpad) { }
#endif
static int xpad_start_input(struct usb_xpad *xpad)
diff --git a/drivers/input/keyboard/adp5589-keys.c b/drivers/input/keyboard/adp5589-keys.c
index 4d446d5..c01a1d6 100644
--- a/drivers/input/keyboard/adp5589-keys.c
+++ b/drivers/input/keyboard/adp5589-keys.c
@@ -235,7 +235,7 @@
unsigned short gpimapsize;
unsigned extend_cfg;
bool is_adp5585;
- bool adp5585_support_row5;
+ bool support_row5;
#ifdef CONFIG_GPIOLIB
unsigned char gpiomap[ADP5589_MAXGPIO];
bool export_gpio;
@@ -485,7 +485,7 @@
if (kpad->extend_cfg & C4_EXTEND_CFG)
pin_used[kpad->var->c4_extend_cfg] = true;
- if (!kpad->adp5585_support_row5)
+ if (!kpad->support_row5)
pin_used[5] = true;
for (i = 0; i < kpad->var->maxgpio; i++)
@@ -884,12 +884,13 @@
switch (id->driver_data) {
case ADP5585_02:
- kpad->adp5585_support_row5 = true;
+ kpad->support_row5 = true;
case ADP5585_01:
kpad->is_adp5585 = true;
kpad->var = &const_adp5585;
break;
case ADP5589:
+ kpad->support_row5 = true;
kpad->var = &const_adp5589;
break;
}
diff --git a/drivers/input/keyboard/cap11xx.c b/drivers/input/keyboard/cap11xx.c
index 378db10..4401be2 100644
--- a/drivers/input/keyboard/cap11xx.c
+++ b/drivers/input/keyboard/cap11xx.c
@@ -304,8 +304,10 @@
led->cdev.brightness = LED_OFF;
error = of_property_read_u32(child, "reg", &reg);
- if (error != 0 || reg >= num_leds)
+ if (error != 0 || reg >= num_leds) {
+ of_node_put(child);
return -EINVAL;
+ }
led->reg = reg;
led->priv = priv;
@@ -313,8 +315,10 @@
INIT_WORK(&led->work, cap11xx_led_work);
error = devm_led_classdev_register(dev, &led->cdev);
- if (error)
+ if (error) {
+ of_node_put(child);
return error;
+ }
priv->num_leds++;
led++;
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index d6d16fa..1f2337a 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -733,7 +733,7 @@
module will be called xen-kbdfront.
config INPUT_SIRFSOC_ONKEY
- bool "CSR SiRFSoC power on/off/suspend key support"
+ tristate "CSR SiRFSoC power on/off/suspend key support"
depends on ARCH_SIRF && OF
default y
help
diff --git a/drivers/input/misc/sirfsoc-onkey.c b/drivers/input/misc/sirfsoc-onkey.c
index 9d5b89b..ed7237f 100644
--- a/drivers/input/misc/sirfsoc-onkey.c
+++ b/drivers/input/misc/sirfsoc-onkey.c
@@ -101,7 +101,7 @@
static const struct of_device_id sirfsoc_pwrc_of_match[] = {
{ .compatible = "sirf,prima2-pwrc" },
{},
-}
+};
MODULE_DEVICE_TABLE(of, sirfsoc_pwrc_of_match);
static int sirfsoc_pwrc_probe(struct platform_device *pdev)
diff --git a/drivers/input/mouse/vmmouse.c b/drivers/input/mouse/vmmouse.c
index e272f06..a3f0f5a 100644
--- a/drivers/input/mouse/vmmouse.c
+++ b/drivers/input/mouse/vmmouse.c
@@ -458,8 +458,6 @@
priv->abs_dev = abs_dev;
psmouse->private = priv;
- input_set_capability(rel_dev, EV_REL, REL_WHEEL);
-
/* Set up and register absolute device */
snprintf(priv->phys, sizeof(priv->phys), "%s/input1",
psmouse->ps2dev.serio->phys);
@@ -475,10 +473,6 @@
abs_dev->id.version = psmouse->model;
abs_dev->dev.parent = &psmouse->ps2dev.serio->dev;
- error = input_register_device(priv->abs_dev);
- if (error)
- goto init_fail;
-
/* Set absolute device capabilities */
input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
input_set_capability(abs_dev, EV_KEY, BTN_RIGHT);
@@ -488,6 +482,13 @@
input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
input_set_abs_params(abs_dev, ABS_Y, 0, VMMOUSE_MAX_Y, 0, 0);
+ error = input_register_device(priv->abs_dev);
+ if (error)
+ goto init_fail;
+
+ /* Add wheel capability to the relative device */
+ input_set_capability(rel_dev, EV_REL, REL_WHEEL);
+
psmouse->protocol_handler = vmmouse_process_byte;
psmouse->disconnect = vmmouse_disconnect;
psmouse->reconnect = vmmouse_reconnect;
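The reordering follows the usual input-core rule: declare every capability before input_register_device(), because registration publishes the device to handlers immediately (abbreviated sketch):

	input_set_capability(abs_dev, EV_KEY, BTN_LEFT);
	input_set_abs_params(abs_dev, ABS_X, 0, VMMOUSE_MAX_X, 0, 0);
	/* ... remaining capabilities ... */
	error = input_register_device(priv->abs_dev);	/* publish last */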
diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
index 8f82897..1ca7f55 100644
--- a/drivers/input/serio/serio.c
+++ b/drivers/input/serio/serio.c
@@ -134,7 +134,7 @@
int error;
error = device_attach(&serio->dev);
- if (error < 0)
+ if (error < 0 && error != -EPROBE_DEFER)
dev_warn(&serio->dev,
"device_attach() failed for %s (%s), error: %d\n",
serio->phys, serio->name, error);
diff --git a/drivers/input/touchscreen/colibri-vf50-ts.c b/drivers/input/touchscreen/colibri-vf50-ts.c
index 5d4903a..69828d0 100644
--- a/drivers/input/touchscreen/colibri-vf50-ts.c
+++ b/drivers/input/touchscreen/colibri-vf50-ts.c
@@ -21,6 +21,7 @@
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/of.h>
#include <linux/pinctrl/consumer.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
diff --git a/drivers/input/touchscreen/edt-ft5x06.c b/drivers/input/touchscreen/edt-ft5x06.c
index 0b0f8c1..23fbe38 100644
--- a/drivers/input/touchscreen/edt-ft5x06.c
+++ b/drivers/input/touchscreen/edt-ft5x06.c
@@ -822,16 +822,22 @@
int error;
error = device_property_read_u32(dev, "threshold", &val);
- if (!error)
- reg_addr->reg_threshold = val;
+ if (!error) {
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_threshold, val);
+ tsdata->threshold = val;
+ }
error = device_property_read_u32(dev, "gain", &val);
- if (!error)
- reg_addr->reg_gain = val;
+ if (!error) {
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_gain, val);
+ tsdata->gain = val;
+ }
error = device_property_read_u32(dev, "offset", &val);
- if (!error)
- reg_addr->reg_offset = val;
+ if (!error) {
+ edt_ft5x06_register_write(tsdata, reg_addr->reg_offset, val);
+ tsdata->offset = val;
+ }
}
static void
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 62a400c..fb092f3 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1353,7 +1353,7 @@
raw_spin_lock_irqsave(&iommu->register_lock, flags);
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+ sts = readl(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_QIES))
goto end;
diff --git a/drivers/iommu/intel-svm.c b/drivers/iommu/intel-svm.c
index 5046483..d9939fa 100644
--- a/drivers/iommu/intel-svm.c
+++ b/drivers/iommu/intel-svm.c
@@ -249,12 +249,30 @@
static void intel_mm_release(struct mmu_notifier *mn, struct mm_struct *mm)
{
struct intel_svm *svm = container_of(mn, struct intel_svm, notifier);
+ struct intel_svm_dev *sdev;
+ /* This might end up being called from exit_mmap(), *before* the page
+ * tables are cleared. And __mmu_notifier_release() will delete us from
+ * the list of notifiers so that our invalidate_range() callback doesn't
+ * get called when the page tables are cleared. So we need to protect
+ * against hardware accessing those page tables.
+ *
+ * We do it by clearing the entry in the PASID table and then flushing
+ * the IOTLB and the PASID table caches. This might upset hardware;
+ * perhaps we'll want to point the PASID to a dummy PGD (like the zero
+ * page) so that we end up taking a fault that the hardware really
+ * *has* to handle gracefully without affecting other processes.
+ */
svm->iommu->pasid_table[svm->pasid].val = 0;
+ wmb();
- /* There's no need to do any flush because we can't get here if there
- * are any devices left anyway. */
- WARN_ON(!list_empty(&svm->devs));
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdev, &svm->devs, list) {
+ intel_flush_pasid_dev(svm, sdev, svm->pasid);
+ intel_flush_svm_range_dev(svm, sdev, 0, -1, 0, !svm->mm);
+ }
+ rcu_read_unlock();
+
}
static const struct mmu_notifier_ops intel_mmuops = {
@@ -379,7 +397,6 @@
goto out;
}
iommu->pasid_table[svm->pasid].val = (u64)__pa(mm->pgd) | 1;
- mm = NULL;
} else
iommu->pasid_table[svm->pasid].val = (u64)__pa(init_mm.pgd) | 1 | (1ULL << 11);
wmb();
@@ -442,11 +459,11 @@
kfree_rcu(sdev, rcu);
if (list_empty(&svm->devs)) {
- mmu_notifier_unregister(&svm->notifier, svm->mm);
idr_remove(&svm->iommu->pasid_idr, svm->pasid);
if (svm->mm)
- mmput(svm->mm);
+ mmu_notifier_unregister(&svm->notifier, svm->mm);
+
/* We mandate that no page faults may be outstanding
* for the PASID when intel_svm_unbind_mm() is called.
* If that is not obeyed, subtle errors will happen.
@@ -507,6 +524,10 @@
struct intel_svm *svm = NULL;
int head, tail, handled = 0;
+ /* Clear PPR bit before reading head/tail registers, to
+ * ensure that we get a new interrupt if needed. */
+ writel(DMA_PRS_PPR, iommu->reg + DMAR_PRS_REG);
+
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
while (head != tail) {
@@ -551,6 +572,9 @@
* any faults on kernel addresses. */
if (!svm->mm)
goto bad_req;
+ /* If the mm is already defunct, don't handle faults. */
+ if (!atomic_inc_not_zero(&svm->mm->mm_users))
+ goto bad_req;
down_read(&svm->mm->mmap_sem);
vma = find_extend_vma(svm->mm, address);
if (!vma || address < vma->vm_start)
@@ -567,6 +591,7 @@
result = QI_RESP_SUCCESS;
invalid:
up_read(&svm->mm->mmap_sem);
+ mmput(svm->mm);
bad_req:
/* Accounting for major/minor faults? */
rcu_read_lock();
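The page-request path now uses the standard take-a-reference-or-bail pattern for an mm that may be exiting (assembled from the hunks above):

	if (!atomic_inc_not_zero(&svm->mm->mm_users))
		goto bad_req;			/* mm already defunct */
	down_read(&svm->mm->mmap_sem);
	/* ... find the VMA and resolve the fault ... */
	up_read(&svm->mm->mmap_sem);
	mmput(svm->mm);				/* balances the inc above */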
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index c12ba45..ac59692 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -629,7 +629,7 @@
raw_spin_lock_irqsave(&iommu->register_lock, flags);
- sts = dmar_readq(iommu->reg + DMAR_GSTS_REG);
+ sts = readl(iommu->reg + DMAR_GSTS_REG);
if (!(sts & DMA_GSTS_IRES))
goto end;
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 3447549..43dfd15 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -66,7 +66,10 @@
unsigned long phys_base;
struct its_cmd_block *cmd_base;
struct its_cmd_block *cmd_write;
- void *tables[GITS_BASER_NR_REGS];
+ struct {
+ void *base;
+ u32 order;
+ } tables[GITS_BASER_NR_REGS];
struct its_collection *collections;
struct list_head its_device_list;
u64 flags;
@@ -75,6 +78,9 @@
#define ITS_ITT_ALIGN SZ_256
+/* Convert page order to size in bytes */
+#define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
+
struct event_lpi_map {
unsigned long *lpi_map;
u16 *col_map;
@@ -597,11 +603,6 @@
lpi_set_config(d, true);
}
-static void its_eoi_irq(struct irq_data *d)
-{
- gic_write_eoir(d->hwirq);
-}
-
static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
bool force)
{
@@ -638,7 +639,7 @@
.name = "ITS",
.irq_mask = its_mask_irq,
.irq_unmask = its_unmask_irq,
- .irq_eoi = its_eoi_irq,
+ .irq_eoi = irq_chip_eoi_parent,
.irq_set_affinity = its_set_affinity,
.irq_compose_msi_msg = its_irq_compose_msi_msg,
};
@@ -807,9 +808,10 @@
int i;
for (i = 0; i < GITS_BASER_NR_REGS; i++) {
- if (its->tables[i]) {
- free_page((unsigned long)its->tables[i]);
- its->tables[i] = NULL;
+ if (its->tables[i].base) {
+ free_pages((unsigned long)its->tables[i].base,
+ its->tables[i].order);
+ its->tables[i].base = NULL;
}
}
}
@@ -842,7 +844,6 @@
u64 type = GITS_BASER_TYPE(val);
u64 entry_size = GITS_BASER_ENTRY_SIZE(val);
int order = get_order(psz);
- int alloc_size;
int alloc_pages;
u64 tmp;
void *base;
@@ -874,9 +875,8 @@
}
}
- alloc_size = (1 << order) * PAGE_SIZE;
retry_alloc_baser:
- alloc_pages = (alloc_size / psz);
+ alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
if (alloc_pages > GITS_BASER_PAGES_MAX) {
alloc_pages = GITS_BASER_PAGES_MAX;
order = get_order(GITS_BASER_PAGES_MAX * psz);
@@ -890,7 +890,8 @@
goto out_free;
}
- its->tables[i] = base;
+ its->tables[i].base = base;
+ its->tables[i].order = order;
retry_baser:
val = (virt_to_phys(base) |
@@ -928,7 +929,7 @@
shr = tmp & GITS_BASER_SHAREABILITY_MASK;
if (!shr) {
cache = GITS_BASER_nC;
- __flush_dcache_area(base, alloc_size);
+ __flush_dcache_area(base, PAGE_ORDER_TO_SIZE(order));
}
goto retry_baser;
}
@@ -940,7 +941,7 @@
* something is horribly wrong...
*/
free_pages((unsigned long)base, order);
- its->tables[i] = NULL;
+ its->tables[i].base = NULL;
switch (psz) {
case SZ_16K:
@@ -961,7 +962,7 @@
}
pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n",
- (int)(alloc_size / entry_size),
+ (int)(PAGE_ORDER_TO_SIZE(order) / entry_size),
its_base_type_string[type],
(unsigned long)virt_to_phys(base),
psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
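The new macro is plain shift arithmetic; with 4 KiB pages, orders 0, 1 and 2 correspond to 4, 8 and 16 KiB:

	#define PAGE_ORDER_TO_SIZE(o)	(PAGE_SIZE << (o))
	/* PAGE_SIZE == 4096: PAGE_ORDER_TO_SIZE(2) == 16384, i.e. 4 pages */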
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 911758c..8f9ebf7 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -384,9 +384,6 @@
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoi_irq,
.irq_set_type = gic_set_type,
-#ifdef CONFIG_SMP
- .irq_set_affinity = gic_set_affinity,
-#endif
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
.flags = IRQCHIP_SET_TYPE_MASKED |
@@ -400,9 +397,6 @@
.irq_unmask = gic_unmask_irq,
.irq_eoi = gic_eoimode1_eoi_irq,
.irq_set_type = gic_set_type,
-#ifdef CONFIG_SMP
- .irq_set_affinity = gic_set_affinity,
-#endif
.irq_get_irqchip_state = gic_irq_get_irqchip_state,
.irq_set_irqchip_state = gic_irq_set_irqchip_state,
.irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity,
@@ -443,7 +437,7 @@
u32 bypass = 0;
u32 mode = 0;
- if (static_key_true(&supports_deactivate))
+ if (gic == &gic_data[0] && static_key_true(&supports_deactivate))
mode = GIC_CPU_CTRL_EOImodeNS;
/*
@@ -1039,6 +1033,11 @@
gic->chip.name = kasprintf(GFP_KERNEL, "GIC-%d", gic_nr);
}
+#ifdef CONFIG_SMP
+ if (gic_nr == 0)
+ gic->chip.irq_set_affinity = gic_set_affinity;
+#endif
+
#ifdef CONFIG_GIC_NON_BANKED
if (percpu_offset) { /* Franken-GIC without banked registers... */
unsigned int cpu;
diff --git a/drivers/irqchip/irq-sun4i.c b/drivers/irqchip/irq-sun4i.c
index 0704362..376b280 100644
--- a/drivers/irqchip/irq-sun4i.c
+++ b/drivers/irqchip/irq-sun4i.c
@@ -22,7 +22,6 @@
#include <linux/of_irq.h>
#include <asm/exception.h>
-#include <asm/mach/irq.h>
#define SUN4I_IRQ_VECTOR_REG 0x00
#define SUN4I_IRQ_PROTECTION_REG 0x08
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 2a506fe..d1f8ab9 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -373,13 +373,7 @@
static void gigaset_device_release(struct device *dev)
{
- struct cardstate *cs = dev_get_drvdata(dev);
-
- if (!cs)
- return;
- dev_set_drvdata(dev, NULL);
- kfree(cs->hw.ser);
- cs->hw.ser = NULL;
+ kfree(container_of(dev, struct ser_cardstate, dev.dev));
}
/*
@@ -408,7 +402,6 @@
cs->hw.ser = NULL;
return rc;
}
- dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
tasklet_init(&cs->write_tasklet,
gigaset_modem_fill, (unsigned long) cs);
diff --git a/drivers/isdn/hardware/mISDN/netjet.c b/drivers/isdn/hardware/mISDN/netjet.c
index 8e29447..afde4ed 100644
--- a/drivers/isdn/hardware/mISDN/netjet.c
+++ b/drivers/isdn/hardware/mISDN/netjet.c
@@ -392,7 +392,7 @@
}
stat = bchannel_get_rxbuf(&bc->bch, cnt);
/* only transparent mode uses the count here; HDLC overrun is detected later */
- if (stat == ENOMEM) {
+ if (stat == -ENOMEM) {
pr_warning("%s.B%d: No memory for %d bytes\n",
card->name, bc->bch.nr, cnt);
return;
diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c
index 33224cb..9f6acd5 100644
--- a/drivers/lightnvm/core.c
+++ b/drivers/lightnvm/core.c
@@ -572,11 +572,13 @@
}
}
- ret = nvm_get_sysblock(dev, &dev->sb);
- if (!ret)
- pr_err("nvm: device not initialized.\n");
- else if (ret < 0)
- pr_err("nvm: err (%d) on device initialization\n", ret);
+ if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
+ ret = nvm_get_sysblock(dev, &dev->sb);
+ if (!ret)
+ pr_err("nvm: device not initialized.\n");
+ else if (ret < 0)
+ pr_err("nvm: err (%d) on device initialization\n", ret);
+ }
/* register device with a supported media manager */
down_write(&nvm_lock);
@@ -1055,9 +1057,11 @@
strncpy(info.mmtype, init->mmtype, NVM_MMTYPE_LEN);
info.fs_ppa.ppa = -1;
- ret = nvm_init_sysblock(dev, &info);
- if (ret)
- return ret;
+ if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) {
+ ret = nvm_init_sysblock(dev, &info);
+ if (ret)
+ return ret;
+ }
memcpy(&dev->sb, &info, sizeof(struct nvm_sb_info));
@@ -1117,7 +1121,10 @@
dev->mt = NULL;
}
- return nvm_dev_factory(dev, fact.flags);
+ if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT)
+ return nvm_dev_factory(dev, fact.flags);
+
+ return 0;
}
static long nvm_ctl_ioctl(struct file *file, uint cmd, unsigned long arg)
diff --git a/drivers/lightnvm/rrpc.c b/drivers/lightnvm/rrpc.c
index d8c7595..307db1e 100644
--- a/drivers/lightnvm/rrpc.c
+++ b/drivers/lightnvm/rrpc.c
@@ -300,8 +300,10 @@
}
page = mempool_alloc(rrpc->page_pool, GFP_NOIO);
- if (!page)
+ if (!page) {
+ bio_put(bio);
return -ENOMEM;
+ }
while ((slot = find_first_zero_bit(rblk->invalid_pages,
nr_pgs_per_blk)) < nr_pgs_per_blk) {
diff --git a/drivers/lightnvm/rrpc.h b/drivers/lightnvm/rrpc.h
index ef13ac7..f7b3733 100644
--- a/drivers/lightnvm/rrpc.h
+++ b/drivers/lightnvm/rrpc.h
@@ -174,8 +174,7 @@
static inline int request_intersects(struct rrpc_inflight_rq *r,
sector_t laddr_start, sector_t laddr_end)
{
- return (laddr_end >= r->l_start && laddr_end <= r->l_end) &&
- (laddr_start >= r->l_start && laddr_start <= r->l_end);
+ return (laddr_end >= r->l_start) && (laddr_start <= r->l_end);
}
static int __rrpc_lock_laddr(struct rrpc *rrpc, sector_t laddr,
@@ -184,6 +183,8 @@
sector_t laddr_end = laddr + pages - 1;
struct rrpc_inflight_rq *rtmp;
+ WARN_ON(irqs_disabled());
+
spin_lock_irq(&rrpc->inflights.lock);
list_for_each_entry(rtmp, &rrpc->inflights.reqs, list) {
if (unlikely(request_intersects(rtmp, laddr, laddr_end))) {
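The corrected predicate is the standard closed-interval overlap test: two ranges intersect iff each one starts no later than the other ends. The old version demanded that one range contain both endpoints of the other, so partial overlaps slipped past the lock (worked example, values invented):

	/* in-flight request covers [0,7], new request covers [5,10]:
	 * they share [5,7]
	 * old: (10 >= 0 && 10 <= 7) && ... -> false, overlap missed
	 * new: (10 >= 0) && (5 <= 7)       -> true, correctly serialized */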
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 5df4048..dd83492 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -1191,6 +1191,8 @@
if (clone)
free_rq_clone(clone);
+ else if (!tio->md->queue->mq_ops)
+ free_rq_tio(tio);
}
/*
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index e6e4bac..12099b0 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2048,6 +2048,7 @@
return 0;
}
+EXPORT_SYMBOL_GPL(db8500_prcmu_config_hotmon);
static int config_hot_period(u16 val)
{
@@ -2074,11 +2075,13 @@
return config_hot_period(cycles32k);
}
+EXPORT_SYMBOL_GPL(db8500_prcmu_start_temp_sense);
int db8500_prcmu_stop_temp_sense(void)
{
return config_hot_period(0xFFFF);
}
+EXPORT_SYMBOL_GPL(db8500_prcmu_stop_temp_sense);
static int prcmu_a9wdog(u8 cmd, u8 d0, u8 d1, u8 d2, u8 d3)
{
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index 677d0362..80f9afc 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -458,7 +458,11 @@
{
struct mei_cl *cl = file->private_data;
- return mei_cl_notify_request(cl, file, request);
+ if (request != MEI_HBM_NOTIFICATION_START &&
+ request != MEI_HBM_NOTIFICATION_STOP)
+ return -EINVAL;
+
+ return mei_cl_notify_request(cl, file, (u8)request);
}
/**
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 5914263..fe207e5 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -47,13 +47,10 @@
#include "queue.h"
MODULE_ALIAS("mmc:block");
-
-#ifdef KERNEL
#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "mmcblk."
-#endif
#define INAND_CMD38_ARG_EXT_CSD 113
#define INAND_CMD38_ARG_ERASE 0x00
@@ -655,8 +652,10 @@
}
md = mmc_blk_get(bdev->bd_disk);
- if (!md)
+ if (!md) {
+ err = -EINVAL;
goto cmd_err;
+ }
card = md->queue.card;
if (IS_ERR(card)) {
diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c
index 1c1b45e..3446097 100644
--- a/drivers/mmc/host/mmc_spi.c
+++ b/drivers/mmc/host/mmc_spi.c
@@ -925,6 +925,10 @@
dma_addr = dma_map_page(dma_dev, sg_page(sg), 0,
PAGE_SIZE, dir);
+ if (dma_mapping_error(dma_dev, dma_addr)) {
+ data->error = -EFAULT;
+ break;
+ }
if (direction == DMA_TO_DEVICE)
t->tx_dma = dma_addr + sg->offset;
else
@@ -1393,10 +1397,12 @@
host->dma_dev = dev;
host->ones_dma = dma_map_single(dev, ones,
MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
+ if (dma_mapping_error(dev, host->ones_dma))
+ goto fail_ones_dma;
host->data_dma = dma_map_single(dev, host->data,
sizeof(*host->data), DMA_BIDIRECTIONAL);
-
- /* REVISIT in theory those map operations can fail... */
+ if (dma_mapping_error(dev, host->data_dma))
+ goto fail_data_dma;
dma_sync_single_for_cpu(host->dma_dev,
host->data_dma, sizeof(*host->data),
@@ -1462,6 +1468,11 @@
if (host->dma_dev)
dma_unmap_single(host->dma_dev, host->data_dma,
sizeof(*host->data), DMA_BIDIRECTIONAL);
+fail_data_dma:
+ if (host->dma_dev)
+ dma_unmap_single(host->dma_dev, host->ones_dma,
+ MMC_SPI_BLOCKSIZE, DMA_TO_DEVICE);
+fail_ones_dma:
kfree(host->data);
fail_nobuf1:
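Both fixes apply the same idiom: every DMA mapping must be checked with dma_mapping_error() before the handle is used, and a later failure has to unwind the earlier mapping (minimal sketch, names invented):

	one = dma_map_single(dev, buf1, LEN1, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, one))
		goto fail_one;
	two = dma_map_single(dev, buf2, LEN2, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(dev, two))
		goto fail_two;
	return 0;
fail_two:
	dma_unmap_single(dev, one, LEN1, DMA_TO_DEVICE);
fail_one:
	return -ENOMEM;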
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index b6639ea..f6e4d97 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -2232,6 +2232,7 @@
dma_release_channel(host->tx_chan);
if (host->rx_chan)
dma_release_channel(host->rx_chan);
+ pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
if (host->dbclk)
@@ -2253,6 +2254,7 @@
dma_release_channel(host->tx_chan);
dma_release_channel(host->rx_chan);
+ pm_runtime_dont_use_autosuspend(host->dev);
pm_runtime_put_sync(host->dev);
pm_runtime_disable(host->dev);
device_init_wakeup(&pdev->dev, false);
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index ce08896..da82477 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -86,7 +86,7 @@
static inline void pxamci_init_ocr(struct pxamci_host *host)
{
#ifdef CONFIG_REGULATOR
- host->vcc = regulator_get_optional(mmc_dev(host->mmc), "vmmc");
+ host->vcc = devm_regulator_get_optional(mmc_dev(host->mmc), "vmmc");
if (IS_ERR(host->vcc))
host->vcc = NULL;
@@ -654,12 +654,8 @@
r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
irq = platform_get_irq(pdev, 0);
- if (!r || irq < 0)
- return -ENXIO;
-
- r = request_mem_region(r->start, SZ_4K, DRIVER_NAME);
- if (!r)
- return -EBUSY;
+ if (irq < 0)
+ return irq;
mmc = mmc_alloc_host(sizeof(struct pxamci_host), &pdev->dev);
if (!mmc) {
@@ -695,7 +691,7 @@
host->pdata = pdev->dev.platform_data;
host->clkrt = CLKRT_OFF;
- host->clk = clk_get(&pdev->dev, NULL);
+ host->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(host->clk)) {
ret = PTR_ERR(host->clk);
host->clk = NULL;
@@ -727,9 +723,9 @@
host->irq = irq;
host->imask = MMC_I_MASK_ALL;
- host->base = ioremap(r->start, SZ_4K);
- if (!host->base) {
- ret = -ENOMEM;
+ host->base = devm_ioremap_resource(&pdev->dev, r);
+ if (IS_ERR(host->base)) {
+ ret = PTR_ERR(host->base);
goto out;
}
@@ -742,7 +738,8 @@
writel(64, host->base + MMC_RESTO);
writel(host->imask, host->base + MMC_I_MASK);
- ret = request_irq(host->irq, pxamci_irq, 0, DRIVER_NAME, host);
+ ret = devm_request_irq(&pdev->dev, host->irq, pxamci_irq, 0,
+ DRIVER_NAME, host);
if (ret)
goto out;
@@ -804,7 +801,7 @@
dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_ro);
goto out;
} else {
- mmc->caps |= host->pdata->gpio_card_ro_invert ?
+ mmc->caps2 |= host->pdata->gpio_card_ro_invert ?
0 : MMC_CAP2_RO_ACTIVE_HIGH;
}
@@ -833,14 +830,9 @@
dma_release_channel(host->dma_chan_rx);
if (host->dma_chan_tx)
dma_release_channel(host->dma_chan_tx);
- if (host->base)
- iounmap(host->base);
- if (host->clk)
- clk_put(host->clk);
}
if (mmc)
mmc_free_host(mmc);
- release_resource(r);
return ret;
}
@@ -859,9 +851,6 @@
gpio_ro = host->pdata->gpio_card_ro;
gpio_power = host->pdata->gpio_power;
}
- if (host->vcc)
- regulator_put(host->vcc);
-
if (host->pdata && host->pdata->exit)
host->pdata->exit(&pdev->dev, mmc);
@@ -870,16 +859,10 @@
END_CMD_RES|PRG_DONE|DATA_TRAN_DONE,
host->base + MMC_I_MASK);
- free_irq(host->irq, host);
dmaengine_terminate_all(host->dma_chan_rx);
dmaengine_terminate_all(host->dma_chan_tx);
dma_release_channel(host->dma_chan_rx);
dma_release_channel(host->dma_chan_tx);
- iounmap(host->base);
-
- clk_put(host->clk);
-
- release_resource(host->res);
mmc_free_host(mmc);
}
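The pxamci conversion above moves request_mem_region/ioremap/clk_get/request_irq over to their devm_* counterparts, which is why the error and remove paths can shed so many manual frees: resources registered against the device are released automatically, in reverse order, when probe fails or the driver detaches. Below is a toy userspace model of that idea, under the assumption that an array of release callbacks is enough to convey it; devm_track()/devm_release_all() are invented names, not the kernel devres API.

#include <stdio.h>
#include <stdlib.h>

#define MAX_RES 8

struct devres {
	void (*release)(void *);
	void *data;
};

static struct devres res[MAX_RES];
static int nres;

/* record a resource together with its release callback */
static void *devm_track(void *data, void (*release)(void *))
{
	if (!data || nres == MAX_RES)
		return NULL;
	res[nres].release = release;
	res[nres].data = data;
	nres++;
	return data;
}

/* release everything in reverse acquisition order */
static void devm_release_all(void)
{
	while (nres > 0) {
		nres--;
		res[nres].release(res[nres].data);
	}
}

int main(void)
{
	void *clk  = devm_track(malloc(16), free);	/* ~devm_clk_get */
	void *base = devm_track(malloc(32), free);	/* ~devm_ioremap_resource */

	if (!clk || !base)
		puts("probe failed; teardown is still one call");
	devm_release_all();				/* ~driver detach */
	return 0;
}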
diff --git a/drivers/mmc/host/sdhci-acpi.c b/drivers/mmc/host/sdhci-acpi.c
index f6047fc..a5cda92 100644
--- a/drivers/mmc/host/sdhci-acpi.c
+++ b/drivers/mmc/host/sdhci-acpi.c
@@ -146,6 +146,33 @@
.ops = &sdhci_acpi_ops_int,
};
+static int bxt_get_cd(struct mmc_host *mmc)
+{
+ int gpio_cd = mmc_gpio_get_cd(mmc);
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!gpio_cd)
+ return 0;
+
+ pm_runtime_get_sync(mmc->parent);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ goto out;
+
+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ pm_runtime_mark_last_busy(mmc->parent);
+ pm_runtime_put_autosuspend(mmc->parent);
+
+ return ret;
+}
+
static int sdhci_acpi_emmc_probe_slot(struct platform_device *pdev,
const char *hid, const char *uid)
{
@@ -196,6 +223,9 @@
/* Platform-specific code for SD probe slot setup goes here */
+ if (hid && !strcmp(hid, "80865ACA"))
+ host->mmc_host_ops.get_cd = bxt_get_cd;
+
return 0;
}
diff --git a/drivers/mmc/host/sdhci-of-at91.c b/drivers/mmc/host/sdhci-of-at91.c
index 7e7d8f0..9cb86fb 100644
--- a/drivers/mmc/host/sdhci-of-at91.c
+++ b/drivers/mmc/host/sdhci-of-at91.c
@@ -217,6 +217,7 @@
pm_runtime_disable:
pm_runtime_disable(&pdev->dev);
pm_runtime_set_suspended(&pdev->dev);
+ pm_runtime_put_noidle(&pdev->dev);
clocks_disable_unprepare:
clk_disable_unprepare(priv->gck);
clk_disable_unprepare(priv->mainck);
diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c
index cc851b0..df3b8ec 100644
--- a/drivers/mmc/host/sdhci-pci-core.c
+++ b/drivers/mmc/host/sdhci-pci-core.c
@@ -330,6 +330,33 @@
sdhci_pci_spt_drive_strength = 0x10 | ((val >> 12) & 0xf);
}
+static int bxt_get_cd(struct mmc_host *mmc)
+{
+ int gpio_cd = mmc_gpio_get_cd(mmc);
+ struct sdhci_host *host = mmc_priv(mmc);
+ unsigned long flags;
+ int ret = 0;
+
+ if (!gpio_cd)
+ return 0;
+
+ pm_runtime_get_sync(mmc->parent);
+
+ spin_lock_irqsave(&host->lock, flags);
+
+ if (host->flags & SDHCI_DEVICE_DEAD)
+ goto out;
+
+ ret = !!(sdhci_readl(host, SDHCI_PRESENT_STATE) & SDHCI_CARD_PRESENT);
+out:
+ spin_unlock_irqrestore(&host->lock, flags);
+
+ pm_runtime_mark_last_busy(mmc->parent);
+ pm_runtime_put_autosuspend(mmc->parent);
+
+ return ret;
+}
+
static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot)
{
slot->host->mmc->caps |= MMC_CAP_8_BIT_DATA | MMC_CAP_NONREMOVABLE |
@@ -362,6 +389,10 @@
slot->cd_con_id = NULL;
slot->cd_idx = 0;
slot->cd_override_level = true;
+ if (slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_BXT_SD ||
+ slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_APL_SD)
+ slot->host->mmc_host_ops.get_cd = bxt_get_cd;
+
return 0;
}
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index d622435..add9fdf 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -1360,7 +1360,7 @@
sdhci_runtime_pm_get(host);
/* Firstly check card presence */
- present = sdhci_do_get_cd(host);
+ present = mmc->ops->get_cd(mmc);
spin_lock_irqsave(&host->lock, flags);
@@ -2849,6 +2849,8 @@
host = mmc_priv(mmc);
host->mmc = mmc;
+ host->mmc_host_ops = sdhci_ops;
+ mmc->ops = &host->mmc_host_ops;
return host;
}
@@ -3037,7 +3039,6 @@
/*
* Set host parameters.
*/
- mmc->ops = &sdhci_ops;
max_clk = host->max_clk;
if (host->ops->get_min_clock)
diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h
index 7654ae5..0115e99 100644
--- a/drivers/mmc/host/sdhci.h
+++ b/drivers/mmc/host/sdhci.h
@@ -430,6 +430,7 @@
/* Internal data */
struct mmc_host *mmc; /* MMC structure */
+ struct mmc_host_ops mmc_host_ops; /* MMC host ops */
u64 dma_mask; /* custom DMA mask */
#if defined(CONFIG_LEDS_CLASS) || defined(CONFIG_LEDS_CLASS_MODULE)
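The sdhci change above is what enables the two bxt_get_cd() hooks earlier in this series: sdhci_alloc_host() now copies the const ops template into a per-host mmc_host_ops, so one controller can override a single callback without mutating the template shared by every other host. A small sketch of that copy-then-override pattern, with invented names.

#include <stdio.h>

struct host_ops {
	int (*get_cd)(void);
};

static int generic_get_cd(void) { return 1; }
static int quirk_get_cd(void)   { return 0; }

/* shared read-only template, like the static sdhci mmc_host_ops */
static const struct host_ops template_ops = { .get_cd = generic_get_cd };

struct host {
	struct host_ops ops;	/* writable per-instance copy */
};

int main(void)
{
	struct host a = { .ops = template_ops };
	struct host b = { .ops = template_ops };

	b.ops.get_cd = quirk_get_cd;	/* override one host only */

	printf("a=%d b=%d\n", a.ops.get_cd(), b.ops.get_cd());
	return 0;
}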
diff --git a/drivers/mmc/host/sh_mmcif.c b/drivers/mmc/host/sh_mmcif.c
index 1ca8a13..6234eab3 100644
--- a/drivers/mmc/host/sh_mmcif.c
+++ b/drivers/mmc/host/sh_mmcif.c
@@ -445,7 +445,7 @@
pdata->slave_id_rx);
} else {
host->chan_tx = dma_request_slave_channel(dev, "tx");
- host->chan_tx = dma_request_slave_channel(dev, "rx");
+ host->chan_rx = dma_request_slave_channel(dev, "rx");
}
dev_dbg(dev, "%s: got channel TX %p RX %p\n", __func__, host->chan_tx,
host->chan_rx);
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 56b5605..b7f1a99 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -214,6 +214,8 @@
static struct rtnl_link_stats64 *bond_get_stats(struct net_device *bond_dev,
struct rtnl_link_stats64 *stats);
static void bond_slave_arr_handler(struct work_struct *work);
+static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
+ int mod);
/*---------------------------- General routines -----------------------------*/
@@ -2127,6 +2129,7 @@
continue;
case BOND_LINK_UP:
+ bond_update_speed_duplex(slave);
bond_set_slave_link_state(slave, BOND_LINK_UP,
BOND_SLAVE_NOTIFY_NOW);
slave->last_link_up = jiffies;
@@ -2459,7 +2462,7 @@
struct slave *slave)
{
struct arphdr *arp = (struct arphdr *)skb->data;
- struct slave *curr_active_slave;
+ struct slave *curr_active_slave, *curr_arp_slave;
unsigned char *arp_ptr;
__be32 sip, tip;
int alen, is_arp = skb->protocol == __cpu_to_be16(ETH_P_ARP);
@@ -2506,26 +2509,41 @@
&sip, &tip);
curr_active_slave = rcu_dereference(bond->curr_active_slave);
+ curr_arp_slave = rcu_dereference(bond->current_arp_slave);
- /* Backup slaves won't see the ARP reply, but do come through
- * here for each ARP probe (so we swap the sip/tip to validate
- * the probe). In a "redundant switch, common router" type of
- * configuration, the ARP probe will (hopefully) travel from
- * the active, through one switch, the router, then the other
- * switch before reaching the backup.
+ /* We 'trust' the received ARP enough to validate it if:
*
- * We 'trust' the arp requests if there is an active slave and
- * it received valid arp reply(s) after it became active. This
- * is done to avoid endless looping when we can't reach the
+ * (a) the slave receiving the ARP is active (which includes the
+ * current ARP slave, if any), or
+ *
+ * (b) the receiving slave isn't active, but there is a currently
+ * active slave and it received valid arp reply(s) after it became
+ * the currently active slave, or
+ *
+ * (c) there is an ARP slave that sent an ARP during the prior ARP
+ * interval, and we receive an ARP reply on any slave. We accept
+ * these because switch FDB update delays may deliver the ARP
+ * reply to a slave other than the sender of the ARP request.
+ *
+ * Note: for (b), backup slaves are receiving the broadcast ARP
+ * request, not a reply. This request passes from the sending
+ * slave through the L2 switch(es) to the receiving slave. Since
+ * this is checking the request, sip/tip are swapped for
+ * validation.
+ *
+ * This is done to avoid endless looping when we can't reach the
* arp_ip_target and fool ourselves with our own arp requests.
*/
-
if (bond_is_active_slave(slave))
bond_validate_arp(bond, slave, sip, tip);
else if (curr_active_slave &&
time_after(slave_last_rx(bond, curr_active_slave),
curr_active_slave->last_link_up))
bond_validate_arp(bond, slave, tip, sip);
+ else if (curr_arp_slave && (arp->ar_op == htons(ARPOP_REPLY)) &&
+ bond_time_in_interval(bond,
+ dev_trans_start(curr_arp_slave->dev), 1))
+ bond_validate_arp(bond, slave, sip, tip);
out_unlock:
if (arp != (struct arphdr *)skb->data)
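Case (c) in the comment above relies on bond_time_in_interval(), now forward-declared near the top of the file: a reply on any slave is accepted only if the current ARP slave transmitted within roughly one arp_interval. A rough userspace sketch of that window test, using plain integers in place of jiffies (the real helper is built on time_in_range()).

#include <stdbool.h>
#include <stdio.h>

/* true if last_act falls within the last mod*interval ticks */
static bool time_in_interval(long now, long last_act, long interval, int mod)
{
	return last_act <= now && now - last_act <= (long)mod * interval;
}

int main(void)
{
	long now = 1000, arp_interval = 100;
	long probe_sent = 950;	/* dev_trans_start() of the ARP slave */

	if (time_in_interval(now, probe_sent, arp_interval, 1))
		puts("reply within one interval of the probe: validate it");
	return 0;
}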
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index fc5b756..eb7192f 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -117,6 +117,9 @@
*/
#define EMS_USB_ARM7_CLOCK 8000000
+#define CPC_TX_QUEUE_TRIGGER_LOW 25
+#define CPC_TX_QUEUE_TRIGGER_HIGH 35
+
/*
* CAN-Message representation in a CPC_MSG. Message object type is
* CPC_MSG_TYPE_CAN_FRAME or CPC_MSG_TYPE_RTR_FRAME or
@@ -278,6 +281,11 @@
switch (urb->status) {
case 0:
dev->free_slots = dev->intr_in_buffer[1];
+ if (dev->free_slots > CPC_TX_QUEUE_TRIGGER_HIGH) {
+ if (netif_queue_stopped(netdev)) {
+ netif_wake_queue(netdev);
+ }
+ }
break;
case -ECONNRESET: /* unlink */
@@ -526,8 +534,6 @@
/* Release context */
context->echo_index = MAX_TX_URBS;
- if (netif_queue_stopped(netdev))
- netif_wake_queue(netdev);
}
/*
@@ -587,7 +593,7 @@
int err, i;
dev->intr_in_buffer[0] = 0;
- dev->free_slots = 15; /* initial size */
+ dev->free_slots = 50; /* initial size */
for (i = 0; i < MAX_RX_URBS; i++) {
struct urb *urb = NULL;
@@ -835,7 +841,7 @@
/* Slow down tx path */
if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
- dev->free_slots < 5) {
+ dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
netif_stop_queue(netdev);
}
}
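The ems_usb change above turns one hard-coded limit into a low/high watermark pair: the tx path stops the queue when free slots drop below CPC_TX_QUEUE_TRIGGER_LOW, and the interrupt path wakes it only once they climb back above CPC_TX_QUEUE_TRIGGER_HIGH, so the gap between the two absorbs jitter instead of ping-ponging. A compilable sketch of that hysteresis, with the netif queue reduced to a bool.

#include <stdbool.h>
#include <stdio.h>

#define TRIGGER_LOW	25	/* mirrors CPC_TX_QUEUE_TRIGGER_LOW */
#define TRIGGER_HIGH	35	/* mirrors CPC_TX_QUEUE_TRIGGER_HIGH */

static bool queue_stopped;

static void tx_path(int free_slots)
{
	if (free_slots < TRIGGER_LOW && !queue_stopped) {
		queue_stopped = true;		/* netif_stop_queue() */
		printf("stop (free=%d)\n", free_slots);
	}
}

static void intr_path(int free_slots)
{
	if (free_slots > TRIGGER_HIGH && queue_stopped) {
		queue_stopped = false;		/* netif_wake_queue() */
		printf("wake (free=%d)\n", free_slots);
	}
}

int main(void)
{
	int slots[] = { 40, 24, 30, 36 };	/* 30 sits in the dead band */

	for (int i = 0; i < 4; i++) {
		tx_path(slots[i]);
		intr_path(slots[i]);
	}
	return 0;
}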
diff --git a/drivers/net/dsa/mv88e6352.c b/drivers/net/dsa/mv88e6352.c
index cc6c545..a47f52f 100644
--- a/drivers/net/dsa/mv88e6352.c
+++ b/drivers/net/dsa/mv88e6352.c
@@ -25,6 +25,7 @@
static const struct mv88e6xxx_switch_id mv88e6352_table[] = {
{ PORT_SWITCH_ID_6172, "Marvell 88E6172" },
{ PORT_SWITCH_ID_6176, "Marvell 88E6176" },
+ { PORT_SWITCH_ID_6240, "Marvell 88E6240" },
{ PORT_SWITCH_ID_6320, "Marvell 88E6320" },
{ PORT_SWITCH_ID_6320_A1, "Marvell 88E6320 (A1)" },
{ PORT_SWITCH_ID_6320_A2, "Marvell 88e6320 (A2)" },
diff --git a/drivers/net/dsa/mv88e6xxx.c b/drivers/net/dsa/mv88e6xxx.c
index cf34681..512c8c0 100644
--- a/drivers/net/dsa/mv88e6xxx.c
+++ b/drivers/net/dsa/mv88e6xxx.c
@@ -1555,7 +1555,7 @@
if (vlan.vid != vid || !vlan.valid ||
vlan.data[port] == GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER)
- return -ENOENT;
+ return -EOPNOTSUPP;
vlan.data[port] = GLOBAL_VTU_DATA_MEMBER_TAG_NON_MEMBER;
@@ -1582,6 +1582,7 @@
const struct switchdev_obj_port_vlan *vlan)
{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
+ const u16 defpvid = 4000 + ds->index * DSA_MAX_PORTS + port;
u16 pvid, vid;
int err = 0;
@@ -1597,7 +1598,8 @@
goto unlock;
if (vid == pvid) {
- err = _mv88e6xxx_port_pvid_set(ds, port, 0);
+ /* restore reserved VLAN ID */
+ err = _mv88e6xxx_port_pvid_set(ds, port, defpvid);
if (err)
goto unlock;
}
@@ -1889,26 +1891,20 @@
int mv88e6xxx_port_bridge_join(struct dsa_switch *ds, int port, u32 members)
{
- struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
- const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
- int err;
-
- /* The port joined a bridge, so leave its reserved VLAN */
- mutex_lock(&ps->smi_mutex);
- err = _mv88e6xxx_port_vlan_del(ds, port, pvid);
- if (!err)
- err = _mv88e6xxx_port_pvid_set(ds, port, 0);
- mutex_unlock(&ps->smi_mutex);
- return err;
+ return 0;
}
int mv88e6xxx_port_bridge_leave(struct dsa_switch *ds, int port, u32 members)
{
+ return 0;
+}
+
+static int mv88e6xxx_setup_port_default_vlan(struct dsa_switch *ds, int port)
+{
struct mv88e6xxx_priv_state *ps = ds_to_priv(ds);
const u16 pvid = 4000 + ds->index * DSA_MAX_PORTS + port;
int err;
- /* The port left the bridge, so join its reserved VLAN */
mutex_lock(&ps->smi_mutex);
err = _mv88e6xxx_port_vlan_add(ds, port, pvid, true);
if (!err)
@@ -2192,8 +2188,7 @@
if (dsa_is_cpu_port(ds, i) || dsa_is_dsa_port(ds, i))
continue;
- /* setup the unbridged state */
- ret = mv88e6xxx_port_bridge_leave(ds, i, 0);
+ ret = mv88e6xxx_setup_port_default_vlan(ds, i);
if (ret < 0)
return ret;
}
diff --git a/drivers/net/ethernet/8390/pcnet_cs.c b/drivers/net/ethernet/8390/pcnet_cs.c
index 2777289..2f79d29 100644
--- a/drivers/net/ethernet/8390/pcnet_cs.c
+++ b/drivers/net/ethernet/8390/pcnet_cs.c
@@ -1501,6 +1501,7 @@
PCMCIA_DEVICE_MANF_CARD(0x026f, 0x030a),
PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1103),
PCMCIA_DEVICE_MANF_CARD(0x0274, 0x1121),
+ PCMCIA_DEVICE_MANF_CARD(0xc001, 0x0009),
PCMCIA_DEVICE_PROD_ID12("2408LAN", "Ethernet", 0x352fff7f, 0x00b2e941),
PCMCIA_DEVICE_PROD_ID1234("Socket", "CF 10/100 Ethernet Card", "Revision B", "05/11/06", 0xb38bcc2e, 0x4de88352, 0xeaca6c8d, 0x7e57c22e),
PCMCIA_DEVICE_PROD_ID123("Cardwell", "PCMCIA", "ETHERNET", 0x9533672e, 0x281f1c5d, 0x3ff7175b),
diff --git a/drivers/net/ethernet/agere/et131x.c b/drivers/net/ethernet/agere/et131x.c
index 3f3bcbe..0907ab6 100644
--- a/drivers/net/ethernet/agere/et131x.c
+++ b/drivers/net/ethernet/agere/et131x.c
@@ -2380,7 +2380,7 @@
sizeof(u32),
&tx_ring->tx_status_pa,
GFP_KERNEL);
- if (!tx_ring->tx_status_pa) {
+ if (!tx_ring->tx_status) {
dev_err(&adapter->pdev->dev,
"Cannot alloc memory for Tx status block\n");
return -ENOMEM;
diff --git a/drivers/net/ethernet/amd/am79c961a.c b/drivers/net/ethernet/amd/am79c961a.c
index 87e727b..fcdf5dd 100644
--- a/drivers/net/ethernet/amd/am79c961a.c
+++ b/drivers/net/ethernet/amd/am79c961a.c
@@ -50,8 +50,8 @@
static void write_rreg(u_long base, u_int reg, u_int val)
{
asm volatile(
- "str%?h %1, [%2] @ NET_RAP\n\t"
- "str%?h %0, [%2, #-4] @ NET_RDP"
+ "strh %1, [%2] @ NET_RAP\n\t"
+ "strh %0, [%2, #-4] @ NET_RDP"
:
: "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
}
@@ -60,8 +60,8 @@
{
unsigned short v;
asm volatile(
- "str%?h %1, [%2] @ NET_RAP\n\t"
- "ldr%?h %0, [%2, #-4] @ NET_RDP"
+ "strh %1, [%2] @ NET_RAP\n\t"
+ "ldrh %0, [%2, #-4] @ NET_RDP"
: "=r" (v)
: "r" (reg), "r" (ISAIO_BASE + 0x0464));
return v;
@@ -70,8 +70,8 @@
static inline void write_ireg(u_long base, u_int reg, u_int val)
{
asm volatile(
- "str%?h %1, [%2] @ NET_RAP\n\t"
- "str%?h %0, [%2, #8] @ NET_IDP"
+ "strh %1, [%2] @ NET_RAP\n\t"
+ "strh %0, [%2, #8] @ NET_IDP"
:
: "r" (val), "r" (reg), "r" (ISAIO_BASE + 0x0464));
}
@@ -80,8 +80,8 @@
{
u_short v;
asm volatile(
- "str%?h %1, [%2] @ NAT_RAP\n\t"
- "ldr%?h %0, [%2, #8] @ NET_IDP\n\t"
+ "strh %1, [%2] @ NAT_RAP\n\t"
+ "ldrh %0, [%2, #8] @ NET_IDP\n\t"
: "=r" (v)
: "r" (reg), "r" (ISAIO_BASE + 0x0464));
return v;
@@ -96,7 +96,7 @@
offset = ISAMEM_BASE + (offset << 1);
length = (length + 1) & ~1;
if ((int)buf & 2) {
- asm volatile("str%?h %2, [%0], #4"
+ asm volatile("strh %2, [%0], #4"
: "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
buf += 2;
length -= 2;
@@ -104,20 +104,20 @@
while (length > 8) {
register unsigned int tmp asm("r2"), tmp2 asm("r3");
asm volatile(
- "ldm%?ia %0!, {%1, %2}"
+ "ldmia %0!, {%1, %2}"
: "+r" (buf), "=&r" (tmp), "=&r" (tmp2));
length -= 8;
asm volatile(
- "str%?h %1, [%0], #4\n\t"
- "mov%? %1, %1, lsr #16\n\t"
- "str%?h %1, [%0], #4\n\t"
- "str%?h %2, [%0], #4\n\t"
- "mov%? %2, %2, lsr #16\n\t"
- "str%?h %2, [%0], #4"
+ "strh %1, [%0], #4\n\t"
+ "mov %1, %1, lsr #16\n\t"
+ "strh %1, [%0], #4\n\t"
+ "strh %2, [%0], #4\n\t"
+ "mov %2, %2, lsr #16\n\t"
+ "strh %2, [%0], #4"
: "+r" (offset), "=&r" (tmp), "=&r" (tmp2));
}
while (length > 0) {
- asm volatile("str%?h %2, [%0], #4"
+ asm volatile("strh %2, [%0], #4"
: "=&r" (offset) : "0" (offset), "r" (buf[0] | (buf[1] << 8)));
buf += 2;
length -= 2;
@@ -132,23 +132,23 @@
if ((int)buf & 2) {
unsigned int tmp;
asm volatile(
- "ldr%?h %2, [%0], #4\n\t"
- "str%?b %2, [%1], #1\n\t"
- "mov%? %2, %2, lsr #8\n\t"
- "str%?b %2, [%1], #1"
+ "ldrh %2, [%0], #4\n\t"
+ "strb %2, [%1], #1\n\t"
+ "mov %2, %2, lsr #8\n\t"
+ "strb %2, [%1], #1"
: "=&r" (offset), "=&r" (buf), "=r" (tmp): "0" (offset), "1" (buf));
length -= 2;
}
while (length > 8) {
register unsigned int tmp asm("r2"), tmp2 asm("r3"), tmp3;
asm volatile(
- "ldr%?h %2, [%0], #4\n\t"
- "ldr%?h %4, [%0], #4\n\t"
- "ldr%?h %3, [%0], #4\n\t"
- "orr%? %2, %2, %4, lsl #16\n\t"
- "ldr%?h %4, [%0], #4\n\t"
- "orr%? %3, %3, %4, lsl #16\n\t"
- "stm%?ia %1!, {%2, %3}"
+ "ldrh %2, [%0], #4\n\t"
+ "ldrh %4, [%0], #4\n\t"
+ "ldrh %3, [%0], #4\n\t"
+ "orr %2, %2, %4, lsl #16\n\t"
+ "ldrh %4, [%0], #4\n\t"
+ "orr %3, %3, %4, lsl #16\n\t"
+ "stmia %1!, {%2, %3}"
: "=&r" (offset), "=&r" (buf), "=r" (tmp), "=r" (tmp2), "=r" (tmp3)
: "0" (offset), "1" (buf));
length -= 8;
@@ -156,10 +156,10 @@
while (length > 0) {
unsigned int tmp;
asm volatile(
- "ldr%?h %2, [%0], #4\n\t"
- "str%?b %2, [%1], #1\n\t"
- "mov%? %2, %2, lsr #8\n\t"
- "str%?b %2, [%1], #1"
+ "ldrh %2, [%0], #4\n\t"
+ "strb %2, [%1], #1\n\t"
+ "mov %2, %2, lsr #8\n\t"
+ "strb %2, [%1], #1"
: "=&r" (offset), "=&r" (buf), "=r" (tmp) : "0" (offset), "1" (buf));
length -= 2;
}
diff --git a/drivers/net/ethernet/amd/lance.c b/drivers/net/ethernet/amd/lance.c
index 256f590..3a7ebfd 100644
--- a/drivers/net/ethernet/amd/lance.c
+++ b/drivers/net/ethernet/amd/lance.c
@@ -547,8 +547,8 @@
/* Make certain the data structures used by the LANCE are aligned and DMAble. */
lp = kzalloc(sizeof(*lp), GFP_DMA | GFP_KERNEL);
- if(lp==NULL)
- return -ENODEV;
+ if (!lp)
+ return -ENOMEM;
if (lance_debug > 6) printk(" (#0x%05lx)", (unsigned long)lp);
dev->ml_priv = lp;
lp->name = chipname;
diff --git a/drivers/net/ethernet/arc/emac_main.c b/drivers/net/ethernet/arc/emac_main.c
index abe1eab..6446af1 100644
--- a/drivers/net/ethernet/arc/emac_main.c
+++ b/drivers/net/ethernet/arc/emac_main.c
@@ -163,7 +163,7 @@
struct sk_buff *skb = tx_buff->skb;
unsigned int info = le32_to_cpu(txbd->info);
- if ((info & FOR_EMAC) || !txbd->data)
+ if ((info & FOR_EMAC) || !txbd->data || !skb)
break;
if (unlikely(info & (DROP | DEFR | LTCL | UFLO))) {
@@ -191,6 +191,7 @@
txbd->data = 0;
txbd->info = 0;
+ tx_buff->skb = NULL;
*txbd_dirty = (*txbd_dirty + 1) % TX_BD_NUM;
}
@@ -446,6 +447,9 @@
*last_rx_bd = (*last_rx_bd + 1) % RX_BD_NUM;
}
+ priv->txbd_curr = 0;
+ priv->txbd_dirty = 0;
+
/* Clean Tx BD's */
memset(priv->txbd, 0, TX_RING_SZ);
@@ -514,6 +518,64 @@
}
/**
+ * arc_free_tx_queue - free skbs from the Tx queue
+ * @ndev: Pointer to the network device.
+ *
+ * This function must be called while the EMAC is disabled.
+ */
+static void arc_free_tx_queue(struct net_device *ndev)
+{
+ struct arc_emac_priv *priv = netdev_priv(ndev);
+ unsigned int i;
+
+ for (i = 0; i < TX_BD_NUM; i++) {
+ struct arc_emac_bd *txbd = &priv->txbd[i];
+ struct buffer_state *tx_buff = &priv->tx_buff[i];
+
+ if (tx_buff->skb) {
+ dma_unmap_single(&ndev->dev, dma_unmap_addr(tx_buff, addr),
+ dma_unmap_len(tx_buff, len), DMA_TO_DEVICE);
+
+ /* return the sk_buff to the system */
+ dev_kfree_skb_irq(tx_buff->skb);
+ }
+
+ txbd->info = 0;
+ txbd->data = 0;
+ tx_buff->skb = NULL;
+ }
+}
+
+/**
+ * arc_free_rx_queue - free skbs from the Rx queue
+ * @ndev: Pointer to the network device.
+ *
+ * This function must be called while the EMAC is disabled.
+ */
+static void arc_free_rx_queue(struct net_device *ndev)
+{
+ struct arc_emac_priv *priv = netdev_priv(ndev);
+ unsigned int i;
+
+ for (i = 0; i < RX_BD_NUM; i++) {
+ struct arc_emac_bd *rxbd = &priv->rxbd[i];
+ struct buffer_state *rx_buff = &priv->rx_buff[i];
+
+ if (rx_buff->skb) {
+ dma_unmap_single(&ndev->dev, dma_unmap_addr(rx_buff, addr),
+ dma_unmap_len(rx_buff, len), DMA_FROM_DEVICE);
+
+ /* return the sk_buff to the system */
+ dev_kfree_skb_irq(rx_buff->skb);
+ }
+
+ rxbd->info = 0;
+ rxbd->data = 0;
+ rx_buff->skb = NULL;
+ }
+}
+
+/**
* arc_emac_stop - Close the network device.
* @ndev: Pointer to the network device.
*
@@ -534,6 +596,10 @@
/* Disable EMAC */
arc_reg_clr(priv, R_CTRL, EN_MASK);
+ /* Return the sk_buffs to the system */
+ arc_free_tx_queue(ndev);
+ arc_free_rx_queue(ndev);
+
return 0;
}
@@ -610,7 +676,6 @@
dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);
- priv->tx_buff[*txbd_curr].skb = skb;
priv->txbd[*txbd_curr].data = cpu_to_le32(addr);
/* Make sure pointer to data buffer is set */
@@ -620,6 +685,11 @@
*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);
+ /* Make sure info word is set */
+ wmb();
+
+ priv->tx_buff[*txbd_curr].skb = skb;
+
/* Increment index to point to the next BD */
*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;
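The ordering in the hunk above is deliberate: the descriptor's info word must be visible before the skb pointer is published, otherwise the cleanup path could pair a stale skb with a live descriptor; the added wmb() enforces exactly that on the producer side. A self-contained C11 sketch of the same publish/consume ordering, using release/acquire atomics where the kernel uses wmb(); all names are illustrative.

#include <stdatomic.h>
#include <stdio.h>

struct bd {
	int data;		/* descriptor payload */
	_Atomic int owned;	/* FOR_EMAC-style ownership flag */
};

static void publish(struct bd *bd, int payload)
{
	bd->data = payload;
	/* release: the payload store cannot sink below this flag store,
	 * mirroring the wmb() between the info word and the skb pointer */
	atomic_store_explicit(&bd->owned, 1, memory_order_release);
}

static int consume(struct bd *bd)
{
	/* acquire: seeing the flag guarantees seeing the payload */
	if (atomic_load_explicit(&bd->owned, memory_order_acquire))
		return bd->data;
	return -1;
}

int main(void)
{
	struct bd bd = { 0 };

	publish(&bd, 42);
	printf("%d\n", consume(&bd));
	return 0;
}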
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
index d946bba..1fb8010 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_link.c
@@ -6185,26 +6185,80 @@
shift -= 4;
digit = ((num & mask) >> shift);
if (digit == 0 && remove_leading_zeros) {
- mask = mask >> 4;
- continue;
- } else if (digit < 0xa)
- *str_ptr = digit + '0';
- else
- *str_ptr = digit - 0xa + 'a';
- remove_leading_zeros = 0;
- str_ptr++;
- (*len)--;
+ *str_ptr = '0';
+ } else {
+ if (digit < 0xa)
+ *str_ptr = digit + '0';
+ else
+ *str_ptr = digit - 0xa + 'a';
+
+ remove_leading_zeros = 0;
+ str_ptr++;
+ (*len)--;
+ }
mask = mask >> 4;
if (shift == 4*4) {
+ if (remove_leading_zeros) {
+ str_ptr++;
+ (*len)--;
+ }
*str_ptr = '.';
str_ptr++;
(*len)--;
remove_leading_zeros = 1;
}
}
+ if (remove_leading_zeros)
+ (*len)--;
return 0;
}
+static int bnx2x_3_seq_format_ver(u32 num, u8 *str, u16 *len)
+{
+ u8 *str_ptr = str;
+ u32 mask = 0x00f00000;
+ u8 shift = 8*3;
+ u8 digit;
+ u8 remove_leading_zeros = 1;
+
+ if (*len < 10) {
+ /* Need more than 10 chars for this format */
+ *str_ptr = '\0';
+ (*len)--;
+ return -EINVAL;
+ }
+
+ while (shift > 0) {
+ shift -= 4;
+ digit = ((num & mask) >> shift);
+ if (digit == 0 && remove_leading_zeros) {
+ *str_ptr = '0';
+ } else {
+ if (digit < 0xa)
+ *str_ptr = digit + '0';
+ else
+ *str_ptr = digit - 0xa + 'a';
+
+ remove_leading_zeros = 0;
+ str_ptr++;
+ (*len)--;
+ }
+ mask = mask >> 4;
+ if ((shift == 4*4) || (shift == 4*2)) {
+ if (remove_leading_zeros) {
+ str_ptr++;
+ (*len)--;
+ }
+ *str_ptr = '.';
+ str_ptr++;
+ (*len)--;
+ remove_leading_zeros = 1;
+ }
+ }
+ if (remove_leading_zeros)
+ (*len)--;
+ return 0;
+}
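bnx2x_3_seq_format_ver() above renders a packed version word as three dot-separated hex fields, emitting a single '0' for an all-zero field rather than dropping the field entirely. A much-simplified userspace sketch of just the output shape, assuming 8-bit fields purely for illustration; the driver itself walks nibbles with its own mask schedule and output-length accounting.

#include <stdio.h>

/* "%x" collapses leading zeros the way the loop above does, and an
 * all-zero field still prints as a single '0' */
static void format_ver_3(unsigned int num, char *out, int outlen)
{
	snprintf(out, outlen, "%x.%x.%x",
		 (num >> 16) & 0xff, (num >> 8) & 0xff, num & 0xff);
}

int main(void)
{
	char buf[16];

	format_ver_3(0x010007, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints 1.0.7 */
	return 0;
}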
static int bnx2x_null_format_ver(u32 spirom_ver, u8 *str, u16 *len)
{
@@ -9677,8 +9731,9 @@
if (bnx2x_is_8483x_8485x(phy)) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD, 0x400f, &fw_ver1);
- bnx2x_save_spirom_version(bp, port, fw_ver1 & 0xfff,
- phy->ver_addr);
+ if (phy->type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
+ fw_ver1 &= 0xfff;
+ bnx2x_save_spirom_version(bp, port, fw_ver1, phy->ver_addr);
} else {
/* For 32-bit registers in 848xx, access via MDIO2ARM i/f. */
/* (1) set reg 0xc200_0014(SPI_BRIDGE_CTRL_2) to 0x03000000 */
@@ -9732,16 +9787,32 @@
static void bnx2x_848xx_set_led(struct bnx2x *bp,
struct bnx2x_phy *phy)
{
- u16 val, offset, i;
+ u16 val, led3_blink_rate, offset, i;
static struct bnx2x_reg_set reg_set[] = {
{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED1_MASK, 0x0080},
{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED2_MASK, 0x0018},
{MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_MASK, 0x0006},
- {MDIO_PMA_DEVAD, MDIO_PMA_REG_8481_LED3_BLINK, 0x0000},
{MDIO_PMA_DEVAD, MDIO_PMA_REG_84823_CTL_SLOW_CLK_CNT_HIGH,
MDIO_PMA_REG_84823_BLINK_RATE_VAL_15P9HZ},
{MDIO_AN_DEVAD, 0xFFFB, 0xFFFD}
};
+
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ /* Set LED5 source */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED5_MASK,
+ 0x90);
+ led3_blink_rate = 0x000f;
+ } else {
+ led3_blink_rate = 0x0000;
+ }
+ /* Set LED3 BLINK */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_BLINK,
+ led3_blink_rate);
+
/* PHYC_CTL_LED_CTL */
bnx2x_cl45_read(bp, phy,
MDIO_PMA_DEVAD,
@@ -9749,6 +9820,9 @@
val &= 0xFE00;
val |= 0x0092;
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
+ val |= 2 << 12; /* LED5 ON based on source */
+
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LINK_SIGNAL, val);
@@ -9762,10 +9836,17 @@
else
offset = MDIO_PMA_REG_84823_CTL_LED_CTL_1;
- /* stretch_en for LED3*/
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858)
+ val = MDIO_PMA_REG_84858_ALLOW_GPHY_ACT |
+ MDIO_PMA_REG_84823_LED3_STRETCH_EN;
+ else
+ val = MDIO_PMA_REG_84823_LED3_STRETCH_EN;
+
+ /* stretch_en for LEDs */
bnx2x_cl45_read_or_write(bp, phy,
- MDIO_PMA_DEVAD, offset,
- MDIO_PMA_REG_84823_LED3_STRETCH_EN);
+ MDIO_PMA_DEVAD,
+ offset,
+ val);
}
static void bnx2x_848xx_specific_func(struct bnx2x_phy *phy,
@@ -9775,7 +9856,7 @@
struct bnx2x *bp = params->bp;
switch (action) {
case PHY_INIT:
- if (!bnx2x_is_8483x_8485x(phy)) {
+ if (bnx2x_is_8483x_8485x(phy)) {
/* Save spirom version */
bnx2x_save_848xx_spirom_version(phy, bp, params->port);
}
@@ -10036,15 +10117,20 @@
static int bnx2x_84833_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params, u16 fw_cmd,
- u16 cmd_args[], int argc)
+ u16 cmd_args[], int argc, int process)
{
int idx;
u16 val;
struct bnx2x *bp = params->bp;
- /* Write CMD_OPEN_OVERRIDE to STATUS reg */
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_848xx_CMD_HDLR_STATUS,
- PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+ int rc = 0;
+
+ if (process == PHY84833_MB_PROCESS2) {
+ /* Write CMD_OPEN_OVERRIDE to STATUS reg */
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_OPEN_OVERRIDE);
+ }
+
for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
MDIO_848xx_CMD_HDLR_STATUS, &val);
@@ -10054,15 +10140,27 @@
}
if (idx >= PHY848xx_CMDHDLR_WAIT) {
DP(NETIF_MSG_LINK, "FW cmd: FW not ready.\n");
+ /* If the status is CMD_COMPLETE_PASS or CMD_COMPLETE_ERROR,
+ * clear the status to CMD_CLEAR_COMPLETE
+ */
+ if (val == PHY84833_STATUS_CMD_COMPLETE_PASS ||
+ val == PHY84833_STATUS_CMD_COMPLETE_ERROR) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_CLEAR_COMPLETE);
+ }
return -EINVAL;
}
-
- /* Prepare argument(s) and issue command */
- for (idx = 0; idx < argc; idx++) {
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_848xx_CMD_HDLR_DATA1 + idx,
- cmd_args[idx]);
+ if (process == PHY84833_MB_PROCESS1 ||
+ process == PHY84833_MB_PROCESS2) {
+ /* Prepare argument(s) */
+ for (idx = 0; idx < argc; idx++) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ cmd_args[idx]);
+ }
}
+
bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
MDIO_848xx_CMD_HDLR_COMMAND, fw_cmd);
for (idx = 0; idx < PHY848xx_CMDHDLR_WAIT; idx++) {
@@ -10076,24 +10174,30 @@
if ((idx >= PHY848xx_CMDHDLR_WAIT) ||
(val == PHY84833_STATUS_CMD_COMPLETE_ERROR)) {
DP(NETIF_MSG_LINK, "FW cmd failed.\n");
- return -EINVAL;
+ rc = -EINVAL;
}
- /* Gather returning data */
- for (idx = 0; idx < argc; idx++) {
- bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
- MDIO_848xx_CMD_HDLR_DATA1 + idx,
- &cmd_args[idx]);
+ if (process == PHY84833_MB_PROCESS3 && rc == 0) {
+ /* Gather returning data */
+ for (idx = 0; idx < argc; idx++) {
+ bnx2x_cl45_read(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_DATA1 + idx,
+ &cmd_args[idx]);
+ }
}
- bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
- MDIO_848xx_CMD_HDLR_STATUS,
- PHY84833_STATUS_CMD_CLEAR_COMPLETE);
- return 0;
+ if (val == PHY84833_STATUS_CMD_COMPLETE_ERROR ||
+ val == PHY84833_STATUS_CMD_COMPLETE_PASS) {
+ bnx2x_cl45_write(bp, phy, MDIO_CTL_DEVAD,
+ MDIO_848xx_CMD_HDLR_STATUS,
+ PHY84833_STATUS_CMD_CLEAR_COMPLETE);
+ }
+ return rc;
}
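The reworked handler above splits one monolithic mailbox exchange into three explicitly named phases: PROCESS2 opens the mailbox with an override and writes arguments, PROCESS1 writes arguments without the override, and PROCESS3 reads results back after completion, with the completion status cleared in every case. A heavily simplified, invented in-memory sketch of that phase split; it models none of the MDIO access or status polling.

#include <stdio.h>

enum { MB_PROCESS1 = 1, MB_PROCESS2 = 2, MB_PROCESS3 = 3 };

static unsigned short mb_data[4];	/* fake DATA1.. registers */

static int mb_cmd(int process, unsigned short *args, int argc)
{
	if (process == MB_PROCESS1 || process == MB_PROCESS2)
		for (int i = 0; i < argc; i++)
			mb_data[i] = args[i];	/* prepare arguments */

	/* a real handler would issue the command and poll status here */

	if (process == MB_PROCESS3)
		for (int i = 0; i < argc; i++)
			args[i] = mb_data[i];	/* gather results */
	return 0;
}

int main(void)
{
	unsigned short args[2] = { 1, 2 };

	mb_cmd(MB_PROCESS1, args, 2);
	args[0] = args[1] = 0;
	mb_cmd(MB_PROCESS3, args, 2);
	printf("%u %u\n", args[0], args[1]);	/* prints 1 2 */
	return 0;
}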
static int bnx2x_848xx_cmd_hdlr(struct bnx2x_phy *phy,
struct link_params *params,
u16 fw_cmd,
- u16 cmd_args[], int argc)
+ u16 cmd_args[], int argc,
+ int process)
{
struct bnx2x *bp = params->bp;
@@ -10106,7 +10210,7 @@
argc);
} else {
return bnx2x_84833_cmd_hdlr(phy, params, fw_cmd, cmd_args,
- argc);
+ argc, process);
}
}
@@ -10133,7 +10237,7 @@
status = bnx2x_848xx_cmd_hdlr(phy, params,
PHY848xx_CMD_SET_PAIR_SWAP, data,
- PHY848xx_CMDHDLR_MAX_ARGS);
+ 2, PHY84833_MB_PROCESS2);
if (status == 0)
DP(NETIF_MSG_LINK, "Pairswap OK, val=0x%x\n", data[1]);
@@ -10222,8 +10326,8 @@
DP(NETIF_MSG_LINK, "Don't Advertise 10GBase-T EEE\n");
/* Prevent Phy from working in EEE and advertising it */
- rc = bnx2x_848xx_cmd_hdlr(phy, params,
- PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
+ rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
+ &cmd_args, 1, PHY84833_MB_PROCESS1);
if (rc) {
DP(NETIF_MSG_LINK, "EEE disable failed.\n");
return rc;
@@ -10240,8 +10344,8 @@
struct bnx2x *bp = params->bp;
u16 cmd_args = 1;
- rc = bnx2x_848xx_cmd_hdlr(phy, params,
- PHY848xx_CMD_SET_EEE_MODE, &cmd_args, 1);
+ rc = bnx2x_848xx_cmd_hdlr(phy, params, PHY848xx_CMD_SET_EEE_MODE,
+ &cmd_args, 1, PHY84833_MB_PROCESS1);
if (rc) {
DP(NETIF_MSG_LINK, "EEE enable failed.\n");
return rc;
@@ -10362,7 +10466,7 @@
cmd_args[3] = PHY84833_CONSTANT_LATENCY;
rc = bnx2x_848xx_cmd_hdlr(phy, params,
PHY848xx_CMD_SET_EEE_MODE, cmd_args,
- PHY848xx_CMDHDLR_MAX_ARGS);
+ 4, PHY84833_MB_PROCESS1);
if (rc)
DP(NETIF_MSG_LINK, "Cfg AutogrEEEn failed.\n");
}
@@ -10416,6 +10520,32 @@
vars->eee_status &= ~SHMEM_EEE_SUPPORTED_MASK;
}
+ if (phy->type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84833) {
+ /* Additional settings for jumbo packets in 1000BASE-T mode */
+ /* Allow rx extended length */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_AUX_CTRL, &val);
+ val |= 0x4000;
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_AUX_CTRL, val);
+ /* TX FIFO Elasticity LSB */
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_1G_100T_EXT_CTRL, &val);
+ val |= 0x1;
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_1G_100T_EXT_CTRL, val);
+ /* TX FIFO Elasticity MSB */
+ /* Enable expansion register 0x46 (Pattern Generator status) */
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_ACCESS, 0xf46);
+
+ bnx2x_cl45_read(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, &val);
+ val |= 0x4000;
+ bnx2x_cl45_write(bp, phy, MDIO_AN_DEVAD,
+ MDIO_AN_REG_8481_EXPANSION_REG_RD_RW, val);
+ }
+
if (bnx2x_is_8483x_8485x(phy)) {
/* Bring PHY out of super isolate mode as the final step. */
bnx2x_cl45_read_and_write(bp, phy,
@@ -10555,6 +10685,17 @@
return link_up;
}
+static int bnx2x_8485x_format_ver(u32 raw_ver, u8 *str, u16 *len)
+{
+ int status = 0;
+ u32 num;
+
+ num = ((raw_ver & 0xF80) >> 7) << 16 | ((raw_ver & 0x7F) << 8) |
+ ((raw_ver & 0xF000) >> 12);
+ status = bnx2x_3_seq_format_ver(num, str, len);
+ return status;
+}
+
static int bnx2x_848xx_format_ver(u32 raw_ver, u8 *str, u16 *len)
{
int status = 0;
@@ -10651,10 +10792,25 @@
0x0);
} else {
+ /* LED 1 OFF */
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_LED1_MASK,
0x0);
+
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ /* LED 2 OFF */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+ /* LED 3 OFF */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+ }
}
break;
case LED_MODE_FRONT_PANEL_OFF:
@@ -10713,6 +10869,19 @@
MDIO_PMA_REG_8481_SIGNAL_MASK,
0x0);
}
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ /* LED 2 OFF */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x0);
+ /* LED 3 OFF */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x0);
+ }
}
break;
case LED_MODE_ON:
@@ -10776,6 +10945,25 @@
params->port*4,
NIG_MASK_MI_INT);
}
+ }
+ if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ /* Tell LED3 to constant on */
+ bnx2x_cl45_read(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ &val);
+ val &= ~(7<<6);
+ val |= (2<<6); /* A83B[8:6]= 2 */
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LINK_SIGNAL,
+ val);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x20);
+ } else {
bnx2x_cl45_write(bp, phy,
MDIO_PMA_DEVAD,
MDIO_PMA_REG_8481_SIGNAL_MASK,
@@ -10854,6 +11042,17 @@
MDIO_PMA_REG_8481_LINK_SIGNAL,
val);
if (phy->type ==
+ PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84858) {
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED2_MASK,
+ 0x18);
+ bnx2x_cl45_write(bp, phy,
+ MDIO_PMA_DEVAD,
+ MDIO_PMA_REG_8481_LED3_MASK,
+ 0x06);
+ }
+ if (phy->type ==
PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84834) {
/* Restore LED4 source to external link,
* and re-enable interrupts.
@@ -11982,7 +12181,7 @@
.read_status = (read_status_t)bnx2x_848xx_read_status,
.link_reset = (link_reset_t)bnx2x_848x3_link_reset,
.config_loopback = (config_loopback_t)NULL,
- .format_fw_ver = (format_fw_ver_t)bnx2x_848xx_format_ver,
+ .format_fw_ver = (format_fw_ver_t)bnx2x_8485x_format_ver,
.hw_reset = (hw_reset_t)bnx2x_84833_hw_reset_phy,
.set_link_led = (set_link_led_t)bnx2x_848xx_set_link_led,
.phy_specific_func = (phy_specific_func_t)bnx2x_848xx_specific_func
@@ -13807,8 +14006,10 @@
if (CHIP_IS_E3(bp)) {
struct bnx2x_phy *phy = ¶ms->phy[INT_PHY];
bnx2x_set_aer_mmd(params, phy);
- if ((phy->supported & SUPPORTED_20000baseKR2_Full) &&
- (phy->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_20G))
+ if (((phy->req_line_speed == SPEED_AUTO_NEG) &&
+ (phy->speed_cap_mask &
+ PORT_HW_CFG_SPEED_CAPABILITY_D0_20G)) ||
+ (phy->req_line_speed == SPEED_20000))
bnx2x_check_kr2_wa(params, vars, phy);
bnx2x_check_over_curr(params, vars);
if (vars->rx_tx_asic_rst)
diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
index 4dead49..a43dea2 100644
--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_reg.h
@@ -7296,6 +7296,8 @@
#define MDIO_PMA_REG_84823_CTL_LED_CTL_1 0xa8e3
#define MDIO_PMA_REG_84833_CTL_LED_CTL_1 0xa8ec
#define MDIO_PMA_REG_84823_LED3_STRETCH_EN 0x0080
+/* BCM84858 only */
+#define MDIO_PMA_REG_84858_ALLOW_GPHY_ACT 0x8000
/* BCM84833 only */
#define MDIO_84833_TOP_CFG_FW_REV 0x400f
@@ -7337,6 +7339,10 @@
#define PHY84833_STATUS_CMD_NOT_OPEN_FOR_CMDS 0x0040
#define PHY84833_STATUS_CMD_CLEAR_COMPLETE 0x0080
#define PHY84833_STATUS_CMD_OPEN_OVERRIDE 0xa5a5
+/* Mailbox Process */
+#define PHY84833_MB_PROCESS1 1
+#define PHY84833_MB_PROCESS2 2
+#define PHY84833_MB_PROCESS3 3
/* Mailbox status set used by 84858 only */
#define PHY84858_STATUS_CMD_RECEIVED 0x0001
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 5dc89e5..8ab000d 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -69,7 +69,7 @@
#define BNXT_RX_DMA_OFFSET NET_SKB_PAD
#define BNXT_RX_COPY_THRESH 256
-#define BNXT_TX_PUSH_THRESH 92
+#define BNXT_TX_PUSH_THRESH 164
enum board_idx {
BCM57301,
@@ -223,11 +223,12 @@
}
if (free_size == bp->tx_ring_size && length <= bp->tx_push_thresh) {
- struct tx_push_bd *push = txr->tx_push;
- struct tx_bd *tx_push = &push->txbd1;
- struct tx_bd_ext *tx_push1 = &push->txbd2;
- void *pdata = tx_push1 + 1;
- int j;
+ struct tx_push_buffer *tx_push_buf = txr->tx_push;
+ struct tx_push_bd *tx_push = &tx_push_buf->push_bd;
+ struct tx_bd_ext *tx_push1 = &tx_push->txbd2;
+ void *pdata = tx_push_buf->data;
+ u64 *end;
+ int j, push_len;
/* Set COAL_NOW to be ready quickly for the next push */
tx_push->tx_bd_len_flags_type =
@@ -247,6 +248,9 @@
tx_push1->tx_bd_cfa_meta = cpu_to_le32(vlan_tag_flags);
tx_push1->tx_bd_cfa_action = cpu_to_le32(cfa_action);
+ end = PTR_ALIGN(pdata + length + 1, 8) - 1;
+ *end = 0;
+
skb_copy_from_linear_data(skb, pdata, len);
pdata += len;
for (j = 0; j < last_frag; j++) {
@@ -261,22 +265,29 @@
pdata += skb_frag_size(frag);
}
- memcpy(txbd, tx_push, sizeof(*txbd));
+ txbd->tx_bd_len_flags_type = tx_push->tx_bd_len_flags_type;
+ txbd->tx_bd_haddr = txr->data_mapping;
prod = NEXT_TX(prod);
txbd = &txr->tx_desc_ring[TX_RING(prod)][TX_IDX(prod)];
memcpy(txbd, tx_push1, sizeof(*txbd));
prod = NEXT_TX(prod);
- push->doorbell =
+ tx_push->doorbell =
cpu_to_le32(DB_KEY_TX_PUSH | DB_LONG_TX_PUSH | prod);
txr->tx_prod = prod;
netdev_tx_sent_queue(txq, skb->len);
- __iowrite64_copy(txr->tx_doorbell, push,
- (length + sizeof(*push) + 8) / 8);
+ push_len = (length + sizeof(*tx_push) + 7) / 8;
+ if (push_len > 16) {
+ __iowrite64_copy(txr->tx_doorbell, tx_push_buf, 16);
+ __iowrite64_copy(txr->tx_doorbell + 4, tx_push_buf + 1,
+ push_len - 16);
+ } else {
+ __iowrite64_copy(txr->tx_doorbell, tx_push_buf,
+ push_len);
+ }
tx_buf->is_push = 1;
-
goto tx_done;
}
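The push_len > 16 branch above caps the first __iowrite64_copy() at 16 quadwords and continues the remainder through a second region of the doorbell; the precise continuation offset is a property of the hardware layout and is not modeled here. A loose userspace sketch of only the split-copy shape, with memcpy() standing in for __iowrite64_copy() and two invented windows replacing the iomem doorbell.

#include <stdio.h>
#include <string.h>

#define DB_WINDOW_QWORDS 16

static unsigned long long db_win0[DB_WINDOW_QWORDS];	/* fake first window */
static unsigned long long db_win1[16];			/* fake spill window */

static void push_write(const unsigned long long *buf, int qwords)
{
	int first = qwords > DB_WINDOW_QWORDS ? DB_WINDOW_QWORDS : qwords;

	memcpy(db_win0, buf, (size_t)first * 8);
	if (qwords > first)
		memcpy(db_win1, buf + first, (size_t)(qwords - first) * 8);
}

int main(void)
{
	unsigned long long pkt[20] = { 0x1122334455667788ULL };

	push_write(pkt, 20);
	printf("%llx %llx\n", db_win0[0], db_win1[0]);
	return 0;
}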
@@ -1753,7 +1764,7 @@
push_size = L1_CACHE_ALIGN(sizeof(struct tx_push_bd) +
bp->tx_push_thresh);
- if (push_size > 128) {
+ if (push_size > 256) {
push_size = 0;
bp->tx_push_thresh = 0;
}
@@ -1772,7 +1783,6 @@
return rc;
if (bp->tx_push_size) {
- struct tx_bd *txbd;
dma_addr_t mapping;
/* One pre-allocated DMA buffer to backup
@@ -1786,13 +1796,11 @@
if (!txr->tx_push)
return -ENOMEM;
- txbd = &txr->tx_push->txbd1;
-
mapping = txr->tx_push_mapping +
sizeof(struct tx_push_bd);
- txbd->tx_bd_haddr = cpu_to_le64(mapping);
+ txr->data_mapping = cpu_to_le64(mapping);
- memset(txbd + 1, 0, sizeof(struct tx_bd_ext));
+ memset(txr->tx_push, 0, sizeof(struct tx_push_bd));
}
ring->queue_id = bp->q_info[j].queue_id;
if (i % bp->tx_nr_rings_per_tc == (bp->tx_nr_rings_per_tc - 1))
@@ -4546,20 +4554,18 @@
if (!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) &&
link_info->force_pause_setting != link_info->req_flow_ctrl)
update_pause = true;
- if (link_info->req_duplex != link_info->duplex_setting)
- update_link = true;
if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) {
if (BNXT_AUTO_MODE(link_info->auto_mode))
update_link = true;
if (link_info->req_link_speed != link_info->force_link_speed)
update_link = true;
+ if (link_info->req_duplex != link_info->duplex_setting)
+ update_link = true;
} else {
if (link_info->auto_mode == BNXT_LINK_AUTO_NONE)
update_link = true;
if (link_info->advertising != link_info->auto_link_speeds)
update_link = true;
- if (link_info->req_link_speed != link_info->auto_link_speed)
- update_link = true;
}
if (update_link)
@@ -4636,7 +4642,7 @@
if (link_re_init) {
rc = bnxt_update_phy_setting(bp);
if (rc)
- goto open_err;
+ netdev_warn(bp->dev, "failed to update phy settings\n");
}
if (irq_re_init) {
@@ -4654,6 +4660,7 @@
/* Enable TX queues */
bnxt_tx_enable(bp);
mod_timer(&bp->timer, jiffies + bp->current_interval);
+ bnxt_update_link(bp, true);
return 0;
@@ -5670,22 +5677,16 @@
}
/* Initialize the ethtool settings copy with NVM settings */
- if (BNXT_AUTO_MODE(link_info->auto_mode))
- link_info->autoneg |= BNXT_AUTONEG_SPEED;
-
- if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
- if (link_info->auto_pause_setting == BNXT_LINK_PAUSE_BOTH)
- link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
+ if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+ link_info->autoneg = BNXT_AUTONEG_SPEED |
+ BNXT_AUTONEG_FLOW_CTRL;
+ link_info->advertising = link_info->auto_link_speeds;
link_info->req_flow_ctrl = link_info->auto_pause_setting;
- } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ } else {
+ link_info->req_link_speed = link_info->force_link_speed;
+ link_info->req_duplex = link_info->duplex_setting;
link_info->req_flow_ctrl = link_info->force_pause_setting;
}
- link_info->req_duplex = link_info->duplex_setting;
- if (link_info->autoneg & BNXT_AUTONEG_SPEED)
- link_info->req_link_speed = link_info->auto_link_speed;
- else
- link_info->req_link_speed = link_info->force_link_speed;
- link_info->advertising = link_info->auto_link_speeds;
snprintf(phy_ver, PHY_VER_STR_LEN, " ph %d.%d.%d",
link_info->phy_ver[0],
link_info->phy_ver[1],
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
index 8af3ca8..2be51b3 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h
@@ -411,8 +411,8 @@
#define BNXT_NUM_TESTS(bp) 0
-#define BNXT_DEFAULT_RX_RING_SIZE 1023
-#define BNXT_DEFAULT_TX_RING_SIZE 512
+#define BNXT_DEFAULT_RX_RING_SIZE 511
+#define BNXT_DEFAULT_TX_RING_SIZE 511
#define MAX_TPA 64
@@ -523,10 +523,16 @@
struct tx_push_bd {
__le32 doorbell;
- struct tx_bd txbd1;
+ __le32 tx_bd_len_flags_type;
+ u32 tx_bd_opaque;
struct tx_bd_ext txbd2;
};
+struct tx_push_buffer {
+ struct tx_push_bd push_bd;
+ u32 data[25];
+};
+
struct bnxt_tx_ring_info {
struct bnxt_napi *bnapi;
u16 tx_prod;
@@ -538,8 +544,9 @@
dma_addr_t tx_desc_mapping[MAX_TX_PAGES];
- struct tx_push_bd *tx_push;
+ struct tx_push_buffer *tx_push;
dma_addr_t tx_push_mapping;
+ __le64 data_mapping;
#define BNXT_DEV_STATE_CLOSING 0x1
u32 dev_state;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
index 922b898..3238817 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c
@@ -486,15 +486,8 @@
speed_mask |= SUPPORTED_2500baseX_Full;
if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
speed_mask |= SUPPORTED_10000baseT_Full;
- /* TODO: support 25GB, 50GB with different cable type */
- if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
- speed_mask |= SUPPORTED_20000baseMLD2_Full |
- SUPPORTED_20000baseKR2_Full;
if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
- speed_mask |= SUPPORTED_40000baseKR4_Full |
- SUPPORTED_40000baseCR4_Full |
- SUPPORTED_40000baseSR4_Full |
- SUPPORTED_40000baseLR4_Full;
+ speed_mask |= SUPPORTED_40000baseCR4_Full;
return speed_mask;
}
@@ -514,15 +507,8 @@
speed_mask |= ADVERTISED_2500baseX_Full;
if (fw_speeds & BNXT_LINK_SPEED_MSK_10GB)
speed_mask |= ADVERTISED_10000baseT_Full;
- /* TODO: how to advertise 20, 25, 40, 50GB with different cable type ?*/
- if (fw_speeds & BNXT_LINK_SPEED_MSK_20GB)
- speed_mask |= ADVERTISED_20000baseMLD2_Full |
- ADVERTISED_20000baseKR2_Full;
if (fw_speeds & BNXT_LINK_SPEED_MSK_40GB)
- speed_mask |= ADVERTISED_40000baseKR4_Full |
- ADVERTISED_40000baseCR4_Full |
- ADVERTISED_40000baseSR4_Full |
- ADVERTISED_40000baseLR4_Full;
+ speed_mask |= ADVERTISED_40000baseCR4_Full;
return speed_mask;
}
@@ -557,11 +543,12 @@
u16 ethtool_speed;
cmd->supported = bnxt_fw_to_ethtool_support_spds(link_info);
+ cmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
if (link_info->auto_link_speeds)
cmd->supported |= SUPPORTED_Autoneg;
- if (BNXT_AUTO_MODE(link_info->auto_mode)) {
+ if (link_info->autoneg) {
cmd->advertising =
bnxt_fw_to_ethtool_advertised_spds(link_info);
cmd->advertising |= ADVERTISED_Autoneg;
@@ -570,28 +557,16 @@
cmd->autoneg = AUTONEG_DISABLE;
cmd->advertising = 0;
}
- if (link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) {
+ if (link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL) {
if ((link_info->auto_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
BNXT_LINK_PAUSE_BOTH) {
cmd->advertising |= ADVERTISED_Pause;
- cmd->supported |= SUPPORTED_Pause;
} else {
cmd->advertising |= ADVERTISED_Asym_Pause;
- cmd->supported |= SUPPORTED_Asym_Pause;
if (link_info->auto_pause_setting &
BNXT_LINK_PAUSE_RX)
cmd->advertising |= ADVERTISED_Pause;
}
- } else if (link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) {
- if ((link_info->force_pause_setting & BNXT_LINK_PAUSE_BOTH) ==
- BNXT_LINK_PAUSE_BOTH) {
- cmd->supported |= SUPPORTED_Pause;
- } else {
- cmd->supported |= SUPPORTED_Asym_Pause;
- if (link_info->force_pause_setting &
- BNXT_LINK_PAUSE_RX)
- cmd->supported |= SUPPORTED_Pause;
- }
}
cmd->port = PORT_NONE;
@@ -670,6 +645,9 @@
if (advertising & ADVERTISED_10000baseT_Full)
fw_speed_mask |= BNXT_LINK_SPEED_MSK_10GB;
+ if (advertising & ADVERTISED_40000baseCR4_Full)
+ fw_speed_mask |= BNXT_LINK_SPEED_MSK_40GB;
+
return fw_speed_mask;
}
@@ -729,7 +707,7 @@
speed = ethtool_cmd_speed(cmd);
link_info->req_link_speed = bnxt_get_fw_speed(dev, speed);
link_info->req_duplex = BNXT_LINK_DUPLEX_FULL;
- link_info->autoneg &= ~BNXT_AUTONEG_SPEED;
+ link_info->autoneg = 0;
link_info->advertising = 0;
}
@@ -748,8 +726,7 @@
if (BNXT_VF(bp))
return;
- epause->autoneg = !!(link_info->auto_pause_setting &
- BNXT_LINK_PAUSE_BOTH);
+ epause->autoneg = !!(link_info->autoneg & BNXT_AUTONEG_FLOW_CTRL);
epause->rx_pause = ((link_info->pause & BNXT_LINK_PAUSE_RX) != 0);
epause->tx_pause = ((link_info->pause & BNXT_LINK_PAUSE_TX) != 0);
}
@@ -765,6 +742,9 @@
return rc;
if (epause->autoneg) {
+ if (!(link_info->autoneg & BNXT_AUTONEG_SPEED))
+ return -EINVAL;
+
link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL;
link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_BOTH;
} else {
diff --git a/drivers/net/ethernet/broadcom/genet/bcmgenet.c b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
index b15a60d..d7e01a7 100644
--- a/drivers/net/ethernet/broadcom/genet/bcmgenet.c
+++ b/drivers/net/ethernet/broadcom/genet/bcmgenet.c
@@ -2445,8 +2445,7 @@
}
/* Link UP/DOWN event */
- if ((priv->hw_params->flags & GENET_HAS_MDIO_INTR) &&
- (priv->irq0_stat & UMAC_IRQ_LINK_EVENT)) {
+ if (priv->irq0_stat & UMAC_IRQ_LINK_EVENT) {
phy_mac_interrupt(priv->phydev,
!!(priv->irq0_stat & UMAC_IRQ_LINK_UP));
priv->irq0_stat &= ~UMAC_IRQ_LINK_EVENT;
diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
index 49eea89..3010080 100644
--- a/drivers/net/ethernet/broadcom/tg3.c
+++ b/drivers/net/ethernet/broadcom/tg3.c
@@ -7831,6 +7831,14 @@
return ret;
}
+static bool tg3_tso_bug_gso_check(struct tg3_napi *tnapi, struct sk_buff *skb)
+{
+ /* Check if we will never have enough descriptors,
+ * as gso_segs can be more than the current ring size
+ */
+ return skb_shinfo(skb)->gso_segs < tnapi->tx_pending / 3;
+}
+
static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
/* Use GSO to workaround all TSO packets that meet HW bug conditions
@@ -7934,14 +7942,19 @@
* vlan encapsulated.
*/
if (skb->protocol == htons(ETH_P_8021Q) ||
- skb->protocol == htons(ETH_P_8021AD))
- return tg3_tso_bug(tp, tnapi, txq, skb);
+ skb->protocol == htons(ETH_P_8021AD)) {
+ if (tg3_tso_bug_gso_check(tnapi, skb))
+ return tg3_tso_bug(tp, tnapi, txq, skb);
+ goto drop;
+ }
if (!skb_is_gso_v6(skb)) {
if (unlikely((ETH_HLEN + hdr_len) > 80) &&
- tg3_flag(tp, TSO_BUG))
- return tg3_tso_bug(tp, tnapi, txq, skb);
-
+ tg3_flag(tp, TSO_BUG)) {
+ if (tg3_tso_bug_gso_check(tnapi, skb))
+ return tg3_tso_bug(tp, tnapi, txq, skb);
+ goto drop;
+ }
ip_csum = iph->check;
ip_tot_len = iph->tot_len;
iph->check = 0;
@@ -8073,7 +8086,7 @@
if (would_hit_hwbug) {
tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
- if (mss) {
+ if (mss && tg3_tso_bug_gso_check(tnapi, skb)) {
/* If it's a TSO packet, do GSO instead of
* allocating and copying to a large linear SKB
*/
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 8727655..34d269c 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1683,7 +1683,7 @@
dev_dbg(&oct->pci_dev->dev, "Creating Droq: %d\n", q_no);
/* droq creation and local register settings. */
ret_val = octeon_create_droq(oct, q_no, num_descs, desc_size, app_ctx);
- if (ret_val == -1)
+ if (ret_val < 0)
return ret_val;
if (ret_val == 1) {
@@ -2524,7 +2524,7 @@
octeon_swap_8B_data(&resp->timestamp, 1);
- if (unlikely((skb_shinfo(skb)->tx_flags | SKBTX_IN_PROGRESS) != 0)) {
+ if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
struct skb_shared_hwtstamps ts;
u64 ns = resp->timestamp;
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
index 4dba86e..174072b 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_droq.c
@@ -983,5 +983,5 @@
create_droq_fail:
octeon_delete_droq(oct, q_no);
- return -1;
+ return -ENOMEM;
}
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_main.c b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
index c24cb2a..a009bc3 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_main.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_main.c
@@ -574,8 +574,7 @@
static void nicvf_rcv_pkt_handler(struct net_device *netdev,
struct napi_struct *napi,
- struct cmp_queue *cq,
- struct cqe_rx_t *cqe_rx, int cqe_type)
+ struct cqe_rx_t *cqe_rx)
{
struct sk_buff *skb;
struct nicvf *nic = netdev_priv(netdev);
@@ -591,7 +590,7 @@
}
/* Check for errors */
- err = nicvf_check_cqe_rx_errs(nic, cq, cqe_rx);
+ err = nicvf_check_cqe_rx_errs(nic, cqe_rx);
if (err && !cqe_rx->rb_cnt)
return;
@@ -682,8 +681,7 @@
cq_idx, cq_desc->cqe_type);
switch (cq_desc->cqe_type) {
case CQE_TYPE_RX:
- nicvf_rcv_pkt_handler(netdev, napi, cq,
- cq_desc, CQE_TYPE_RX);
+ nicvf_rcv_pkt_handler(netdev, napi, cq_desc);
work_done++;
break;
case CQE_TYPE_SEND:
@@ -1125,7 +1123,6 @@
/* Clear multiqset info */
nic->pnicvf = nic;
- nic->sqs_count = 0;
return 0;
}
@@ -1354,6 +1351,9 @@
drv_stats->tx_frames_ok = stats->tx_ucast_frames_ok +
stats->tx_bcast_frames_ok +
stats->tx_mcast_frames_ok;
+ drv_stats->rx_frames_ok = stats->rx_ucast_frames +
+ stats->rx_bcast_frames +
+ stats->rx_mcast_frames;
drv_stats->rx_drops = stats->rx_drop_red +
stats->rx_drop_overrun;
drv_stats->tx_drops = stats->tx_drops;
@@ -1538,6 +1538,9 @@
nicvf_send_vf_struct(nic);
+ if (!pass1_silicon(nic->pdev))
+ nic->hw_tso = true;
+
/* Check if this VF is in QS only mode */
if (nic->sqs_mode)
return 0;
@@ -1557,9 +1560,6 @@
netdev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO;
- if (!pass1_silicon(nic->pdev))
- nic->hw_tso = true;
-
netdev->netdev_ops = &nicvf_netdev_ops;
netdev->watchdog_timeo = NICVF_TX_TIMEOUT;
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
index d0d1b54..767347b 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.c
@@ -1329,16 +1329,12 @@
}
/* Check for errors in the receive cmp.queue entry */
-int nicvf_check_cqe_rx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_rx_t *cqe_rx)
+int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx)
{
struct nicvf_hw_stats *stats = &nic->hw_stats;
- struct nicvf_drv_stats *drv_stats = &nic->drv_stats;
- if (!cqe_rx->err_level && !cqe_rx->err_opcode) {
- drv_stats->rx_frames_ok++;
+ if (!cqe_rx->err_level && !cqe_rx->err_opcode)
return 0;
- }
if (netif_msg_rx_err(nic))
netdev_err(nic->netdev,
diff --git a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
index c5030a7..6673e11 100644
--- a/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
+++ b/drivers/net/ethernet/cavium/thunder/nicvf_queues.h
@@ -338,8 +338,7 @@
/* Stats */
void nicvf_update_rq_stats(struct nicvf *nic, int rq_idx);
void nicvf_update_sq_stats(struct nicvf *nic, int sq_idx);
-int nicvf_check_cqe_rx_errs(struct nicvf *nic,
- struct cmp_queue *cq, struct cqe_rx_t *cqe_rx);
+int nicvf_check_cqe_rx_errs(struct nicvf *nic, struct cqe_rx_t *cqe_rx);
int nicvf_check_cqe_tx_errs(struct nicvf *nic,
struct cmp_queue *cq, struct cqe_send_t *cqe_tx);
#endif /* NICVF_QUEUES_H */
diff --git a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
index ee04caa..a89721f 100644
--- a/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
+++ b/drivers/net/ethernet/chelsio/cxgb3/t3_hw.c
@@ -681,6 +681,24 @@
return t3_seeprom_write(adapter, EEPROM_STAT_ADDR, enable ? 0xc : 0);
}
+static int vpdstrtouint(char *s, int len, unsigned int base, unsigned int *val)
+{
+ char tok[len + 1];
+
+ memcpy(tok, s, len);
+ tok[len] = 0;
+ return kstrtouint(strim(tok), base, val);
+}
+
+static int vpdstrtou16(char *s, int len, unsigned int base, u16 *val)
+{
+ char tok[len + 1];
+
+ memcpy(tok, s, len);
+ tok[len] = 0;
+ return kstrtou16(strim(tok), base, val);
+}
+
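The helpers above exist because VPD fields are fixed-width and not NUL-terminated, so handing them straight to kstrtouint() could read past the field. Each helper copies the field into a bounded temporary, terminates it, strim()s it, and only then parses. A userspace sketch of the same defensive parse, with strtoul() and a hand-rolled trailing-space trim standing in for the kernel helpers.

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int vpd_str_to_uint(const char *s, int len, int base, unsigned int *val)
{
	char tok[32];
	char *end;

	if (len < 0 || len >= (int)sizeof(tok))
		return -1;
	memcpy(tok, s, len);
	tok[len] = '\0';

	/* trim trailing padding the way strim() would */
	while (len > 0 && isspace((unsigned char)tok[len - 1]))
		tok[--len] = '\0';

	*val = strtoul(tok, &end, base);
	return *end ? -1 : 0;
}

int main(void)
{
	unsigned int cclk;
	const char raw[6] = { '2', '0', '0', ' ', ' ', ' ' };	/* no NUL */

	if (!vpd_str_to_uint(raw, 6, 10, &cclk))
		printf("cclk=%u\n", cclk);	/* prints cclk=200 */
	return 0;
}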
/**
* get_vpd_params - read VPD parameters from VPD EEPROM
* @adapter: adapter to read
@@ -709,19 +727,19 @@
return ret;
}
- ret = kstrtouint(vpd.cclk_data, 10, &p->cclk);
+ ret = vpdstrtouint(vpd.cclk_data, vpd.cclk_len, 10, &p->cclk);
if (ret)
return ret;
- ret = kstrtouint(vpd.mclk_data, 10, &p->mclk);
+ ret = vpdstrtouint(vpd.mclk_data, vpd.mclk_len, 10, &p->mclk);
if (ret)
return ret;
- ret = kstrtouint(vpd.uclk_data, 10, &p->uclk);
+ ret = vpdstrtouint(vpd.uclk_data, vpd.uclk_len, 10, &p->uclk);
if (ret)
return ret;
- ret = kstrtouint(vpd.mdc_data, 10, &p->mdc);
+ ret = vpdstrtouint(vpd.mdc_data, vpd.mdc_len, 10, &p->mdc);
if (ret)
return ret;
- ret = kstrtouint(vpd.mt_data, 10, &p->mem_timing);
+ ret = vpdstrtouint(vpd.mt_data, vpd.mt_len, 10, &p->mem_timing);
if (ret)
return ret;
memcpy(p->sn, vpd.sn_data, SERNUM_LEN);
@@ -733,10 +751,12 @@
} else {
p->port_type[0] = hex_to_bin(vpd.port0_data[0]);
p->port_type[1] = hex_to_bin(vpd.port1_data[0]);
- ret = kstrtou16(vpd.xaui0cfg_data, 16, &p->xauicfg[0]);
+ ret = vpdstrtou16(vpd.xaui0cfg_data, vpd.xaui0cfg_len, 16,
+ &p->xauicfg[0]);
if (ret)
return ret;
- ret = kstrtou16(vpd.xaui1cfg_data, 16, &p->xauicfg[1]);
+ ret = vpdstrtou16(vpd.xaui1cfg_data, vpd.xaui1cfg_len, 16,
+ &p->xauicfg[1]);
if (ret)
return ret;
}
diff --git a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
index a8dda63..06bc2d2 100644
--- a/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
+++ b/drivers/net/ethernet/chelsio/cxgb4/t4_pci_id_tbl.h
@@ -165,6 +165,7 @@
CH_PCI_ID_TABLE_FENTRY(0x5098), /* Custom 2x40G QSFP */
CH_PCI_ID_TABLE_FENTRY(0x5099), /* Custom 2x40G QSFP */
CH_PCI_ID_TABLE_FENTRY(0x509a), /* Custom T520-CR */
+ CH_PCI_ID_TABLE_FENTRY(0x509b), /* Custom T540-CR LOM */
/* T6 adapters:
*/
diff --git a/drivers/net/ethernet/cisco/enic/enic.h b/drivers/net/ethernet/cisco/enic/enic.h
index 1671fa3..7ba6d53 100644
--- a/drivers/net/ethernet/cisco/enic/enic.h
+++ b/drivers/net/ethernet/cisco/enic/enic.h
@@ -33,7 +33,7 @@
#define DRV_NAME "enic"
#define DRV_DESCRIPTION "Cisco VIC Ethernet NIC Driver"
-#define DRV_VERSION "2.3.0.12"
+#define DRV_VERSION "2.3.0.20"
#define DRV_COPYRIGHT "Copyright 2008-2013 Cisco Systems, Inc"
#define ENIC_BARS_MAX 6
diff --git a/drivers/net/ethernet/cisco/enic/vnic_dev.c b/drivers/net/ethernet/cisco/enic/vnic_dev.c
index 1ffd105..1fdf5fe 100644
--- a/drivers/net/ethernet/cisco/enic/vnic_dev.c
+++ b/drivers/net/ethernet/cisco/enic/vnic_dev.c
@@ -298,7 +298,8 @@
int wait)
{
struct devcmd2_controller *dc2c = vdev->devcmd2;
- struct devcmd2_result *result = dc2c->result + dc2c->next_result;
+ struct devcmd2_result *result;
+ u8 color;
unsigned int i;
int delay, err;
u32 fetch_index, new_posted;
@@ -336,13 +337,17 @@
if (dc2c->cmd_ring[posted].flags & DEVCMD2_FNORESULT)
return 0;
+ result = dc2c->result + dc2c->next_result;
+ color = dc2c->color;
+
+ dc2c->next_result++;
+ if (dc2c->next_result == dc2c->result_size) {
+ dc2c->next_result = 0;
+ dc2c->color = dc2c->color ? 0 : 1;
+ }
+
for (delay = 0; delay < wait; delay++) {
- if (result->color == dc2c->color) {
- dc2c->next_result++;
- if (dc2c->next_result == dc2c->result_size) {
- dc2c->next_result = 0;
- dc2c->color = dc2c->color ? 0 : 1;
- }
+ if (result->color == color) {
if (result->error) {
err = result->error;
if (err != ERR_ECMDUNKNOWN ||
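The vnic_dev fix above snapshots the result slot and expected color, then advances the ring bookkeeping before polling, instead of advancing inside the loop on a match; after a wrap-around a stale completion can no longer be mistaken for a fresh one. A minimal sketch of a color-bit completion ring following that fixed ordering; sizes and names are invented.

#include <stdio.h>

#define RING_SIZE 4

struct result {
	int color;	/* toggles every ring pass */
	int payload;
};

static struct result ring[RING_SIZE];
static int next_result;
static int expected_color = 1;

static int poll_result(int *payload)
{
	struct result *r = &ring[next_result];
	int color = expected_color;	/* snapshot before advancing */

	/* advance bookkeeping first, as the fix above does */
	if (++next_result == RING_SIZE) {
		next_result = 0;
		expected_color ^= 1;
	}

	if (r->color != color)
		return -1;		/* not completed yet */
	*payload = r->payload;
	return 0;
}

int main(void)
{
	int v = 0;

	ring[0].color = 1;
	ring[0].payload = 7;
	printf("%s v=%d\n", poll_result(&v) ? "pending" : "done", v);
	return 0;
}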
diff --git a/drivers/net/ethernet/davicom/dm9000.c b/drivers/net/ethernet/davicom/dm9000.c
index cf94b72..48d9194 100644
--- a/drivers/net/ethernet/davicom/dm9000.c
+++ b/drivers/net/ethernet/davicom/dm9000.c
@@ -128,7 +128,6 @@
struct resource *data_res;
struct resource *addr_req; /* resources requested */
struct resource *data_req;
- struct resource *irq_res;
int irq_wake;
@@ -1300,22 +1299,16 @@
dm9000_open(struct net_device *dev)
{
struct board_info *db = netdev_priv(dev);
- unsigned long irqflags = db->irq_res->flags & IRQF_TRIGGER_MASK;
if (netif_msg_ifup(db))
dev_dbg(db->dev, "enabling %s\n", dev->name);
- /* If there is no IRQ type specified, default to something that
- * may work, and tell the user that this is a problem */
-
- if (irqflags == IRQF_TRIGGER_NONE)
- irqflags = irq_get_trigger_type(dev->irq);
-
- if (irqflags == IRQF_TRIGGER_NONE)
+ /* If there is no IRQ type specified, tell the user that this is a
+ * problem
+ */
+ if (irq_get_trigger_type(dev->irq) == IRQF_TRIGGER_NONE)
dev_warn(db->dev, "WARNING: no IRQ resource flags set.\n");
- irqflags |= IRQF_SHARED;
-
/* GPIO0 on pre-activate PHY, Reg 1F is not set by reset */
iow(db, DM9000_GPR, 0); /* REG_1F bit0 activate phyxcer */
mdelay(1); /* delay needs by DM9000B */
@@ -1323,7 +1316,8 @@
/* Initialize DM9000 board */
dm9000_init_dm9000(dev);
- if (request_irq(dev->irq, dm9000_interrupt, irqflags, dev->name, dev))
+ if (request_irq(dev->irq, dm9000_interrupt, IRQF_SHARED,
+ dev->name, dev))
return -EAGAIN;
/* Now that we have an interrupt handler hooked up we can unmask
* our interrupts
@@ -1500,15 +1494,22 @@
db->addr_res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
db->data_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
- db->irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
- if (db->addr_res == NULL || db->data_res == NULL ||
- db->irq_res == NULL) {
- dev_err(db->dev, "insufficient resources\n");
+ if (!db->addr_res || !db->data_res) {
+ dev_err(db->dev, "insufficient resources addr=%p data=%p\n",
+ db->addr_res, db->data_res);
ret = -ENOENT;
goto out;
}
+ ndev->irq = platform_get_irq(pdev, 0);
+ if (ndev->irq < 0) {
+ dev_err(db->dev, "interrupt resource unavailable: %d\n",
+ ndev->irq);
+ ret = ndev->irq;
+ goto out;
+ }
+
db->irq_wake = platform_get_irq(pdev, 1);
if (db->irq_wake >= 0) {
dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
@@ -1570,7 +1571,6 @@
/* fill in parameters for net-dev structure */
ndev->base_addr = (unsigned long)db->io_addr;
- ndev->irq = db->irq_res->start;
/* ensure at least we have a default set of IO routines */
dm9000_set_io(db, iosize);
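The dm9000 probe now fetches the interrupt with platform_get_irq() instead of keeping an IORESOURCE_IRQ pointer around, and it forwards the returned negative errno rather than collapsing every failure into -ENOENT. A small userspace sketch of that propagation style; get_irq_number() is a made-up stand-in, not a real API:

    #include <errno.h>
    #include <stdio.h>

    /* Stand-in for platform_get_irq(): negative errno on failure. */
    static int get_irq_number(int index)
    {
    	return index == 0 ? 37 : -ENXIO;
    }

    static int probe(int index)
    {
    	int irq = get_irq_number(index);

    	if (irq < 0)
    		return irq;	/* forward the precise error code */

    	printf("using irq %d\n", irq);
    	return 0;
    }

    int main(void)
    {
    	printf("probe(0) = %d\n", probe(0));
    	printf("probe(1) = %d\n", probe(1));	/* -ENXIO, not a generic -ENOENT */
    	return 0;
    }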
diff --git a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
index a7139f5..678f501 100644
--- a/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
+++ b/drivers/net/ethernet/fujitsu/fmvj18x_cs.c
@@ -469,8 +469,8 @@
goto failed;
}
/* Read MACID from CIS */
- for (i = 5; i < 11; i++)
- dev->dev_addr[i] = buf[i];
+ for (i = 0; i < 6; i++)
+ dev->dev_addr[i] = buf[i + 5];
kfree(buf);
} else {
if (pcmcia_get_mac_from_cis(link, dev))
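The MAC copy fix above is pure indexing: the CIS buffer carries the station address at offset 5, while the destination is a six-byte array indexed from 0, so the old loop wrote past dev_addr[5]. A tiny standalone illustration of the corrected form:

    #include <stdio.h>

    int main(void)
    {
    	/* CIS dump with the station address starting at offset 5 */
    	unsigned char buf[16] = { [5] = 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
    	unsigned char dev_addr[6];
    	int i;

    	for (i = 0; i < 6; i++)		/* destination indexed from 0 */
    		dev_addr[i] = buf[i + 5];

    	for (i = 0; i < 6; i++)
    		printf("%02x%c", dev_addr[i], i == 5 ? '\n' : ':');
    	return 0;
    }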
diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c
index 662c2ee..b0ae69f 100644
--- a/drivers/net/ethernet/marvell/mvneta.c
+++ b/drivers/net/ethernet/marvell/mvneta.c
@@ -370,6 +370,11 @@
struct net_device *dev;
struct notifier_block cpu_notifier;
int rxq_def;
+ /* Protect access to the per-CPU interrupt registers,
+ * ensuring that the configuration remains coherent.
+ */
+ spinlock_t lock;
+ bool is_stopped;
/* Core clock */
struct clk *clk;
@@ -1038,6 +1043,43 @@
}
}
+static void mvneta_percpu_unmask_interrupt(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ /* All the queues are unmasked, but actually only the ones
+ * mapped to this CPU will be unmasked
+ */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK,
+ MVNETA_RX_INTR_MASK_ALL |
+ MVNETA_TX_INTR_MASK_ALL |
+ MVNETA_MISCINTR_INTR_MASK);
+}
+
+static void mvneta_percpu_mask_interrupt(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ /* All the queues are masked, but actually only the ones
+ * mapped to this CPU will be masked
+ */
+ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+}
+
+static void mvneta_percpu_clear_intr_cause(void *arg)
+{
+ struct mvneta_port *pp = arg;
+
+ /* All the queues are cleared, but actually only the ones
+ * mapped to this CPU will be cleared
+ */
+ mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+ mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+}
+
/* This method sets defaults to the NETA port:
* Clears interrupt Cause and Mask registers.
* Clears all MAC tables.
@@ -1055,14 +1097,10 @@
int max_cpu = num_present_cpus();
/* Clear all Cause registers */
- mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0);
- mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
- mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
+ on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
/* Mask all interrupts */
- mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
- mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
- mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
mvreg_write(pp, MVNETA_INTR_ENABLE, 0);
/* Enable MBUS Retry bit16 */
@@ -2528,34 +2566,9 @@
return 0;
}
-static void mvneta_percpu_unmask_interrupt(void *arg)
-{
- struct mvneta_port *pp = arg;
-
- /* All the queue are unmasked, but actually only the ones
- * maped to this CPU will be unmasked
- */
- mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK_ALL |
- MVNETA_TX_INTR_MASK_ALL |
- MVNETA_MISCINTR_INTR_MASK);
-}
-
-static void mvneta_percpu_mask_interrupt(void *arg)
-{
- struct mvneta_port *pp = arg;
-
- /* All the queue are masked, but actually only the ones
- * maped to this CPU will be masked
- */
- mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
- mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
- mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
-}
-
static void mvneta_start_dev(struct mvneta_port *pp)
{
- unsigned int cpu;
+ int cpu;
mvneta_max_rx_size_set(pp, pp->pkt_size);
mvneta_txq_max_tx_size_set(pp, pp->pkt_size);
@@ -2564,16 +2577,15 @@
mvneta_port_enable(pp);
/* Enable polling on the port */
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
napi_enable(&port->napi);
}
/* Unmask interrupts. It has to be done from each CPU */
- for_each_online_cpu(cpu)
- smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt,
- pp, true);
+ on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
MVNETA_CAUSE_LINK_CHANGE |
@@ -2589,7 +2601,7 @@
phy_stop(pp->phy_dev);
- for_each_present_cpu(cpu) {
+ for_each_online_cpu(cpu) {
struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu);
napi_disable(&port->napi);
@@ -2604,13 +2616,10 @@
mvneta_port_disable(pp);
/* Clear all ethernet port interrupts */
- mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0);
- mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0);
+ on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true);
/* Mask all ethernet port interrupts */
- mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
- mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
- mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
mvneta_tx_reset(pp);
mvneta_rx_reset(pp);
@@ -2847,11 +2856,20 @@
disable_percpu_irq(pp->dev->irq);
}
+/* Electing a CPU must be done atomically: it should be done either
+ * before or after the removal/insertion of a CPU, and this function
+ * is not reentrant.
+ */
static void mvneta_percpu_elect(struct mvneta_port *pp)
{
- int online_cpu_idx, max_cpu, cpu, i = 0;
+ int elected_cpu = 0, max_cpu, cpu, i = 0;
- online_cpu_idx = pp->rxq_def % num_online_cpus();
+ /* Use the CPU associated with the rxq when it is online; in all
+ * other cases, use CPU 0, which can't be offline.
+ */
+ if (cpu_online(pp->rxq_def))
+ elected_cpu = pp->rxq_def;
+
max_cpu = num_present_cpus();
for_each_online_cpu(cpu) {
@@ -2862,7 +2880,7 @@
if ((rxq % max_cpu) == cpu)
rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq);
- if (i == online_cpu_idx)
+ if (cpu == elected_cpu)
/* Map the default receive queue queue to the
* elected CPU
*/
@@ -2873,7 +2891,7 @@
* the CPU bound to the default RX queue
*/
if (txq_number == 1)
- txq_map = (i == online_cpu_idx) ?
+ txq_map = (cpu == elected_cpu) ?
MVNETA_CPU_TXQ_ACCESS(1) : 0;
else
txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) &
@@ -2902,6 +2920,14 @@
switch (action) {
case CPU_ONLINE:
case CPU_ONLINE_FROZEN:
+ spin_lock(&pp->lock);
+ /* Configuring the driver for a new CPU while the
+ * driver is stopping is racy, so just avoid it.
+ */
+ if (pp->is_stopped) {
+ spin_unlock(&pp->lock);
+ break;
+ }
netif_tx_stop_all_queues(pp->dev);
/* We have to synchronise on the napi of each CPU
@@ -2917,9 +2943,7 @@
}
/* Mask all ethernet port interrupts */
- mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
- mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
- mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
napi_enable(&port->napi);
@@ -2934,27 +2958,25 @@
*/
mvneta_percpu_elect(pp);
- /* Unmask all ethernet port interrupts, as this
- * notifier is called for each CPU then the CPU to
- * Queue mapping is applied
- */
- mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number) |
- MVNETA_TX_INTR_MASK(txq_number) |
- MVNETA_MISCINTR_INTR_MASK);
+ /* Unmask all ethernet port interrupts */
+ on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
MVNETA_CAUSE_LINK_CHANGE |
MVNETA_CAUSE_PSC_SYNC_CHANGE);
netif_tx_start_all_queues(pp->dev);
+ spin_unlock(&pp->lock);
break;
case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN:
netif_tx_stop_all_queues(pp->dev);
+ /* Thanks to this lock we are sure that any pending
+ * cpu election is done
+ */
+ spin_lock(&pp->lock);
/* Mask all ethernet port interrupts */
- mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0);
- mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0);
- mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0);
+ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
+ spin_unlock(&pp->lock);
napi_synchronize(&port->napi);
napi_disable(&port->napi);
@@ -2968,12 +2990,11 @@
case CPU_DEAD:
case CPU_DEAD_FROZEN:
/* Check if a new CPU must be elected now this one is down */
+ spin_lock(&pp->lock);
mvneta_percpu_elect(pp);
+ spin_unlock(&pp->lock);
/* Unmask all ethernet port interrupts */
- mvreg_write(pp, MVNETA_INTR_NEW_MASK,
- MVNETA_RX_INTR_MASK(rxq_number) |
- MVNETA_TX_INTR_MASK(txq_number) |
- MVNETA_MISCINTR_INTR_MASK);
+ on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
mvreg_write(pp, MVNETA_INTR_MISC_MASK,
MVNETA_CAUSE_PHY_STATUS_CHANGE |
MVNETA_CAUSE_LINK_CHANGE |
@@ -2988,7 +3009,7 @@
static int mvneta_open(struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
- int ret, cpu;
+ int ret;
pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu);
pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) +
@@ -3010,22 +3031,12 @@
goto err_cleanup_txqs;
}
- /* Even though the documentation says that request_percpu_irq
- * doesn't enable the interrupts automatically, it actually
- * does so on the local CPU.
- *
- * Make sure it's disabled.
- */
- mvneta_percpu_disable(pp);
-
/* Enable per-CPU interrupt on all the CPU to handle our RX
* queue interrupts
*/
- for_each_online_cpu(cpu)
- smp_call_function_single(cpu, mvneta_percpu_enable,
- pp, true);
+ on_each_cpu(mvneta_percpu_enable, pp, true);
-
+ pp->is_stopped = false;
/* Register a CPU notifier to handle the case where our CPU
* might be taken offline.
*/
@@ -3057,13 +3068,20 @@
static int mvneta_stop(struct net_device *dev)
{
struct mvneta_port *pp = netdev_priv(dev);
- int cpu;
+ /* Indicate that we are stopping, so that the notifiers don't set up
+ * the driver for new CPUs
+ */
+ spin_lock(&pp->lock);
+ pp->is_stopped = true;
mvneta_stop_dev(pp);
mvneta_mdio_remove(pp);
unregister_cpu_notifier(&pp->cpu_notifier);
- for_each_present_cpu(cpu)
- smp_call_function_single(cpu, mvneta_percpu_disable, pp, true);
+ /* Now that the notifiers are unregistered, we can release the
+ * lock
+ */
+ spin_unlock(&pp->lock);
+ on_each_cpu(mvneta_percpu_disable, pp, true);
free_percpu_irq(dev->irq, pp->ports);
mvneta_cleanup_rxqs(pp);
mvneta_cleanup_txqs(pp);
@@ -3312,9 +3330,7 @@
netif_tx_stop_all_queues(pp->dev);
- for_each_online_cpu(cpu)
- smp_call_function_single(cpu, mvneta_percpu_mask_interrupt,
- pp, true);
+ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
/* We have to synchronise on the napi of each CPU */
for_each_online_cpu(cpu) {
@@ -3335,7 +3351,9 @@
mvreg_write(pp, MVNETA_PORT_CONFIG, val);
/* Update the elected CPU matching the new rxq_def */
+ spin_lock(&pp->lock);
mvneta_percpu_elect(pp);
+ spin_unlock(&pp->lock);
/* We have to synchronise on the napi of each CPU */
for_each_online_cpu(cpu) {
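Taken together, the mvneta changes funnel every per-CPU register write through on_each_cpu() helpers and guard CPU election with a spinlock plus an is_stopped flag, so a hotplug notifier that fires during ifdown backs off instead of reprogramming a device being torn down. A minimal pthread sketch of that flag-under-lock protocol, with illustrative names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static bool is_stopped;

    /* Mirrors the CPU_ONLINE notifier: bail out if teardown already began. */
    static void cpu_online_event(void)
    {
    	pthread_mutex_lock(&lock);
    	if (is_stopped) {
    		pthread_mutex_unlock(&lock);
    		printf("hotplug event ignored: device stopping\n");
    		return;
    	}
    	printf("reconfigure queues for the new CPU\n");
    	pthread_mutex_unlock(&lock);
    }

    /* Mirrors the stop path: once the flag is set under the lock, any
     * pending election has finished and no new one can start.
     */
    static void stop_device(void)
    {
    	pthread_mutex_lock(&lock);
    	is_stopped = true;
    	pthread_mutex_unlock(&lock);
    	printf("device stopped\n");
    }

    int main(void)
    {
    	cpu_online_event();	/* accepted */
    	stop_device();
    	cpu_online_event();	/* refused */
    	return 0;
    }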
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index a4beccf..c797971a 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -3061,7 +3061,7 @@
pe = kzalloc(sizeof(*pe), GFP_KERNEL);
if (!pe)
- return -1;
+ return -ENOMEM;
mvpp2_prs_tcam_lu_set(pe, MVPP2_PRS_LU_MAC);
pe->index = tid;
@@ -3077,7 +3077,7 @@
if (pmap == 0) {
if (add) {
kfree(pe);
- return -1;
+ return -EINVAL;
}
mvpp2_prs_hw_inv(priv, pe->index);
priv->prs_shadow[pe->index].valid = false;
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 715de8a..c7e9399 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -182,10 +182,17 @@
err = mlx4_reset_slave(dev);
else
err = mlx4_reset_master(dev);
- BUG_ON(err != 0);
+ if (!err) {
+ mlx4_err(dev, "device was reset successfully\n");
+ } else {
+ /* EEH could have disabled the PCI channel during reset. That's
+ * recoverable and the PCI error flow will handle it.
+ */
+ if (!pci_channel_offline(dev->persist->pdev))
+ BUG_ON(1);
+ }
dev->persist->state |= MLX4_DEVICE_STATE_INTERNAL_ERROR;
- mlx4_err(dev, "device was reset successfully\n");
mutex_unlock(&persist->device_state_mutex);
/* At that step HW was already reset, now notify clients */
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index d48d579..e94ca1c 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2429,7 +2429,7 @@
flush_workqueue(priv->mfunc.master.comm_wq);
destroy_workqueue(priv->mfunc.master.comm_wq);
err_slaves:
- while (--i) {
+ while (i--) {
for (port = 1; port <= MLX4_MAX_PORTS; port++)
kfree(priv->mfunc.master.slave_state[i].vlan_filter[port]);
}
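The one-character change from while (--i) to while (i--) matters in unwind loops: after i items were set up and item i failed, the fixed form frees items i-1 down to 0, while the buggy form skips item 0, and had the very first allocation failed (i == 0), --i would have gone to -1 and kept the loop running. A standalone demonstration:

    #include <stdio.h>

    int main(void)
    {
    	int i;

    	i = 3;			/* three slaves were set up, then one failed */
    	while (i--)
    		printf("fixed: free slave %d\n", i);	/* 2, 1, 0 */

    	i = 3;
    	while (--i)
    		printf("buggy: free slave %d\n", i);	/* 2, 1; slave 0 leaks */
    	return 0;
    }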
diff --git a/drivers/net/ethernet/mellanox/mlx4/cq.c b/drivers/net/ethernet/mellanox/mlx4/cq.c
index 3348e64..a849da9 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cq.c
@@ -318,7 +318,9 @@
if (timestamp_en)
cq_context->flags |= cpu_to_be32(1 << 19);
- cq_context->logsize_usrpage = cpu_to_be32((ilog2(nent) << 24) | uar->index);
+ cq_context->logsize_usrpage =
+ cpu_to_be32((ilog2(nent) << 24) |
+ mlx4_to_hw_uar_index(dev, uar->index));
cq_context->comp_eqn = priv->eq_table.eq[MLX4_CQ_TO_EQ_VECTOR(vector)].eqn;
cq_context->log_page_size = mtt->page_shift - MLX4_ICM_PAGE_SHIFT;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_clock.c b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
index 038f9ce..1494997 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_clock.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_clock.c
@@ -236,6 +236,24 @@
.enable = mlx4_en_phc_enable,
};
+#define MLX4_EN_WRAP_AROUND_SEC 10ULL
+
+/* This function calculates the max shift that still accommodates a
+ * MLX4_EN_WRAP_AROUND_SEC-second range of values in the cycles register.
+ */
+static u32 freq_to_shift(u16 freq)
+{
+ u32 freq_khz = freq * 1000;
+ u64 max_val_cycles = freq_khz * 1000 * MLX4_EN_WRAP_AROUND_SEC;
+ u64 max_val_cycles_rounded = is_power_of_2(max_val_cycles + 1) ?
+ max_val_cycles : roundup_pow_of_two(max_val_cycles) - 1;
+ /* calculate max possible multiplier in order to fit in 64bit */
+ u64 max_mul = div_u64(0xffffffffffffffffULL, max_val_cycles_rounded);
+
+ /* This comes from the reverse of clocksource_khz2mult */
+ return ilog2(div_u64(max_mul * freq_khz, 1000000));
+}
+
void mlx4_en_init_timestamp(struct mlx4_en_dev *mdev)
{
struct mlx4_dev *dev = mdev->dev;
@@ -254,12 +272,7 @@
memset(&mdev->cycles, 0, sizeof(mdev->cycles));
mdev->cycles.read = mlx4_en_read_clock;
mdev->cycles.mask = CLOCKSOURCE_MASK(48);
- /* Using shift to make calculation more accurate. Since current HW
- * clock frequency is 427 MHz, and cycles are given using a 48 bits
- * register, the biggest shift when calculating using u64, is 14
- * (max_cycles * multiplier < 2^64)
- */
- mdev->cycles.shift = 14;
+ mdev->cycles.shift = freq_to_shift(dev->caps.hca_core_clock);
mdev->cycles.mult =
clocksource_khz2mult(1000 * dev->caps.hca_core_clock, mdev->cycles.shift);
mdev->nominal_c_mult = mdev->cycles.mult;
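freq_to_shift() replaces the hard-coded clocksource shift of 14 with one derived from the actual core clock: it bounds the cycle count to a 10-second wrap window, rounds that bound up to a power-of-two mask, computes the largest multiplier that keeps cycles * mult within 64 bits, and converts that back to a shift by inverting clocksource_khz2mult(). A userspace re-derivation with plain C stand-ins for the kernel helpers; for the 427 MHz clock the removed comment mentioned, it yields 30, comfortably above the old fixed 14 because only 10 seconds of cycles, not the full 48-bit register, must fit:

    #include <inttypes.h>
    #include <stdint.h>
    #include <stdio.h>

    #define WRAP_AROUND_SEC 10ULL

    static int is_power_of_2(uint64_t v)
    {
    	return v && !(v & (v - 1));
    }

    static uint64_t roundup_pow_of_two(uint64_t v)
    {
    	uint64_t p = 1;

    	while (p < v)
    		p <<= 1;
    	return p;
    }

    static uint32_t ilog2_u64(uint64_t v)
    {
    	uint32_t l = 0;

    	while (v >>= 1)
    		l++;
    	return l;
    }

    static uint32_t freq_to_shift(uint16_t freq_mhz)
    {
    	uint64_t freq_khz = (uint64_t)freq_mhz * 1000;
    	uint64_t max_cycles = freq_khz * 1000 * WRAP_AROUND_SEC;
    	uint64_t rounded = is_power_of_2(max_cycles + 1) ?
    			   max_cycles : roundup_pow_of_two(max_cycles) - 1;
    	uint64_t max_mul = UINT64_MAX / rounded;	/* keep mult * cycles < 2^64 */

    	/* invert clocksource_khz2mult(): mult = khz * 2^shift / 10^6 */
    	return ilog2_u64(max_mul * freq_khz / 1000000);
    }

    int main(void)
    {
    	printf("shift for 427 MHz = %" PRIu32 "\n", freq_to_shift(427));
    	return 0;
    }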
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
index 0c7e3f6..f191a16 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_netdev.c
@@ -2344,8 +2344,6 @@
/* set offloads */
priv->dev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL;
- priv->dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
- priv->dev->features |= NETIF_F_GSO_UDP_TUNNEL;
}
static void mlx4_en_del_vxlan_offloads(struct work_struct *work)
@@ -2356,8 +2354,6 @@
/* unset offloads */
priv->dev->hw_enc_features &= ~(NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
NETIF_F_TSO | NETIF_F_GSO_UDP_TUNNEL);
- priv->dev->hw_features &= ~NETIF_F_GSO_UDP_TUNNEL;
- priv->dev->features &= ~NETIF_F_GSO_UDP_TUNNEL;
ret = mlx4_SET_PORT_VXLAN(priv->mdev->dev, priv->port,
VXLAN_STEER_BY_OUTER_MAC, 0);
@@ -2980,6 +2976,11 @@
priv->rss_hash_fn = ETH_RSS_HASH_TOP;
}
+ if (mdev->dev->caps.tunnel_offload_mode == MLX4_TUNNEL_OFFLOAD_MODE_VXLAN) {
+ dev->hw_features |= NETIF_F_GSO_UDP_TUNNEL;
+ dev->features |= NETIF_F_GSO_UDP_TUNNEL;
+ }
+
mdev->pndev[port] = dev;
mdev->upper[port] = NULL;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_port.c b/drivers/net/ethernet/mellanox/mlx4/en_port.c
index ee99e67..3904b5f 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_port.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_port.c
@@ -238,11 +238,11 @@
stats->collisions = 0;
stats->rx_dropped = be32_to_cpu(mlx4_en_stats->RDROP);
stats->rx_length_errors = be32_to_cpu(mlx4_en_stats->RdropLength);
- stats->rx_over_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->rx_over_errors = 0;
stats->rx_crc_errors = be32_to_cpu(mlx4_en_stats->RCRC);
stats->rx_frame_errors = 0;
stats->rx_fifo_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
- stats->rx_missed_errors = be32_to_cpu(mlx4_en_stats->RdropOvflw);
+ stats->rx_missed_errors = 0;
stats->tx_aborted_errors = 0;
stats->tx_carrier_errors = 0;
stats->tx_fifo_errors = 0;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_resources.c b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
index 12aab5a..02e925d 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_resources.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_resources.c
@@ -58,7 +58,8 @@
} else {
context->sq_size_stride = ilog2(TXBB_SIZE) - 4;
}
- context->usr_page = cpu_to_be32(mdev->priv_uar.index);
+ context->usr_page = cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
+ mdev->priv_uar.index));
context->local_qpn = cpu_to_be32(qpn);
context->pri_path.ackto = 1 & 0x07;
context->pri_path.sched_queue = 0x83 | (priv->port - 1) << 6;
diff --git a/drivers/net/ethernet/mellanox/mlx4/en_tx.c b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
index 4421bf5..e0946ab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/en_tx.c
+++ b/drivers/net/ethernet/mellanox/mlx4/en_tx.c
@@ -213,7 +213,9 @@
mlx4_en_fill_qp_context(priv, ring->size, ring->stride, 1, 0, ring->qpn,
ring->cqn, user_prio, &ring->context);
if (ring->bf_alloced)
- ring->context.usr_page = cpu_to_be32(ring->bf.uar->index);
+ ring->context.usr_page =
+ cpu_to_be32(mlx4_to_hw_uar_index(mdev->dev,
+ ring->bf.uar->index));
err = mlx4_qp_to_ready(mdev->dev, &ring->wqres.mtt, &ring->context,
&ring->qp, &ring->qp_state);
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 4696053..f613977 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -940,9 +940,10 @@
if (!priv->eq_table.uar_map[index]) {
priv->eq_table.uar_map[index] =
- ioremap(pci_resource_start(dev->persist->pdev, 2) +
- ((eq->eqn / 4) << PAGE_SHIFT),
- PAGE_SIZE);
+ ioremap(
+ pci_resource_start(dev->persist->pdev, 2) +
+ ((eq->eqn / 4) << (dev->uar_page_shift)),
+ (1 << (dev->uar_page_shift)));
if (!priv->eq_table.uar_map[index]) {
mlx4_err(dev, "Couldn't map EQ doorbell for EQN 0x%06x\n",
eq->eqn);
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index f1b6d21..2cc3c62 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -168,6 +168,20 @@
static atomic_t pf_loading = ATOMIC_INIT(0);
+static inline void mlx4_set_num_reserved_uars(struct mlx4_dev *dev,
+ struct mlx4_dev_cap *dev_cap)
+{
+ /* The reserved_uars value is expressed in units of the system page
+ * size. Therefore, an adjustment is applied when the uar page size is
+ * smaller than the system page size
+ */
+ dev->caps.reserved_uars =
+ max_t(int,
+ mlx4_get_num_reserved_uar(dev),
+ dev_cap->reserved_uars /
+ (1 << (PAGE_SHIFT - dev->uar_page_shift)));
+}
+
int mlx4_check_port_params(struct mlx4_dev *dev,
enum mlx4_port_type *port_type)
{
@@ -386,8 +400,6 @@
dev->caps.reserved_mtts = dev_cap->reserved_mtts;
dev->caps.reserved_mrws = dev_cap->reserved_mrws;
- /* The first 128 UARs are used for EQ doorbells */
- dev->caps.reserved_uars = max_t(int, 128, dev_cap->reserved_uars);
dev->caps.reserved_pds = dev_cap->reserved_pds;
dev->caps.reserved_xrcds = (dev->caps.flags & MLX4_DEV_CAP_FLAG_XRC) ?
dev_cap->reserved_xrcds : 0;
@@ -405,6 +417,15 @@
dev->caps.max_gso_sz = dev_cap->max_gso_sz;
dev->caps.max_rss_tbl_sz = dev_cap->max_rss_tbl_sz;
+ /* Save uar page shift */
+ if (!mlx4_is_slave(dev)) {
+ /* A virtual PCI function needs to determine the UAR page size from
+ * firmware. Only the master PCI function can set the uar page size
+ */
+ dev->uar_page_shift = DEFAULT_UAR_PAGE_SHIFT;
+ mlx4_set_num_reserved_uars(dev, dev_cap);
+ }
+
if (dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_PHV_EN) {
struct mlx4_init_hca_param hca_param;
@@ -815,16 +836,25 @@
return -ENODEV;
}
- /* slave gets uar page size from QUERY_HCA fw command */
- dev->caps.uar_page_size = 1 << (hca_param.uar_page_sz + 12);
+ /* Set uar_page_shift for VF */
+ dev->uar_page_shift = hca_param.uar_page_sz + 12;
- /* TODO: relax this assumption */
- if (dev->caps.uar_page_size != PAGE_SIZE) {
- mlx4_err(dev, "UAR size:%d != kernel PAGE_SIZE of %ld\n",
- dev->caps.uar_page_size, PAGE_SIZE);
- return -ENODEV;
+ /* Make sure the master uar page size is valid */
+ if (dev->uar_page_shift > PAGE_SHIFT) {
+ mlx4_err(dev,
+ "Invalid configuration: uar page size is larger than system page size\n");
+ return -ENODEV;
}
+ /* Set reserved_uars based on the uar_page_shift */
+ mlx4_set_num_reserved_uars(dev, &dev_cap);
+
+ /* Although the uar page size in FW differs from the system page size,
+ * upper software layers (mlx4_ib, mlx4_en and part of mlx4_core)
+ * still work under the assumption that uar page size == system page size
+ */
+ dev->caps.uar_page_size = PAGE_SIZE;
+
memset(&func_cap, 0, sizeof(func_cap));
err = mlx4_QUERY_FUNC_CAP(dev, 0, &func_cap);
if (err) {
@@ -2179,8 +2209,12 @@
dev->caps.max_fmr_maps = (1 << (32 - ilog2(dev->caps.num_mpts))) - 1;
- init_hca.log_uar_sz = ilog2(dev->caps.num_uars);
- init_hca.uar_page_sz = PAGE_SHIFT - 12;
+ /* Always set UAR page size 4KB, set log_uar_sz accordingly */
+ init_hca.log_uar_sz = ilog2(dev->caps.num_uars) +
+ PAGE_SHIFT -
+ DEFAULT_UAR_PAGE_SHIFT;
+ init_hca.uar_page_sz = DEFAULT_UAR_PAGE_SHIFT - 12;
+
init_hca.mw_enabled = 0;
if (dev->caps.flags & MLX4_DEV_CAP_FLAG_MEM_WINDOW ||
dev->caps.bmme_flags & MLX4_BMME_FLAG_TYPE_2_WIN)
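The reserved-UAR computation deserves a worked example: firmware reports reserved_uars in system-page units, so when the UAR page stays at 4 KiB on a 64 KiB-page kernel, each reported unit spans 16 UAR pages and the count must be scaled down before being compared with the driver's floor. A userspace sketch with illustrative numbers; the floor value is a stand-in for mlx4_get_num_reserved_uar():

    #include <stdio.h>

    #define PAGE_SHIFT	16	/* 64 KiB system pages */
    #define UAR_PAGE_SHIFT	12	/* UARs stay at 4 KiB */

    static int max_int(int a, int b)
    {
    	return a > b ? a : b;
    }

    int main(void)
    {
    	int fw_reserved_uars = 128;	/* reported in system-page units */
    	int driver_floor = 32;		/* stand-in for mlx4_get_num_reserved_uar() */
    	int uars_per_sys_page = 1 << (PAGE_SHIFT - UAR_PAGE_SHIFT);
    	int reserved = max_int(driver_floor,
    			       fw_reserved_uars / uars_per_sys_page);

    	printf("%d UAR pages per system page, effective reserved_uars = %d\n",
    	       uars_per_sys_page, reserved);	/* 16, 32 */
    	return 0;
    }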
diff --git a/drivers/net/ethernet/mellanox/mlx4/pd.c b/drivers/net/ethernet/mellanox/mlx4/pd.c
index 609c59d..b3cc3ab 100644
--- a/drivers/net/ethernet/mellanox/mlx4/pd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/pd.c
@@ -269,9 +269,15 @@
int mlx4_init_uar_table(struct mlx4_dev *dev)
{
- if (dev->caps.num_uars <= 128) {
- mlx4_err(dev, "Only %d UAR pages (need more than 128)\n",
- dev->caps.num_uars);
+ int num_reserved_uar = mlx4_get_num_reserved_uar(dev);
+
+ mlx4_dbg(dev, "uar_page_shift = %d", dev->uar_page_shift);
+ mlx4_dbg(dev, "Effective reserved_uars=%d", dev->caps.reserved_uars);
+
+ if (dev->caps.num_uars <= num_reserved_uar) {
+ mlx4_err(
+ dev, "Only %d UAR pages (need more than %d)\n",
+ dev->caps.num_uars, num_reserved_uar);
mlx4_err(dev, "Increase firmware log2_uar_bar_megabytes?\n");
return -ENODEV;
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index b46dbe2..25ce1b0 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -915,11 +915,13 @@
spin_lock_irq(mlx4_tlock(dev));
r = find_res(dev, counter_index, RES_COUNTER);
- if (!r || r->owner != slave)
+ if (!r || r->owner != slave) {
ret = -EINVAL;
- counter = container_of(r, struct res_counter, com);
- if (!counter->port)
- counter->port = port;
+ } else {
+ counter = container_of(r, struct res_counter, com);
+ if (!counter->port)
+ counter->port = port;
+ }
spin_unlock_irq(mlx4_tlock(dev));
return ret;
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
index 6a3e430..d4e1c30 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c
@@ -2024,18 +2024,37 @@
vf_stats);
}
-static struct net_device_ops mlx5e_netdev_ops = {
+static const struct net_device_ops mlx5e_netdev_ops_basic = {
.ndo_open = mlx5e_open,
.ndo_stop = mlx5e_close,
.ndo_start_xmit = mlx5e_xmit,
.ndo_get_stats64 = mlx5e_get_stats,
.ndo_set_rx_mode = mlx5e_set_rx_mode,
.ndo_set_mac_address = mlx5e_set_mac,
- .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
- .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
+ .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
.ndo_set_features = mlx5e_set_features,
- .ndo_change_mtu = mlx5e_change_mtu,
- .ndo_do_ioctl = mlx5e_ioctl,
+ .ndo_change_mtu = mlx5e_change_mtu,
+ .ndo_do_ioctl = mlx5e_ioctl,
+};
+
+static const struct net_device_ops mlx5e_netdev_ops_sriov = {
+ .ndo_open = mlx5e_open,
+ .ndo_stop = mlx5e_close,
+ .ndo_start_xmit = mlx5e_xmit,
+ .ndo_get_stats64 = mlx5e_get_stats,
+ .ndo_set_rx_mode = mlx5e_set_rx_mode,
+ .ndo_set_mac_address = mlx5e_set_mac,
+ .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
+ .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
+ .ndo_set_features = mlx5e_set_features,
+ .ndo_change_mtu = mlx5e_change_mtu,
+ .ndo_do_ioctl = mlx5e_ioctl,
+ .ndo_set_vf_mac = mlx5e_set_vf_mac,
+ .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
+ .ndo_get_vf_config = mlx5e_get_vf_config,
+ .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
+ .ndo_get_vf_stats = mlx5e_get_vf_stats,
};
static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
@@ -2137,18 +2156,11 @@
SET_NETDEV_DEV(netdev, &mdev->pdev->dev);
- if (priv->params.num_tc > 1)
- mlx5e_netdev_ops.ndo_select_queue = mlx5e_select_queue;
+ if (MLX5_CAP_GEN(mdev, vport_group_manager))
+ netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
+ else
+ netdev->netdev_ops = &mlx5e_netdev_ops_basic;
- if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
- mlx5e_netdev_ops.ndo_set_vf_mac = mlx5e_set_vf_mac;
- mlx5e_netdev_ops.ndo_set_vf_vlan = mlx5e_set_vf_vlan;
- mlx5e_netdev_ops.ndo_get_vf_config = mlx5e_get_vf_config;
- mlx5e_netdev_ops.ndo_set_vf_link_state = mlx5e_set_vf_link_state;
- mlx5e_netdev_ops.ndo_get_vf_stats = mlx5e_get_vf_stats;
- }
-
- netdev->netdev_ops = &mlx5e_netdev_ops;
netdev->watchdog_timeo = 15 * HZ;
netdev->ethtool_ops = &mlx5e_ethtool_ops;
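Splitting mlx5e_netdev_ops into two const tables is the standard cure for runtime-patched ops structs: writing into one shared mutable table at probe time both prevents read-only placement and misbehaves when several devices probe with different capabilities, whereas per-feature-set immutable tables let probe merely pick a pointer. A minimal sketch of the pattern with an illustrative struct, not the real net_device_ops layout:

    #include <stdio.h>

    struct dev_ops {
    	const char *name;
    	int (*set_vf_mac)(void);
    };

    static int set_vf_mac(void)
    {
    	return 0;
    }

    static const struct dev_ops ops_basic = {
    	.name = "basic",
    };

    static const struct dev_ops ops_sriov = {
    	.name = "sriov",
    	.set_vf_mac = set_vf_mac,
    };

    int main(void)
    {
    	int vport_group_manager = 1;	/* stand-in for the capability bit */
    	const struct dev_ops *ops = vport_group_manager ? &ops_sriov
    							: &ops_basic;

    	printf("%s ops, vf callbacks %savailable\n",
    	       ops->name, ops->set_vf_mac ? "" : "un");
    	return 0;
    }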
diff --git a/drivers/net/ethernet/mellanox/mlxsw/port.h b/drivers/net/ethernet/mellanox/mlxsw/port.h
index 726f543..ae65b99 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/port.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/port.h
@@ -49,7 +49,7 @@
#define MLXSW_PORT_MID 0xd000
#define MLXSW_PORT_MAX_PHY_PORTS 0x40
-#define MLXSW_PORT_MAX_PORTS MLXSW_PORT_MAX_PHY_PORTS
+#define MLXSW_PORT_MAX_PORTS (MLXSW_PORT_MAX_PHY_PORTS + 1)
#define MLXSW_PORT_DEVID_BITS_OFFSET 10
#define MLXSW_PORT_PHY_BITS_OFFSET 4
diff --git a/drivers/net/ethernet/mellanox/mlxsw/reg.h b/drivers/net/ethernet/mellanox/mlxsw/reg.h
index bb77e22..ffe4c03 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/reg.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/reg.h
@@ -873,6 +873,62 @@
}
}
+/* SPAFT - Switch Port Acceptable Frame Types
+ * ------------------------------------------
+ * The Switch Port Acceptable Frame Types register configures the frame
+ * admittance of the port.
+ */
+#define MLXSW_REG_SPAFT_ID 0x2010
+#define MLXSW_REG_SPAFT_LEN 0x08
+
+static const struct mlxsw_reg_info mlxsw_reg_spaft = {
+ .id = MLXSW_REG_SPAFT_ID,
+ .len = MLXSW_REG_SPAFT_LEN,
+};
+
+/* reg_spaft_local_port
+ * Local port number.
+ * Access: Index
+ *
+ * Note: CPU port is not supported (all tag types are allowed).
+ */
+MLXSW_ITEM32(reg, spaft, local_port, 0x00, 16, 8);
+
+/* reg_spaft_sub_port
+ * Virtual port within the physical port.
+ * Should be set to 0 when virtual ports are not enabled on the port.
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spaft, sub_port, 0x00, 8, 8);
+
+/* reg_spaft_allow_untagged
+ * When set, untagged frames on the ingress are allowed (default).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spaft, allow_untagged, 0x04, 31, 1);
+
+/* reg_spaft_allow_prio_tagged
+ * When set, priority tagged frames on the ingress are allowed (default).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spaft, allow_prio_tagged, 0x04, 30, 1);
+
+/* reg_spaft_allow_tagged
+ * When set, tagged frames on the ingress are allowed (default).
+ * Access: RW
+ */
+MLXSW_ITEM32(reg, spaft, allow_tagged, 0x04, 29, 1);
+
+static inline void mlxsw_reg_spaft_pack(char *payload, u8 local_port,
+ bool allow_untagged)
+{
+ MLXSW_REG_ZERO(spaft, payload);
+ mlxsw_reg_spaft_local_port_set(payload, local_port);
+ mlxsw_reg_spaft_allow_untagged_set(payload, allow_untagged);
+ mlxsw_reg_spaft_allow_prio_tagged_set(payload, true);
+ mlxsw_reg_spaft_allow_tagged_set(payload, true);
+}
+
/* SFGC - Switch Flooding Group Configuration
* ------------------------------------------
* The following register controls the association of flooding tables and MIDs
@@ -3203,6 +3259,8 @@
return "SPVID";
case MLXSW_REG_SPVM_ID:
return "SPVM";
+ case MLXSW_REG_SPAFT_ID:
+ return "SPAFT";
case MLXSW_REG_SFGC_ID:
return "SFGC";
case MLXSW_REG_SFTR_ID:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
index 217856b..09ce451 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.c
@@ -2123,6 +2123,8 @@
if (flush_fdb && mlxsw_sp_port_fdb_flush(mlxsw_sp_port))
netdev_err(mlxsw_sp_port->dev, "Failed to flush FDB\n");
+ mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+
mlxsw_sp_port->learning = 0;
mlxsw_sp_port->learning_sync = 0;
mlxsw_sp_port->uc_flood = 0;
@@ -2746,6 +2748,13 @@
goto err_vport_flood_set;
}
+ err = mlxsw_sp_port_stp_state_set(mlxsw_sp_vport, vid,
+ MLXSW_REG_SPMS_STATE_FORWARDING);
+ if (err) {
+ netdev_err(dev, "Failed to set STP state\n");
+ goto err_port_stp_state_set;
+ }
+
if (flush_fdb && mlxsw_sp_vport_fdb_flush(mlxsw_sp_vport))
netdev_err(dev, "Failed to flush FDB\n");
@@ -2763,6 +2772,7 @@
return 0;
+err_port_stp_state_set:
err_vport_flood_set:
err_port_vid_learning_set:
err_port_vid_to_fid_validate:
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
index 7f42eb1..3b89ed2 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum.h
@@ -254,5 +254,6 @@
int mlxsw_sp_vport_flood_set(struct mlxsw_sp_port *mlxsw_sp_vport, u16 vfid,
bool set, bool only_uc);
void mlxsw_sp_port_active_vlans_del(struct mlxsw_sp_port *mlxsw_sp_port);
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid);
#endif
diff --git a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
index e492ca2..7b56098 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/spectrum_switchdev.c
@@ -370,7 +370,8 @@
return err;
}
-static int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+static int __mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ u16 vid)
{
struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
char spvid_pl[MLXSW_REG_SPVID_LEN];
@@ -379,6 +380,53 @@
return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spvid), spvid_pl);
}
+static int mlxsw_sp_port_allow_untagged_set(struct mlxsw_sp_port *mlxsw_sp_port,
+ bool allow)
+{
+ struct mlxsw_sp *mlxsw_sp = mlxsw_sp_port->mlxsw_sp;
+ char spaft_pl[MLXSW_REG_SPAFT_LEN];
+
+ mlxsw_reg_spaft_pack(spaft_pl, mlxsw_sp_port->local_port, allow);
+ return mlxsw_reg_write(mlxsw_sp->core, MLXSW_REG(spaft), spaft_pl);
+}
+
+int mlxsw_sp_port_pvid_set(struct mlxsw_sp_port *mlxsw_sp_port, u16 vid)
+{
+ struct net_device *dev = mlxsw_sp_port->dev;
+ int err;
+
+ if (!vid) {
+ err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port, false);
+ if (err) {
+ netdev_err(dev, "Failed to disallow untagged traffic\n");
+ return err;
+ }
+ } else {
+ err = __mlxsw_sp_port_pvid_set(mlxsw_sp_port, vid);
+ if (err) {
+ netdev_err(dev, "Failed to set PVID\n");
+ return err;
+ }
+
+ /* Only allow if not already allowed. */
+ if (!mlxsw_sp_port->pvid) {
+ err = mlxsw_sp_port_allow_untagged_set(mlxsw_sp_port,
+ true);
+ if (err) {
+ netdev_err(dev, "Failed to allow untagged traffic\n");
+ goto err_port_allow_untagged_set;
+ }
+ }
+ }
+
+ mlxsw_sp_port->pvid = vid;
+ return 0;
+
+err_port_allow_untagged_set:
+ __mlxsw_sp_port_pvid_set(mlxsw_sp_port, mlxsw_sp_port->pvid);
+ return err;
+}
+
static int mlxsw_sp_fid_create(struct mlxsw_sp *mlxsw_sp, u16 fid)
{
char sfmr_pl[MLXSW_REG_SFMR_LEN];
@@ -540,7 +588,12 @@
netdev_err(dev, "Unable to add PVID %d\n", vid_begin);
goto err_port_pvid_set;
}
- mlxsw_sp_port->pvid = vid_begin;
+ } else if (!flag_pvid && old_pvid >= vid_begin && old_pvid <= vid_end) {
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
+ if (err) {
+ netdev_err(dev, "Unable to del PVID\n");
+ goto err_port_pvid_set;
+ }
}
/* Changing activity bits only if HW operation succeeded */
@@ -892,20 +945,18 @@
return err;
}
+ if (init)
+ goto out;
+
pvid = mlxsw_sp_port->pvid;
- if (pvid >= vid_begin && pvid <= vid_end && pvid != 1) {
- /* Default VLAN is always 1 */
- err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 1);
+ if (pvid >= vid_begin && pvid <= vid_end) {
+ err = mlxsw_sp_port_pvid_set(mlxsw_sp_port, 0);
if (err) {
netdev_err(dev, "Unable to del PVID %d\n", pvid);
return err;
}
- mlxsw_sp_port->pvid = 1;
}
- if (init)
- goto out;
-
err = __mlxsw_sp_port_flood_set(mlxsw_sp_port, vid_begin, vid_end,
false, false);
if (err) {
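With the new SPAFT register plumbed in, mlxsw_sp_port_pvid_set() gives PVID 0 a real meaning: untagged frames are refused at ingress rather than being mapped to a default VLAN. A small userspace sketch of that decision flow; allow_untagged() and the globals are stand-ins for the SPAFT/SPVID writes:

    #include <stdbool.h>
    #include <stdio.h>

    static unsigned short pvid;	/* current port PVID, 0 = none */

    static void allow_untagged(bool allow)	/* stand-in for the SPAFT write */
    {
    	printf("untagged ingress %s\n", allow ? "allowed" : "dropped");
    }

    static void port_pvid_set(unsigned short vid)
    {
    	if (!vid) {
    		allow_untagged(false);
    	} else {
    		printf("program PVID %u\n", vid);	/* stand-in for SPVID */
    		if (!pvid)	/* only flip admittance on the 0 -> vid edge */
    			allow_untagged(true);
    	}
    	pvid = vid;
    }

    int main(void)
    {
    	port_pvid_set(1);	/* program PVID 1, allow untagged */
    	port_pvid_set(5);	/* reprogram PVID, admittance untouched */
    	port_pvid_set(0);	/* membership gone: drop untagged */
    	return 0;
    }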
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index 17d5571..537974c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -6137,28 +6137,28 @@
sw_cnt_1ms_ini = 16000000/rg_saw_cnt;
sw_cnt_1ms_ini &= 0x0fff;
data = r8168_mac_ocp_read(tp, 0xd412);
- data &= 0x0fff;
+ data &= ~0x0fff;
data |= sw_cnt_1ms_ini;
r8168_mac_ocp_write(tp, 0xd412, data);
}
data = r8168_mac_ocp_read(tp, 0xe056);
- data &= 0xf0;
- data |= 0x07;
+ data &= ~0xf0;
+ data |= 0x70;
r8168_mac_ocp_write(tp, 0xe056, data);
data = r8168_mac_ocp_read(tp, 0xe052);
- data &= 0x8008;
- data |= 0x6000;
+ data &= ~0x6000;
+ data |= 0x8008;
r8168_mac_ocp_write(tp, 0xe052, data);
data = r8168_mac_ocp_read(tp, 0xe0d6);
- data &= 0x01ff;
+ data &= ~0x01ff;
data |= 0x017f;
r8168_mac_ocp_write(tp, 0xe0d6, data);
data = r8168_mac_ocp_read(tp, 0xd420);
- data &= 0x0fff;
+ data &= ~0x0fff;
data |= 0x047f;
r8168_mac_ocp_write(tp, 0xd420, data);
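All five r8169 hunks fix the same read-modify-write slip: to replace a bit field you must AND with the field's complement before OR-ing in the new value, while AND-ing with the mask itself keeps the stale field and discards everything else. A two-line demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	uint16_t reg = 0xabcd;		/* current register content */
    	uint16_t new_field = 0x047f;	/* replacement for bits 11:0 */

    	uint16_t buggy = (reg & 0x0fff) | new_field;	/* keeps stale bits */
    	uint16_t fixed = (reg & ~0x0fff) | new_field;	/* clears, then sets */

    	printf("buggy=0x%04x fixed=0x%04x\n", buggy, fixed);
    	/* buggy=0x0fff: old field merged in, top nibble lost;
    	 * fixed=0xa47f: top nibble preserved, field replaced.
    	 */
    	return 0;
    }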
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index ac43ed9..744d780 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -1139,7 +1139,8 @@
if (netif_running(ndev)) {
netif_device_detach(ndev);
/* Stop PTP Clock driver */
- ravb_ptp_stop(ndev);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
error = ravb_stop_dma(ndev);
if (error) {
@@ -1170,7 +1171,8 @@
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
- ravb_ptp_init(ndev, priv->pdev);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_ptp_init(ndev, priv->pdev);
netif_device_attach(ndev);
}
@@ -1298,7 +1300,8 @@
netif_tx_stop_all_queues(ndev);
/* Stop PTP Clock driver */
- ravb_ptp_stop(ndev);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_ptp_stop(ndev);
/* Wait for DMA stopping */
ravb_stop_dma(ndev);
@@ -1311,7 +1314,8 @@
ravb_emac_init(ndev);
/* Initialise PTP Clock driver */
- ravb_ptp_init(ndev, priv->pdev);
+ if (priv->chip_id == RCAR_GEN2)
+ ravb_ptp_init(ndev, priv->pdev);
netif_tx_start_all_queues(ndev);
}
@@ -1814,10 +1818,6 @@
CCC_OPC_CONFIG | CCC_GAC | CCC_CSEL_HPB, CCC);
}
- /* Set CSEL value */
- ravb_write(ndev, (ravb_read(ndev, CCC) & ~CCC_CSEL) | CCC_CSEL_HPB,
- CCC);
-
/* Set GTI value */
error = ravb_set_gti(ndev);
if (error)
diff --git a/drivers/net/ethernet/smsc/smc91x.c b/drivers/net/ethernet/smsc/smc91x.c
index 0e2fc1a..db7db8a 100644
--- a/drivers/net/ethernet/smsc/smc91x.c
+++ b/drivers/net/ethernet/smsc/smc91x.c
@@ -2342,8 +2342,8 @@
}
ndev->irq = platform_get_irq(pdev, 0);
- if (ndev->irq <= 0) {
- ret = -ENODEV;
+ if (ndev->irq < 0) {
+ ret = ndev->irq;
goto out_release_io;
}
/*
diff --git a/drivers/net/ethernet/synopsys/dwc_eth_qos.c b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
index 70814b7..fc8bbff 100644
--- a/drivers/net/ethernet/synopsys/dwc_eth_qos.c
+++ b/drivers/net/ethernet/synopsys/dwc_eth_qos.c
@@ -1880,9 +1880,9 @@
}
netdev_reset_queue(ndev);
+ dwceqos_init_hw(lp);
napi_enable(&lp->napi);
phy_start(lp->phy_dev);
- dwceqos_init_hw(lp);
netif_start_queue(ndev);
tasklet_enable(&lp->tx_bdreclaim_tasklet);
diff --git a/drivers/net/ethernet/ti/cpsw-phy-sel.c b/drivers/net/ethernet/ti/cpsw-phy-sel.c
index e9cc61e..c3e85ac 100644
--- a/drivers/net/ethernet/ti/cpsw-phy-sel.c
+++ b/drivers/net/ethernet/ti/cpsw-phy-sel.c
@@ -63,8 +63,12 @@
mode = AM33XX_GMII_SEL_MODE_RGMII;
break;
- case PHY_INTERFACE_MODE_MII:
default:
+ dev_warn(priv->dev,
+ "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
+ phy_modes(phy_mode));
+ /* fallthrough */
+ case PHY_INTERFACE_MODE_MII:
mode = AM33XX_GMII_SEL_MODE_MII;
break;
};
@@ -106,8 +110,12 @@
mode = AM33XX_GMII_SEL_MODE_RGMII;
break;
- case PHY_INTERFACE_MODE_MII:
default:
+ dev_warn(priv->dev,
+ "Unsupported PHY mode: \"%s\". Defaulting to MII.\n",
+ phy_modes(phy_mode));
+ /* fallthrough */
+ case PHY_INTERFACE_MODE_MII:
mode = AM33XX_GMII_SEL_MODE_MII;
break;
};
diff --git a/drivers/net/ethernet/ti/netcp_core.c b/drivers/net/ethernet/ti/netcp_core.c
index c61d66d..029841f 100644
--- a/drivers/net/ethernet/ti/netcp_core.c
+++ b/drivers/net/ethernet/ti/netcp_core.c
@@ -117,21 +117,17 @@
*ndesc = le32_to_cpu(desc->next_desc);
}
-static void get_pad_info(u32 *pad0, u32 *pad1, u32 *pad2, struct knav_dma_desc *desc)
+static u32 get_sw_data(int index, struct knav_dma_desc *desc)
{
- *pad0 = le32_to_cpu(desc->pad[0]);
- *pad1 = le32_to_cpu(desc->pad[1]);
- *pad2 = le32_to_cpu(desc->pad[2]);
+ /* No Endian conversion needed as this data is untouched by hw */
+ return desc->sw_data[index];
}
-static void get_pad_ptr(void **padptr, struct knav_dma_desc *desc)
-{
- u64 pad64;
-
- pad64 = le32_to_cpu(desc->pad[0]) +
- ((u64)le32_to_cpu(desc->pad[1]) << 32);
- *padptr = (void *)(uintptr_t)pad64;
-}
+/* use these macros to get sw data */
+#define GET_SW_DATA0(desc) get_sw_data(0, desc)
+#define GET_SW_DATA1(desc) get_sw_data(1, desc)
+#define GET_SW_DATA2(desc) get_sw_data(2, desc)
+#define GET_SW_DATA3(desc) get_sw_data(3, desc)
static void get_org_pkt_info(dma_addr_t *buff, u32 *buff_len,
struct knav_dma_desc *desc)
@@ -163,13 +159,18 @@
desc->packet_info = cpu_to_le32(pkt_info);
}
-static void set_pad_info(u32 pad0, u32 pad1, u32 pad2, struct knav_dma_desc *desc)
+static void set_sw_data(int index, u32 data, struct knav_dma_desc *desc)
{
- desc->pad[0] = cpu_to_le32(pad0);
- desc->pad[1] = cpu_to_le32(pad1);
- desc->pad[2] = cpu_to_le32(pad1);
+ /* No Endian conversion needed as this data is untouched by hw */
+ desc->sw_data[index] = data;
}
+/* use these macros to set sw data */
+#define SET_SW_DATA0(data, desc) set_sw_data(0, data, desc)
+#define SET_SW_DATA1(data, desc) set_sw_data(1, data, desc)
+#define SET_SW_DATA2(data, desc) set_sw_data(2, data, desc)
+#define SET_SW_DATA3(data, desc) set_sw_data(3, data, desc)
+
static void set_org_pkt_info(dma_addr_t buff, u32 buff_len,
struct knav_dma_desc *desc)
{
@@ -581,7 +582,6 @@
dma_addr_t dma_desc, dma_buf;
unsigned int buf_len, dma_sz = sizeof(*ndesc);
void *buf_ptr;
- u32 pad[2];
u32 tmp;
get_words(&dma_desc, 1, &desc->next_desc);
@@ -593,14 +593,20 @@
break;
}
get_pkt_info(&dma_buf, &tmp, &dma_desc, ndesc);
- get_pad_ptr(&buf_ptr, ndesc);
+ /* warning!!!! We are retrieving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ buf_ptr = (void *)GET_SW_DATA0(ndesc);
+ buf_len = (int)GET_SW_DATA1(desc);
dma_unmap_page(netcp->dev, dma_buf, PAGE_SIZE, DMA_FROM_DEVICE);
__free_page(buf_ptr);
knav_pool_desc_put(netcp->rx_pool, desc);
}
-
- get_pad_info(&pad[0], &pad[1], &buf_len, desc);
- buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
+ /* warning!!!! We are retrieving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ buf_ptr = (void *)GET_SW_DATA0(desc);
+ buf_len = (int)GET_SW_DATA1(desc);
if (buf_ptr)
netcp_frag_free(buf_len <= PAGE_SIZE, buf_ptr);
@@ -639,7 +645,6 @@
dma_addr_t dma_desc, dma_buff;
struct netcp_packet p_info;
struct sk_buff *skb;
- u32 pad[2];
void *org_buf_ptr;
dma_desc = knav_queue_pop(netcp->rx_queue, &dma_sz);
@@ -653,8 +658,11 @@
}
get_pkt_info(&dma_buff, &buf_len, &dma_desc, desc);
- get_pad_info(&pad[0], &pad[1], &org_buf_len, desc);
- org_buf_ptr = (void *)(uintptr_t)(pad[0] + ((u64)pad[1] << 32));
+ /* warning!!!! We are retrieving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ org_buf_ptr = (void *)GET_SW_DATA0(desc);
+ org_buf_len = (int)GET_SW_DATA1(desc);
if (unlikely(!org_buf_ptr)) {
dev_err(netcp->ndev_dev, "NULL bufptr in desc\n");
@@ -679,7 +687,6 @@
/* Fill in the page fragment list */
while (dma_desc) {
struct page *page;
- void *ptr;
ndesc = knav_pool_desc_unmap(netcp->rx_pool, dma_desc, dma_sz);
if (unlikely(!ndesc)) {
@@ -688,8 +695,10 @@
}
get_pkt_info(&dma_buff, &buf_len, &dma_desc, ndesc);
- get_pad_ptr(&ptr, ndesc);
- page = ptr;
+ /* warning!!!! We are retrieving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ page = (struct page *)GET_SW_DATA0(desc);
if (likely(dma_buff && buf_len && page)) {
dma_unmap_page(netcp->dev, dma_buff, PAGE_SIZE,
@@ -777,7 +786,10 @@
}
get_org_pkt_info(&dma, &buf_len, desc);
- get_pad_ptr(&buf_ptr, desc);
+ /* warning!!!! We are retrieving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ buf_ptr = (void *)GET_SW_DATA0(desc);
if (unlikely(!dma)) {
dev_err(netcp->ndev_dev, "NULL orig_buff in desc\n");
@@ -829,7 +841,7 @@
struct page *page;
dma_addr_t dma;
void *bufptr;
- u32 pad[3];
+ u32 sw_data[2];
/* Allocate descriptor */
hwdesc = knav_pool_desc_get(netcp->rx_pool);
@@ -846,7 +858,7 @@
SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
bufptr = netdev_alloc_frag(primary_buf_len);
- pad[2] = primary_buf_len;
+ sw_data[1] = primary_buf_len;
if (unlikely(!bufptr)) {
dev_warn_ratelimited(netcp->ndev_dev,
@@ -858,9 +870,10 @@
if (unlikely(dma_mapping_error(netcp->dev, dma)))
goto fail;
- pad[0] = lower_32_bits((uintptr_t)bufptr);
- pad[1] = upper_32_bits((uintptr_t)bufptr);
-
+ /* warning!!!! We are saving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ sw_data[0] = (u32)bufptr;
} else {
/* Allocate a secondary receive queue entry */
page = alloc_page(GFP_ATOMIC | GFP_DMA | __GFP_COLD);
@@ -870,9 +883,11 @@
}
buf_len = PAGE_SIZE;
dma = dma_map_page(netcp->dev, page, 0, buf_len, DMA_TO_DEVICE);
- pad[0] = lower_32_bits(dma);
- pad[1] = upper_32_bits(dma);
- pad[2] = 0;
+ /* warning!!!! We are saving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ sw_data[0] = (u32)page;
+ sw_data[1] = 0;
}
desc_info = KNAV_DMA_DESC_PS_INFO_IN_DESC;
@@ -882,7 +897,8 @@
pkt_info |= (netcp->rx_queue_id & KNAV_DMA_DESC_RETQ_MASK) <<
KNAV_DMA_DESC_RETQ_SHIFT;
set_org_pkt_info(dma, buf_len, hwdesc);
- set_pad_info(pad[0], pad[1], pad[2], hwdesc);
+ SET_SW_DATA0(sw_data[0], hwdesc);
+ SET_SW_DATA1(sw_data[1], hwdesc);
set_desc_info(desc_info, pkt_info, hwdesc);
/* Push to FDQs */
@@ -971,7 +987,6 @@
unsigned int budget)
{
struct knav_dma_desc *desc;
- void *ptr;
struct sk_buff *skb;
unsigned int dma_sz;
dma_addr_t dma;
@@ -988,8 +1003,10 @@
continue;
}
- get_pad_ptr(&ptr, desc);
- skb = ptr;
+ /* warning!!!! We are retrieving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ skb = (struct sk_buff *)GET_SW_DATA0(desc);
netcp_free_tx_desc_chain(netcp, desc, dma_sz);
if (!skb) {
dev_err(netcp->ndev_dev, "No skb in Tx desc\n");
@@ -1194,10 +1211,10 @@
}
set_words(&tmp, 1, &desc->packet_info);
- tmp = lower_32_bits((uintptr_t)&skb);
- set_words(&tmp, 1, &desc->pad[0]);
- tmp = upper_32_bits((uintptr_t)&skb);
- set_words(&tmp, 1, &desc->pad[1]);
+ /* warning!!!! We are saving the virtual ptr in the sw_data
+ * field as a 32bit value. Will not work on 64bit machines
+ */
+ SET_SW_DATA0((u32)skb, desc);
if (tx_pipe->flags & SWITCH_TO_PORT_IN_TAGINFO) {
tmp = tx_pipe->switch_to_port;
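The repeated "will not work on 64bit machines" warnings are worth taking literally: a single 32-bit sw_data slot cannot hold an LP64 pointer, which is why the replaced code spread it across two pad words with lower_32_bits()/upper_32_bits(). A standalone sketch of the truncation; on a typical 64-bit build the one-word round trip fails:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
    	int object = 42;
    	uint64_t addr = (uintptr_t)&object;

    	uint32_t lo = (uint32_t)addr;		/* all one sw_data slot holds */
    	uint32_t hi = (uint32_t)(addr >> 32);	/* dropped by the new scheme */

    	void *one_word = (void *)(uintptr_t)lo;
    	void *two_words = (void *)(uintptr_t)(((uint64_t)hi << 32) | lo);

    	printf("one-word roundtrip ok=%d, two-word roundtrip ok=%d\n",
    	       one_word == (void *)&object, two_words == (void *)&object);
    	return 0;
    }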
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 0b14ac3..0bf7edd 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -1039,6 +1039,34 @@
return geneve_xmit_skb(skb, dev, info);
}
+static int __geneve_change_mtu(struct net_device *dev, int new_mtu, bool strict)
+{
+ /* The max_mtu calculation does not take GENEVE options into
+ * account, to avoid excluding potentially valid
+ * configurations.
+ */
+ int max_mtu = IP_MAX_MTU - GENEVE_BASE_HLEN - sizeof(struct iphdr)
+ - dev->hard_header_len;
+
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ if (new_mtu > max_mtu) {
+ if (strict)
+ return -EINVAL;
+
+ new_mtu = max_mtu;
+ }
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
+static int geneve_change_mtu(struct net_device *dev, int new_mtu)
+{
+ return __geneve_change_mtu(dev, new_mtu, true);
+}
+
static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb)
{
struct ip_tunnel_info *info = skb_tunnel_info(skb);
@@ -1083,7 +1111,7 @@
.ndo_stop = geneve_stop,
.ndo_start_xmit = geneve_xmit,
.ndo_get_stats64 = ip_tunnel_get_stats64,
- .ndo_change_mtu = eth_change_mtu,
+ .ndo_change_mtu = geneve_change_mtu,
.ndo_validate_addr = eth_validate_addr,
.ndo_set_mac_address = eth_mac_addr,
.ndo_fill_metadata_dst = geneve_fill_metadata_dst,
@@ -1150,6 +1178,7 @@
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
netif_keep_dst(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
eth_hw_addr_random(dev);
}
@@ -1441,12 +1470,23 @@
return dev;
err = geneve_configure(net, dev, &geneve_remote_unspec,
- 0, 0, 0, htons(dst_port), true, 0);
- if (err) {
- free_netdev(dev);
- return ERR_PTR(err);
- }
+ 0, 0, 0, htons(dst_port), true,
+ GENEVE_F_UDP_ZERO_CSUM6_RX);
+ if (err)
+ goto err;
+
+ /* openvswitch users expect packet sizes to be unrestricted,
+ * so set the largest MTU we can.
+ */
+ err = __geneve_change_mtu(dev, IP_MAX_MTU, false);
+ if (err)
+ goto err;
+
return dev;
+
+ err:
+ free_netdev(dev);
+ return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(geneve_dev_create_fb);
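__geneve_change_mtu() splits validation into a strict mode for user requests and a clamping mode for the openvswitch fallback device, which wants the largest legal MTU rather than an error. A userspace sketch of the helper's shape, with illustrative limits:

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static int change_mtu(int *mtu, int new_mtu, int max_mtu, bool strict)
    {
    	if (new_mtu < 68)
    		return -EINVAL;

    	if (new_mtu > max_mtu) {
    		if (strict)
    			return -EINVAL;	/* user request: refuse */
    		new_mtu = max_mtu;	/* internal caller: clamp */
    	}

    	*mtu = new_mtu;
    	return 0;
    }

    int main(void)
    {
    	int mtu = 1500;
    	int max = 1450;		/* illustrative tunnel overhead limit */

    	printf("strict: ret=%d mtu=%d\n", change_mtu(&mtu, 9000, max, true), mtu);
    	printf("clamp:  ret=%d mtu=%d\n", change_mtu(&mtu, 9000, max, false), mtu);
    	return 0;
    }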
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index 1d3a665..98e34fe 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -1089,6 +1089,9 @@
net->ethtool_ops = &ethtool_ops;
SET_NETDEV_DEV(net, &dev->device);
+ /* We always need headroom for rndis header */
+ net->needed_headroom = RNDIS_AND_PPI_SIZE;
+
/* Notify the netvsc driver of the new device */
memset(&device_info, 0, sizeof(device_info));
device_info.ring_size = ring_size;
diff --git a/drivers/net/phy/bcm7xxx.c b/drivers/net/phy/bcm7xxx.c
index bf241a3..db507e3 100644
--- a/drivers/net/phy/bcm7xxx.c
+++ b/drivers/net/phy/bcm7xxx.c
@@ -250,10 +250,6 @@
phy_write(phydev, MII_BCM7XXX_AUX_MODE, MII_BCM7XX_64CLK_MDIO);
phy_read(phydev, MII_BCM7XXX_AUX_MODE);
- /* Workaround only required for 100Mbits/sec capable PHYs */
- if (phydev->supported & PHY_GBIT_FEATURES)
- return 0;
-
/* set shadow mode 2 */
ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST,
MII_BCM7XXX_SHD_MODE_2, MII_BCM7XXX_SHD_MODE_2);
@@ -270,7 +266,7 @@
phy_write(phydev, MII_BCM7XXX_100TX_FALSE_CAR, 0x7555);
/* reset shadow mode 2 */
- ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, MII_BCM7XXX_SHD_MODE_2, 0);
+ ret = phy_set_clr_bits(phydev, MII_BCM7XXX_TEST, 0, MII_BCM7XXX_SHD_MODE_2);
if (ret < 0)
return ret;
@@ -307,11 +303,6 @@
return 0;
}
-static int bcm7xxx_dummy_config_init(struct phy_device *phydev)
-{
- return 0;
-}
-
#define BCM7XXX_28NM_GPHY(_oui, _name) \
{ \
.phy_id = (_oui), \
@@ -337,7 +328,7 @@
.phy_id = PHY_ID_BCM7425,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM7425",
- .features = PHY_GBIT_FEATURES |
+ .features = PHY_BASIC_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause,
.flags = PHY_IS_INTERNAL,
.config_init = bcm7xxx_config_init,
@@ -349,7 +340,7 @@
.phy_id = PHY_ID_BCM7429,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM7429",
- .features = PHY_GBIT_FEATURES |
+ .features = PHY_BASIC_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause,
.flags = PHY_IS_INTERNAL,
.config_init = bcm7xxx_config_init,
@@ -361,7 +352,7 @@
.phy_id = PHY_ID_BCM7435,
.phy_id_mask = 0xfffffff0,
.name = "Broadcom BCM7435",
- .features = PHY_GBIT_FEATURES |
+ .features = PHY_BASIC_FEATURES |
SUPPORTED_Pause | SUPPORTED_Asym_Pause,
.flags = PHY_IS_INTERNAL,
.config_init = bcm7xxx_config_init,
@@ -369,30 +360,6 @@
.read_status = genphy_read_status,
.suspend = bcm7xxx_suspend,
.resume = bcm7xxx_config_init,
-}, {
- .phy_id = PHY_BCM_OUI_4,
- .phy_id_mask = 0xffff0000,
- .name = "Broadcom BCM7XXX 40nm",
- .features = PHY_GBIT_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
- .flags = PHY_IS_INTERNAL,
- .config_init = bcm7xxx_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
- .suspend = bcm7xxx_suspend,
- .resume = bcm7xxx_config_init,
-}, {
- .phy_id = PHY_BCM_OUI_5,
- .phy_id_mask = 0xffffff00,
- .name = "Broadcom BCM7XXX 65nm",
- .features = PHY_BASIC_FEATURES |
- SUPPORTED_Pause | SUPPORTED_Asym_Pause,
- .flags = PHY_IS_INTERNAL,
- .config_init = bcm7xxx_dummy_config_init,
- .config_aneg = genphy_config_aneg,
- .read_status = genphy_read_status,
- .suspend = bcm7xxx_suspend,
- .resume = bcm7xxx_config_init,
} };
static struct mdio_device_id __maybe_unused bcm7xxx_tbl[] = {
@@ -404,8 +371,6 @@
{ PHY_ID_BCM7439, 0xfffffff0, },
{ PHY_ID_BCM7435, 0xfffffff0, },
{ PHY_ID_BCM7445, 0xfffffff0, },
- { PHY_BCM_OUI_4, 0xffff0000 },
- { PHY_BCM_OUI_5, 0xffffff00 },
{ }
};
diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c
index e3eb964..ab1d0fc 100644
--- a/drivers/net/phy/marvell.c
+++ b/drivers/net/phy/marvell.c
@@ -446,6 +446,12 @@
if (err < 0)
return err;
+ return 0;
+}
+
+static int marvell_config_init(struct phy_device *phydev)
+{
+ /* Set registers from marvell,reg-init DT property */
return marvell_of_reg_init(phydev);
}
@@ -495,7 +501,7 @@
mdelay(500);
- return 0;
+ return marvell_config_init(phydev);
}
static int m88e3016_config_init(struct phy_device *phydev)
@@ -514,7 +520,7 @@
if (reg < 0)
return reg;
- return 0;
+ return marvell_config_init(phydev);
}
static int m88e1111_config_init(struct phy_device *phydev)
@@ -1078,6 +1084,7 @@
.features = PHY_GBIT_FEATURES,
.probe = marvell_probe,
.flags = PHY_HAS_INTERRUPT,
+ .config_init = &marvell_config_init,
.config_aneg = &marvell_config_aneg,
.read_status = &genphy_read_status,
.ack_interrupt = &marvell_ack_interrupt,
@@ -1149,6 +1156,7 @@
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
+ .config_init = &marvell_config_init,
.config_aneg = &m88e1121_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
@@ -1167,6 +1175,7 @@
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
+ .config_init = &marvell_config_init,
.config_aneg = &m88e1318_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
@@ -1259,6 +1268,7 @@
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
+ .config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
@@ -1277,6 +1287,7 @@
.features = PHY_GBIT_FEATURES,
.flags = PHY_HAS_INTERRUPT,
.probe = marvell_probe,
+ .config_init = &marvell_config_init,
.config_aneg = &m88e1510_config_aneg,
.read_status = &marvell_read_status,
.ack_interrupt = &marvell_ack_interrupt,
diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c
index bad3f00..e551f3a 100644
--- a/drivers/net/phy/phy_device.c
+++ b/drivers/net/phy/phy_device.c
@@ -1410,7 +1410,7 @@
features = (SUPPORTED_TP | SUPPORTED_MII
| SUPPORTED_AUI | SUPPORTED_FIBRE |
- SUPPORTED_BNC);
+ SUPPORTED_BNC | SUPPORTED_Pause | SUPPORTED_Asym_Pause);
/* Do we support autonegotiation? */
val = phy_read(phydev, MII_BMSR);
diff --git a/drivers/net/ppp/pppoe.c b/drivers/net/ppp/pppoe.c
index f3c6302..4ddae81 100644
--- a/drivers/net/ppp/pppoe.c
+++ b/drivers/net/ppp/pppoe.c
@@ -395,6 +395,8 @@
if (!__pppoe_xmit(sk_pppox(relay_po), skb))
goto abort_put;
+
+ sock_put(sk_pppox(relay_po));
} else {
if (sock_queue_rcv_skb(sk, skb))
goto abort_kfree;
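The pppoe fix is a reference-count balance: the relay path takes a reference on relay_po during lookup but previously dropped it only on the error path, leaking one reference per successfully relayed frame. A minimal sketch of the rule the fix restores, with a plain counter standing in for sock refcounting:

    #include <stdio.h>

    static int refcount;	/* stands in for the relay socket's refcount */

    static int lookup_relay(void)
    {
    	refcount++;	/* lookup succeeds and takes a reference */
    	return 1;
    }

    static void put_relay(void)
    {
    	refcount--;
    }

    static void relay(int fail)
    {
    	if (!lookup_relay())
    		return;

    	if (fail)
    		goto out_put;

    	printf("frame forwarded\n");
    out_put:	/* the fix: drop the reference on success as well */
    	put_relay();
    }

    int main(void)
    {
    	relay(0);
    	relay(1);
    	printf("leaked references: %d\n", refcount);	/* 0 with the fix */
    	return 0;
    }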
diff --git a/drivers/net/usb/Kconfig b/drivers/net/usb/Kconfig
index 7f83504..cdde590 100644
--- a/drivers/net/usb/Kconfig
+++ b/drivers/net/usb/Kconfig
@@ -395,6 +395,10 @@
The protocol specification is incomplete, and is controlled by
(and for) Microsoft; it isn't an "Open" ecosystem or market.
+config USB_NET_CDC_SUBSET_ENABLE
+ tristate
+ depends on USB_NET_CDC_SUBSET
+
config USB_NET_CDC_SUBSET
tristate "Simple USB Network Links (CDC Ethernet subset)"
depends on USB_USBNET
@@ -413,6 +417,7 @@
config USB_ALI_M5632
bool "ALi M5632 based 'USB 2.0 Data Link' cables"
depends on USB_NET_CDC_SUBSET
+ select USB_NET_CDC_SUBSET_ENABLE
help
Choose this option if you're using a host-to-host cable
based on this design, which supports USB 2.0 high speed.
@@ -420,6 +425,7 @@
config USB_AN2720
bool "AnchorChips 2720 based cables (Xircom PGUNET, ...)"
depends on USB_NET_CDC_SUBSET
+ select USB_NET_CDC_SUBSET_ENABLE
help
Choose this option if you're using a host-to-host cable
based on this design. Note that AnchorChips is now a
@@ -428,6 +434,7 @@
config USB_BELKIN
bool "eTEK based host-to-host cables (Advance, Belkin, ...)"
depends on USB_NET_CDC_SUBSET
+ select USB_NET_CDC_SUBSET_ENABLE
default y
help
Choose this option if you're using a host-to-host cable
@@ -437,6 +444,7 @@
config USB_ARMLINUX
bool "Embedded ARM Linux links (iPaq, ...)"
depends on USB_NET_CDC_SUBSET
+ select USB_NET_CDC_SUBSET_ENABLE
default y
help
Choose this option to support the "usb-eth" networking driver
@@ -454,6 +462,7 @@
config USB_EPSON2888
bool "Epson 2888 based firmware (DEVELOPMENT)"
depends on USB_NET_CDC_SUBSET
+ select USB_NET_CDC_SUBSET_ENABLE
help
Choose this option to support the usb networking links used
by some sample firmware from Epson.
@@ -461,6 +470,7 @@
config USB_KC2190
bool "KT Technology KC2190 based cables (InstaNet)"
depends on USB_NET_CDC_SUBSET
+ select USB_NET_CDC_SUBSET_ENABLE
help
Choose this option if you're using a host-to-host cable
with one of these chips.
diff --git a/drivers/net/usb/Makefile b/drivers/net/usb/Makefile
index b5f0406..37fb46ae 100644
--- a/drivers/net/usb/Makefile
+++ b/drivers/net/usb/Makefile
@@ -23,7 +23,7 @@
obj-$(CONFIG_USB_NET_NET1080) += net1080.o
obj-$(CONFIG_USB_NET_PLUSB) += plusb.o
obj-$(CONFIG_USB_NET_RNDIS_HOST) += rndis_host.o
-obj-$(CONFIG_USB_NET_CDC_SUBSET) += cdc_subset.o
+obj-$(CONFIG_USB_NET_CDC_SUBSET_ENABLE) += cdc_subset.o
obj-$(CONFIG_USB_NET_ZAURUS) += zaurus.o
obj-$(CONFIG_USB_NET_MCS7830) += mcs7830.o
obj-$(CONFIG_USB_USBNET) += usbnet.o
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 23e9880..570deef 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -637,6 +637,7 @@
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
+ {QMI_FIXED_INTF(0x05c6, 0x6001, 3)}, /* 4G LTE usb-modem U901 */
{QMI_FIXED_INTF(0x05c6, 0x7000, 0)},
{QMI_FIXED_INTF(0x05c6, 0x7001, 1)},
{QMI_FIXED_INTF(0x05c6, 0x7002, 1)},
diff --git a/drivers/net/vmxnet3/vmxnet3_defs.h b/drivers/net/vmxnet3/vmxnet3_defs.h
index 221a530..72ba8ae 100644
--- a/drivers/net/vmxnet3/vmxnet3_defs.h
+++ b/drivers/net/vmxnet3/vmxnet3_defs.h
@@ -377,7 +377,7 @@
#define VMXNET3_TX_RING_MAX_SIZE 4096
#define VMXNET3_TC_RING_MAX_SIZE 4096
#define VMXNET3_RX_RING_MAX_SIZE 4096
-#define VMXNET3_RX_RING2_MAX_SIZE 2048
+#define VMXNET3_RX_RING2_MAX_SIZE 4096
#define VMXNET3_RC_RING_MAX_SIZE 8192
/* a list of reasons for queue stop */
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index bdb8a6c..729c344 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -69,10 +69,10 @@
/*
* Version numbers
*/
-#define VMXNET3_DRIVER_VERSION_STRING "1.4.5.0-k"
+#define VMXNET3_DRIVER_VERSION_STRING "1.4.6.0-k"
/* a 32-bit int, each byte encodes a version number in VMXNET3_DRIVER_VERSION */
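/* e.g. 0x01040600 encodes version 1.4.6.0 */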
-#define VMXNET3_DRIVER_VERSION_NUM 0x01040500
+#define VMXNET3_DRIVER_VERSION_NUM 0x01040600
#if defined(CONFIG_PCI_MSI)
/* RSS only makes sense if MSI-X is supported. */
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 6543918..e6944b2 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -2171,9 +2171,11 @@
#endif
}
- if (vxlan->flags & VXLAN_F_COLLECT_METADATA &&
- info && info->mode & IP_TUNNEL_INFO_TX) {
- vxlan_xmit_one(skb, dev, NULL, false);
+ if (vxlan->flags & VXLAN_F_COLLECT_METADATA) {
+ if (info && info->mode & IP_TUNNEL_INFO_TX)
+ vxlan_xmit_one(skb, dev, NULL, false);
+ else
+ kfree_skb(skb);
return NETDEV_TX_OK;
}
@@ -2367,27 +2369,41 @@
{
}
+static int __vxlan_change_mtu(struct net_device *dev,
+ struct net_device *lowerdev,
+ struct vxlan_rdst *dst, int new_mtu, bool strict)
+{
+ int max_mtu = IP_MAX_MTU;
+
+ if (lowerdev)
+ max_mtu = lowerdev->mtu;
+
+ if (dst->remote_ip.sa.sa_family == AF_INET6)
+ max_mtu -= VXLAN6_HEADROOM;
+ else
+ max_mtu -= VXLAN_HEADROOM;
+
+ if (new_mtu < 68)
+ return -EINVAL;
+
+ if (new_mtu > max_mtu) {
+ if (strict)
+ return -EINVAL;
+
+ new_mtu = max_mtu;
+ }
+
+ dev->mtu = new_mtu;
+ return 0;
+}
+
static int vxlan_change_mtu(struct net_device *dev, int new_mtu)
{
struct vxlan_dev *vxlan = netdev_priv(dev);
struct vxlan_rdst *dst = &vxlan->default_dst;
- struct net_device *lowerdev;
- int max_mtu;
-
- lowerdev = __dev_get_by_index(vxlan->net, dst->remote_ifindex);
- if (lowerdev == NULL)
- return eth_change_mtu(dev, new_mtu);
-
- if (dst->remote_ip.sa.sa_family == AF_INET6)
- max_mtu = lowerdev->mtu - VXLAN6_HEADROOM;
- else
- max_mtu = lowerdev->mtu - VXLAN_HEADROOM;
-
- if (new_mtu < 68 || new_mtu > max_mtu)
- return -EINVAL;
-
- dev->mtu = new_mtu;
- return 0;
+ struct net_device *lowerdev = __dev_get_by_index(vxlan->net,
+ dst->remote_ifindex);
+ return __vxlan_change_mtu(dev, lowerdev, dst, new_mtu, true);
}
static int egress_ipv4_tun_info(struct net_device *dev, struct sk_buff *skb,
@@ -2523,6 +2539,7 @@
dev->hw_features |= NETIF_F_GSO_SOFTWARE;
dev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_STAG_TX;
netif_keep_dst(dev);
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
dev->priv_flags |= IFF_LIVE_ADDR_CHANGE | IFF_NO_QUEUE;
INIT_LIST_HEAD(&vxlan->next);
@@ -2765,6 +2782,7 @@
int err;
bool use_ipv6 = false;
__be16 default_port = vxlan->cfg.dst_port;
+ struct net_device *lowerdev = NULL;
vxlan->net = src_net;
@@ -2785,9 +2803,7 @@
}
if (conf->remote_ifindex) {
- struct net_device *lowerdev
- = __dev_get_by_index(src_net, conf->remote_ifindex);
-
+ lowerdev = __dev_get_by_index(src_net, conf->remote_ifindex);
dst->remote_ifindex = conf->remote_ifindex;
if (!lowerdev) {
@@ -2811,6 +2827,12 @@
needed_headroom = lowerdev->hard_header_len;
}
+ if (conf->mtu) {
+ err = __vxlan_change_mtu(dev, lowerdev, dst, conf->mtu, false);
+ if (err)
+ return err;
+ }
+
if (use_ipv6 || conf->flags & VXLAN_F_COLLECT_METADATA)
needed_headroom += VXLAN6_HEADROOM;
else
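
The refactor above lets newlink-time configuration clamp an oversized MTU while a user-initiated change is still rejected. A standalone sketch of that clamping logic (the headroom constants are assumed to match the driver's IPv4/IPv6 encapsulation overhead; -1 stands in for -EINVAL):

#include <stdbool.h>
#include <stdio.h>

#define IP_MAX_MTU	0xFFFF
#define VXLAN_HEADROOM	50	/* assumed IPv4 outer-header overhead */
#define VXLAN6_HEADROOM	70	/* assumed IPv6 outer-header overhead */

/* strict callers (ndo_change_mtu) reject an oversized MTU; device
 * creation passes strict=false and silently clamps it instead */
static int change_mtu(int *mtu, int lower_mtu, bool ipv6, int new_mtu, bool strict)
{
	int max_mtu = lower_mtu > 0 ? lower_mtu : IP_MAX_MTU;

	max_mtu -= ipv6 ? VXLAN6_HEADROOM : VXLAN_HEADROOM;

	if (new_mtu < 68)
		return -1;		/* below the IPv4 minimum */
	if (new_mtu > max_mtu) {
		if (strict)
			return -1;	/* user-initiated change: refuse */
		new_mtu = max_mtu;	/* device creation: clamp */
	}
	*mtu = new_mtu;
	return 0;
}

int main(void)
{
	int mtu = 0;

	change_mtu(&mtu, 1500, false, 9000, false);
	printf("clamped mtu = %d\n", mtu);			/* 1450 */
	printf("strict over-size -> %d\n",
	       change_mtu(&mtu, 1500, false, 9000, true));	/* -1 */
	return 0;
}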
diff --git a/drivers/net/wan/dscc4.c b/drivers/net/wan/dscc4.c
index 7a72407..6292259 100644
--- a/drivers/net/wan/dscc4.c
+++ b/drivers/net/wan/dscc4.c
@@ -1626,7 +1626,7 @@
if (state & Xpr) {
void __iomem *scc_addr;
unsigned long ring;
- int i;
+ unsigned int i;
/*
* - the busy condition happens (sometimes);
diff --git a/drivers/net/wireless/intel/iwlwifi/Kconfig b/drivers/net/wireless/intel/iwlwifi/Kconfig
index 8660677..7438fbe 100644
--- a/drivers/net/wireless/intel/iwlwifi/Kconfig
+++ b/drivers/net/wireless/intel/iwlwifi/Kconfig
@@ -53,7 +53,6 @@
config IWLDVM
tristate "Intel Wireless WiFi DVM Firmware support"
- depends on m
help
This is the driver that supports the DVM firmware. The list
of the devices that use this firmware is available here:
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
index c84a029..bce9b3420 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-8000.c
@@ -7,6 +7,7 @@
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2014 - 2015 Intel Mobile Communications GmbH
+ * Copyright(c) 2016 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -70,12 +71,15 @@
/* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX 20
+#define IWL8265_UCODE_API_MAX 20
/* Oldest version we won't warn about */
#define IWL8000_UCODE_API_OK 13
+#define IWL8265_UCODE_API_OK 20
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 13
+#define IWL8265_UCODE_API_MIN 20
/* NVM versions */
#define IWL8000_NVM_VERSION 0x0a1d
@@ -93,6 +97,10 @@
#define IWL8000_MODULE_FIRMWARE(api) \
IWL8000_FW_PRE "-" __stringify(api) ".ucode"
+#define IWL8265_FW_PRE "iwlwifi-8265-"
+#define IWL8265_MODULE_FIRMWARE(api) \
+ IWL8265_FW_PRE __stringify(api) ".ucode"
+
#define NVM_HW_SECTION_NUM_FAMILY_8000 10
#define DEFAULT_NVM_FILE_FAMILY_8000B "nvmData-8000B"
#define DEFAULT_NVM_FILE_FAMILY_8000C "nvmData-8000C"
@@ -144,10 +152,7 @@
.support_tx_backoff = true,
};
-#define IWL_DEVICE_8000 \
- .ucode_api_max = IWL8000_UCODE_API_MAX, \
- .ucode_api_ok = IWL8000_UCODE_API_OK, \
- .ucode_api_min = IWL8000_UCODE_API_MIN, \
+#define IWL_DEVICE_8000_COMMON \
.device_family = IWL_DEVICE_FAMILY_8000, \
.max_inst_size = IWL60_RTC_INST_SIZE, \
.max_data_size = IWL60_RTC_DATA_SIZE, \
@@ -167,10 +172,28 @@
.thermal_params = &iwl8000_tt_params, \
.apmg_not_supported = true
+#define IWL_DEVICE_8000 \
+ IWL_DEVICE_8000_COMMON, \
+ .ucode_api_max = IWL8000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL8000_UCODE_API_OK, \
+ .ucode_api_min = IWL8000_UCODE_API_MIN \
+
+#define IWL_DEVICE_8260 \
+ IWL_DEVICE_8000_COMMON, \
+ .ucode_api_max = IWL8000_UCODE_API_MAX, \
+ .ucode_api_ok = IWL8000_UCODE_API_OK, \
+ .ucode_api_min = IWL8000_UCODE_API_MIN \
+
+#define IWL_DEVICE_8265 \
+ IWL_DEVICE_8000_COMMON, \
+ .ucode_api_max = IWL8265_UCODE_API_MAX, \
+ .ucode_api_ok = IWL8265_UCODE_API_OK, \
+ .ucode_api_min = IWL8265_UCODE_API_MIN \
+
const struct iwl_cfg iwl8260_2n_cfg = {
.name = "Intel(R) Dual Band Wireless N 8260",
.fw_name_pre = IWL8000_FW_PRE,
- IWL_DEVICE_8000,
+ IWL_DEVICE_8260,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -179,7 +202,7 @@
const struct iwl_cfg iwl8260_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 8260",
.fw_name_pre = IWL8000_FW_PRE,
- IWL_DEVICE_8000,
+ IWL_DEVICE_8260,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -188,8 +211,8 @@
const struct iwl_cfg iwl8265_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 8265",
- .fw_name_pre = IWL8000_FW_PRE,
- IWL_DEVICE_8000,
+ .fw_name_pre = IWL8265_FW_PRE,
+ IWL_DEVICE_8265,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -209,7 +232,7 @@
const struct iwl_cfg iwl8260_2ac_sdio_cfg = {
.name = "Intel(R) Dual Band Wireless-AC 8260",
.fw_name_pre = IWL8000_FW_PRE,
- IWL_DEVICE_8000,
+ IWL_DEVICE_8260,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
@@ -236,3 +259,4 @@
};
MODULE_FIRMWARE(IWL8000_MODULE_FIRMWARE(IWL8000_UCODE_API_OK));
+MODULE_FIRMWARE(IWL8265_MODULE_FIRMWARE(IWL8265_UCODE_API_OK));
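
The per-device API-version split works by factoring the shared initializers into IWL_DEVICE_8000_COMMON and composing it per chip. A compilable miniature of that macro-composition pattern, with made-up field names:

#include <stdio.h>

struct cfg {
	const char *name;
	int family;
	int api_max, api_ok, api_min;
};

/* shared fields once, per-chip API versions layered on top */
#define DEVICE_COMMON	.family = 8000

#define DEVICE_8260	DEVICE_COMMON, .api_max = 20, .api_ok = 13, .api_min = 13
#define DEVICE_8265	DEVICE_COMMON, .api_max = 20, .api_ok = 20, .api_min = 20

static const struct cfg cfg_8260 = { .name = "8260", DEVICE_8260 };
static const struct cfg cfg_8265 = { .name = "8265", DEVICE_8265 };

int main(void)
{
	printf("%s: ok=%d  %s: ok=%d\n",
	       cfg_8260.name, cfg_8260.api_ok,
	       cfg_8265.name, cfg_8265.api_ok);
	return 0;
}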
diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
index 7acb490..ab4c2a0 100644
--- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
+++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c
@@ -243,8 +243,10 @@
if (drv->trans->cfg->device_family == IWL_DEVICE_FAMILY_8000) {
char rev_step = 'A' + CSR_HW_REV_STEP(drv->trans->hw_rev);
- snprintf(drv->firmware_name, sizeof(drv->firmware_name),
- "%s%c-%s.ucode", name_pre, rev_step, tag);
+ if (rev_step != 'A')
+ snprintf(drv->firmware_name,
+ sizeof(drv->firmware_name), "%s%c-%s.ucode",
+ name_pre, rev_step, tag);
}
IWL_DEBUG_INFO(drv, "attempting to load firmware %s'%s'\n",
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
index 9a15642..ea1e177 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/scan.c
@@ -1298,6 +1298,10 @@
return -EBUSY;
}
+ /* we don't support "match all" in the firmware */
+ if (!req->n_match_sets)
+ return -EOPNOTSUPP;
+
ret = iwl_mvm_check_running_scans(mvm, type);
if (ret)
return ret;
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
index cc3888e..73c9559 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h
@@ -490,6 +490,15 @@
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
+static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+ IWL_DEBUG_ISR(trans, "Enabling FW load interrupt\n");
+ trans_pcie->inta_mask = CSR_INT_BIT_FH_TX;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
+}
+
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
index ccafbd8..152cf9a 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c
@@ -1438,9 +1438,11 @@
inta & ~trans_pcie->inta_mask);
}
- /* Re-enable all interrupts */
- /* only Re-enable if disabled by irq */
- if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ /* we are loading the firmware, enable FH_TX interrupt only */
+ if (handled & CSR_INT_BIT_FH_TX)
+ iwl_enable_fw_load_int(trans);
+ /* only re-enable all interrupts if disabled by irq */
+ else if (test_bit(STATUS_INT_ENABLED, &trans->status))
iwl_enable_interrupts(trans);
/* Re-enable RF_KILL if it occurred */
else if (handled & CSR_INT_BIT_RF_KILL)
diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
index d60a467..5a854c6 100644
--- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c
@@ -1021,82 +1021,6 @@
&first_ucode_section);
}
-static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
- const struct fw_img *fw, bool run_in_rfkill)
-{
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- bool hw_rfkill;
- int ret;
-
- mutex_lock(&trans_pcie->mutex);
-
- /* Someone called stop_device, don't try to start_fw */
- if (trans_pcie->is_down) {
- IWL_WARN(trans,
- "Can't start_fw since the HW hasn't been started\n");
- ret = EIO;
- goto out;
- }
-
- /* This may fail if AMT took ownership of the device */
- if (iwl_pcie_prepare_card_hw(trans)) {
- IWL_WARN(trans, "Exit HW not ready\n");
- ret = -EIO;
- goto out;
- }
-
- iwl_enable_rfkill_int(trans);
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- hw_rfkill = iwl_is_rfkill_set(trans);
- if (hw_rfkill)
- set_bit(STATUS_RFKILL, &trans->status);
- else
- clear_bit(STATUS_RFKILL, &trans->status);
- iwl_trans_pcie_rf_kill(trans, hw_rfkill);
- if (hw_rfkill && !run_in_rfkill) {
- ret = -ERFKILL;
- goto out;
- }
-
- iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
-
- ret = iwl_pcie_nic_init(trans);
- if (ret) {
- IWL_ERR(trans, "Unable to init nic\n");
- goto out;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
- iwl_enable_interrupts(trans);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- /* Load the given image to the HW */
- if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
- ret = iwl_pcie_load_given_ucode_8000(trans, fw);
- else
- ret = iwl_pcie_load_given_ucode(trans, fw);
-
-out:
- mutex_unlock(&trans_pcie->mutex);
- return ret;
-}
-
-static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
-{
- iwl_pcie_reset_ict(trans);
- iwl_pcie_tx_start(trans, scd_addr);
-}
-
static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -1127,7 +1051,8 @@
* already dead.
*/
if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
- IWL_DEBUG_INFO(trans, "DEVICE_ENABLED bit was set and is now cleared\n");
+ IWL_DEBUG_INFO(trans,
+ "DEVICE_ENABLED bit was set and is now cleared\n");
iwl_pcie_tx_stop(trans);
iwl_pcie_rx_stop(trans);
@@ -1161,7 +1086,6 @@
iwl_disable_interrupts(trans);
spin_unlock(&trans_pcie->irq_lock);
-
/* clear all status bits */
clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
clear_bit(STATUS_INT_ENABLED, &trans->status);
@@ -1194,10 +1118,116 @@
if (hw_rfkill != was_hw_rfkill)
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
- /* re-take ownership to prevent other users from stealing the deivce */
+ /* re-take ownership to prevent other users from stealing the device */
iwl_pcie_prepare_card_hw(trans);
}
+static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
+ const struct fw_img *fw, bool run_in_rfkill)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ bool hw_rfkill;
+ int ret;
+
+ /* This may fail if AMT took ownership of the device */
+ if (iwl_pcie_prepare_card_hw(trans)) {
+ IWL_WARN(trans, "Exit HW not ready\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ iwl_enable_rfkill_int(trans);
+
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ /*
+ * We enabled the RF-Kill interrupt and the handler may very
+ * well be running. Disable the interrupts to make sure no other
+ * interrupt can be fired.
+ */
+ iwl_disable_interrupts(trans);
+
+ /* Make sure it finished running */
+ synchronize_irq(trans_pcie->pci_dev->irq);
+
+ mutex_lock(&trans_pcie->mutex);
+
+ /* If platform's RF_KILL switch is NOT set to KILL */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ if (hw_rfkill && !run_in_rfkill) {
+ ret = -ERFKILL;
+ goto out;
+ }
+
+ /* Someone called stop_device, don't try to start_fw */
+ if (trans_pcie->is_down) {
+ IWL_WARN(trans,
+ "Can't start_fw since the HW hasn't been started\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* make sure rfkill handshake bits are cleared */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
+ CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
+
+ /* clear (again), then enable host interrupts */
+ iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
+
+ ret = iwl_pcie_nic_init(trans);
+ if (ret) {
+ IWL_ERR(trans, "Unable to init nic\n");
+ goto out;
+ }
+
+ /*
+ * Now, we load the firmware and don't want to be interrupted, even
+ * by the RF-Kill interrupt (hence mask all the interrupts besides the
+ * FH_TX interrupt which is needed to load the firmware). If the
+ * RF-Kill switch is toggled, we will find out after having loaded
+ * the firmware and return the proper value to the caller.
+ */
+ iwl_enable_fw_load_int(trans);
+
+ /* really make sure rfkill handshake bits are cleared */
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+ iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
+
+ /* Load the given image to the HW */
+ if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
+ ret = iwl_pcie_load_given_ucode_8000(trans, fw);
+ else
+ ret = iwl_pcie_load_given_ucode(trans, fw);
+ iwl_enable_interrupts(trans);
+
+ /* re-check RF-Kill state since we may have missed the interrupt */
+ hw_rfkill = iwl_is_rfkill_set(trans);
+ if (hw_rfkill)
+ set_bit(STATUS_RFKILL, &trans->status);
+ else
+ clear_bit(STATUS_RFKILL, &trans->status);
+
+ iwl_trans_pcie_rf_kill(trans, hw_rfkill);
+ if (hw_rfkill && !run_in_rfkill)
+ ret = -ERFKILL;
+
+out:
+ mutex_unlock(&trans_pcie->mutex);
+ return ret;
+}
+
+static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
+{
+ iwl_pcie_reset_ict(trans);
+ iwl_pcie_tx_start(trans, scd_addr);
+}
+
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
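
The reordered start_fw path quiesces the interrupt before sampling RF-kill state under the mutex: disable, wait out any running handler, then decide. A userspace analogue of that ordering (all names illustrative; wait_for_handler() is a stand-in for synchronize_irq(), not the kernel call):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool irq_enabled = true;
static bool rfkill;

static void disable_irq(void) { irq_enabled = false; }	/* no new invocations */
static void wait_for_handler(void) { }	/* a handler caught mid-flight finishes */

static int start_fw(void)
{
	disable_irq();
	wait_for_handler();

	pthread_mutex_lock(&lock);
	if (rfkill) {		/* state is stable now; safe to bail out */
		pthread_mutex_unlock(&lock);
		return -1;
	}
	/* ... load firmware with only the FW-load interrupt unmasked ... */
	irq_enabled = true;
	pthread_mutex_unlock(&lock);
	return 0;
}

int main(void)
{
	printf("start_fw -> %d (irq_enabled=%d)\n", start_fw(), irq_enabled);
	return 0;
}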
diff --git a/drivers/net/wireless/realtek/rtlwifi/rc.c b/drivers/net/wireless/realtek/rtlwifi/rc.c
index 74c14ce..28f7010 100644
--- a/drivers/net/wireless/realtek/rtlwifi/rc.c
+++ b/drivers/net/wireless/realtek/rtlwifi/rc.c
@@ -138,6 +138,11 @@
((wireless_mode == WIRELESS_MODE_N_5G) ||
(wireless_mode == WIRELESS_MODE_N_24G)))
rate->flags |= IEEE80211_TX_RC_MCS;
+ if (sta && sta->vht_cap.vht_supported &&
+ (wireless_mode == WIRELESS_MODE_AC_5G ||
+ wireless_mode == WIRELESS_MODE_AC_24G ||
+ wireless_mode == WIRELESS_MODE_AC_ONLY))
+ rate->flags |= IEEE80211_TX_RC_VHT_MCS;
}
}
diff --git a/drivers/net/wireless/ti/wlcore/io.c b/drivers/net/wireless/ti/wlcore/io.c
index 9ac118e..564ca75 100644
--- a/drivers/net/wireless/ti/wlcore/io.c
+++ b/drivers/net/wireless/ti/wlcore/io.c
@@ -175,14 +175,14 @@
if (ret < 0)
goto out;
+ /* We don't need the size of the last partition, as it is
+ * automatically calculated based on the total memory size and
+ * the sizes of the previous partitions.
+ */
ret = wlcore_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
if (ret < 0)
goto out;
- ret = wlcore_raw_write32(wl, HW_PART3_SIZE_ADDR, p->mem3.size);
- if (ret < 0)
- goto out;
-
out:
return ret;
}
diff --git a/drivers/net/wireless/ti/wlcore/io.h b/drivers/net/wireless/ti/wlcore/io.h
index 6c257b54..10cf374 100644
--- a/drivers/net/wireless/ti/wlcore/io.h
+++ b/drivers/net/wireless/ti/wlcore/io.h
@@ -36,8 +36,8 @@
#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
-#define HW_PART3_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
-#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 28)
+#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
+
#define HW_ACCESS_REGISTER_SIZE 4
#define HW_ACCESS_PRAM_MAX_RANGE 0x3c000
diff --git a/drivers/nvdimm/bus.c b/drivers/nvdimm/bus.c
index 7e2c43f..5d28e94 100644
--- a/drivers/nvdimm/bus.c
+++ b/drivers/nvdimm/bus.c
@@ -382,18 +382,18 @@
[ND_CMD_ARS_CAP] = {
.in_num = 2,
.in_sizes = { 8, 8, },
+ .out_num = 4,
+ .out_sizes = { 4, 4, 4, 4, },
+ },
+ [ND_CMD_ARS_START] = {
+ .in_num = 5,
+ .in_sizes = { 8, 8, 2, 1, 5, },
.out_num = 2,
.out_sizes = { 4, 4, },
},
- [ND_CMD_ARS_START] = {
- .in_num = 4,
- .in_sizes = { 8, 8, 2, 6, },
- .out_num = 1,
- .out_sizes = { 4, },
- },
[ND_CMD_ARS_STATUS] = {
- .out_num = 2,
- .out_sizes = { 4, UINT_MAX, },
+ .out_num = 3,
+ .out_sizes = { 4, 4, UINT_MAX, },
},
};
@@ -442,8 +442,8 @@
return in_field[1];
else if (nvdimm && cmd == ND_CMD_VENDOR && idx == 2)
return out_field[1];
- else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 1)
- return ND_CMD_ARS_STATUS_MAX;
+ else if (!nvdimm && cmd == ND_CMD_ARS_STATUS && idx == 2)
+ return out_field[1] - 8;
return UINT_MAX;
}
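
With the corrected descriptors, the ARS_STATUS output ends in a variable-size record area, signalled by UINT_MAX and computed from the command's own size field. A small sketch of that lookup, assuming the 8-byte fixed header implied by the hunk:

#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* a UINT_MAX entry in out_sizes means "derive the size from an earlier
 * output field"; for ARS_STATUS the second 4-byte field is the total
 * output size, so the record area is that minus the fixed header */
static uint32_t out_size(unsigned idx, const uint32_t *out_field,
			 const uint32_t *out_sizes, unsigned out_num)
{
	if (idx >= out_num)
		return UINT_MAX;
	if (out_sizes[idx] != UINT_MAX)
		return out_sizes[idx];
	return out_field[1] - 8;	/* records follow the 8-byte fixed part */
}

int main(void)
{
	uint32_t sizes[] = { 4, 4, UINT_MAX };
	uint32_t fields[] = { 0, 72 };	/* status reports 72 bytes total */

	printf("record bytes = %u\n", out_size(2, fields, sizes, 3));	/* 64 */
	return 0;
}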
diff --git a/drivers/nvdimm/pmem.c b/drivers/nvdimm/pmem.c
index 7edf316..8d0b546 100644
--- a/drivers/nvdimm/pmem.c
+++ b/drivers/nvdimm/pmem.c
@@ -41,7 +41,7 @@
phys_addr_t phys_addr;
/* when non-zero this device is hosting a 'pfn' instance */
phys_addr_t data_offset;
- unsigned long pfn_flags;
+ u64 pfn_flags;
void __pmem *virt_addr;
size_t size;
struct badblocks bb;
diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig
index 5d62373..b586d84 100644
--- a/drivers/nvme/host/Kconfig
+++ b/drivers/nvme/host/Kconfig
@@ -17,5 +17,6 @@
and block device nodes, as well as a translation for a small
number of selected SCSI commands to NVMe commands to the NVMe
driver. If you don't know what this means you probably want
- to say N here, and if you know what it means you probably
- want to say N as well.
+ to say N here, unless you run a distro that abuses the SCSI
+ emulation to provide stable device names for mount by id, like
+ some OpenSuSE and SLES versions.
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index c5bf001..3cd921e 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1121,7 +1121,6 @@
ns->queue = blk_mq_init_queue(ctrl->tagset);
if (IS_ERR(ns->queue))
goto out_free_ns;
- queue_flag_set_unlocked(QUEUE_FLAG_NOMERGES, ns->queue);
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
ns->queue->queuedata = ns;
ns->ctrl = ctrl;
diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c
index 5cd3725..6bb15e4 100644
--- a/drivers/nvme/host/lightnvm.c
+++ b/drivers/nvme/host/lightnvm.c
@@ -146,9 +146,10 @@
};
};
+#define NVME_NVM_LP_MLC_PAIRS 886
struct nvme_nvm_lp_mlc {
__u16 num_pairs;
- __u8 pairs[886];
+ __u8 pairs[NVME_NVM_LP_MLC_PAIRS];
};
struct nvme_nvm_lp_tbl {
@@ -282,9 +283,14 @@
memcpy(dst->lptbl.id, src->lptbl.id, 8);
dst->lptbl.mlc.num_pairs =
le16_to_cpu(src->lptbl.mlc.num_pairs);
- /* 4 bits per pair */
+
+ if (dst->lptbl.mlc.num_pairs > NVME_NVM_LP_MLC_PAIRS) {
+ pr_err("nvm: number of MLC pairs not supported\n");
+ return -EINVAL;
+ }
+
memcpy(dst->lptbl.mlc.pairs, src->lptbl.mlc.pairs,
- dst->lptbl.mlc.num_pairs >> 1);
+ dst->lptbl.mlc.num_pairs);
}
}
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 4fb5bb7..9664d07 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -139,9 +139,9 @@
u32 val = 0;
if (ctrl->ops->io_incapable(ctrl))
- return false;
+ return true;
if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &val))
- return false;
+ return true;
return val & NVME_CSTS_CFS;
}
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index 72ef832..a128672 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -678,6 +678,11 @@
blk_mq_start_request(req);
spin_lock_irq(&nvmeq->q_lock);
+ if (unlikely(nvmeq->cq_vector < 0)) {
+ ret = BLK_MQ_RQ_QUEUE_BUSY;
+ spin_unlock_irq(&nvmeq->q_lock);
+ goto out;
+ }
__nvme_submit_cmd(nvmeq, &cmnd);
nvme_process_cq(nvmeq);
spin_unlock_irq(&nvmeq->q_lock);
@@ -999,7 +1004,7 @@
if (!blk_mq_request_started(req))
return;
- dev_warn(nvmeq->q_dmadev,
+ dev_dbg_ratelimited(nvmeq->q_dmadev,
"Cancelling I/O %d QID %d\n", req->tag, nvmeq->qid);
status = NVME_SC_ABORT_REQ;
@@ -2111,16 +2116,12 @@
{
struct nvme_dev *dev = pci_get_drvdata(pdev);
- spin_lock(&dev_list_lock);
- list_del_init(&dev->node);
- spin_unlock(&dev_list_lock);
-
pci_set_drvdata(pdev, NULL);
- flush_work(&dev->reset_work);
flush_work(&dev->scan_work);
nvme_remove_namespaces(&dev->ctrl);
nvme_uninit_ctrl(&dev->ctrl);
nvme_dev_disable(dev, true);
+ flush_work(&dev->reset_work);
nvme_dev_remove_admin(dev);
nvme_free_queues(dev, 0);
nvme_release_cmb(dev);
diff --git a/drivers/nvmem/core.c b/drivers/nvmem/core.c
index 6fd4e5a..9d11d98 100644
--- a/drivers/nvmem/core.c
+++ b/drivers/nvmem/core.c
@@ -70,6 +70,9 @@
if (pos >= nvmem->size)
return 0;
+ if (count < nvmem->word_size)
+ return -EINVAL;
+
if (pos + count > nvmem->size)
count = nvmem->size - pos;
@@ -95,6 +98,9 @@
if (pos >= nvmem->size)
return 0;
+ if (count < nvmem->word_size)
+ return -EINVAL;
+
if (pos + count > nvmem->size)
count = nvmem->size - pos;
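
The two new checks reject accesses smaller than the device word size before the usual end-of-device clamp. A compact model of the read-side bounds logic (sizes illustrative; -22 stands in for -EINVAL):

#include <stdio.h>
#include <string.h>

static long nvmem_read(char *dst, const char *dev, long dev_size,
		       long word_size, long pos, long count)
{
	if (pos >= dev_size)
		return 0;		/* EOF */
	if (count < word_size)
		return -22;		/* can't read a fraction of a word */
	if (pos + count > dev_size)
		count = dev_size - pos;	/* clamp to device end */

	memcpy(dst, dev + pos, count);
	return count;
}

int main(void)
{
	char dev[16] = "0123456789abcdef", buf[16];

	printf("%ld\n", nvmem_read(buf, dev, 16, 4, 12, 8));	/* clamped to 4 */
	printf("%ld\n", nvmem_read(buf, dev, 16, 4, 0, 2));	/* -22: sub-word */
	return 0;
}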
diff --git a/drivers/nvmem/qfprom.c b/drivers/nvmem/qfprom.c
index afb67e7..3829e5f 100644
--- a/drivers/nvmem/qfprom.c
+++ b/drivers/nvmem/qfprom.c
@@ -21,6 +21,7 @@
.reg_bits = 32,
.val_bits = 8,
.reg_stride = 1,
+ .val_format_endian = REGMAP_ENDIAN_LITTLE,
};
static struct nvmem_config econfig = {
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 7ee21ae..e7bfc17 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -635,6 +635,13 @@
msi_base = be32_to_cpup(msi_map + 2);
rid_len = be32_to_cpup(msi_map + 3);
+ if (rid_base & ~map_mask) {
+ dev_err(parent_dev,
+ "Invalid msi-map translation - msi-map-mask (0x%x) ignores rid-base (0x%x)\n",
+ map_mask, rid_base);
+ return rid_out;
+ }
+
msi_controller_node = of_find_node_by_phandle(phandle);
matched = (masked_rid >= rid_base &&
@@ -654,7 +661,7 @@
if (!matched)
return rid_out;
- rid_out = masked_rid + msi_base;
+ rid_out = masked_rid - rid_base + msi_base;
dev_dbg(dev,
"msi-map at: %s, using mask %08x, rid-base: %08x, msi-base: %08x, length: %08x, rid: %08x -> %08x\n",
dev_name(parent_dev), map_mask, rid_base, msi_base,
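
The corrected translation rebases the masked RID into the MSI controller's ID space instead of merely adding msi-base, and rejects map entries whose rid-base has bits outside the mask. The arithmetic in isolation (the range check against rid-len is elided here):

#include <stdint.h>
#include <stdio.h>

static uint32_t msi_map_rid(uint32_t rid, uint32_t map_mask,
			    uint32_t rid_base, uint32_t msi_base)
{
	uint32_t masked_rid = rid & map_mask;

	if (rid_base & ~map_mask)	/* mask would ignore part of rid-base */
		return rid;		/* invalid map entry: leave rid as-is */

	return masked_rid - rid_base + msi_base;
}

int main(void)
{
	/* rid 0x4005, mask 0xffff, entry (rid-base 0x4000, msi-base 0x100) */
	printf("0x%x\n", msi_map_rid(0x4005, 0xffff, 0x4000, 0x100)); /* 0x105 */
	return 0;
}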
diff --git a/drivers/of/of_mdio.c b/drivers/of/of_mdio.c
index 5648317..39c4be4 100644
--- a/drivers/of/of_mdio.c
+++ b/drivers/of/of_mdio.c
@@ -154,6 +154,7 @@
{ .compatible = "marvell,88E1111", },
{ .compatible = "marvell,88e1116", },
{ .compatible = "marvell,88e1118", },
+ { .compatible = "marvell,88e1145", },
{ .compatible = "marvell,88e1149r", },
{ .compatible = "marvell,88e1310", },
{ .compatible = "marvell,88E1510", },
diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig
index 75a6054..d1cdd9c 100644
--- a/drivers/pci/host/Kconfig
+++ b/drivers/pci/host/Kconfig
@@ -14,6 +14,7 @@
config PCI_MVEBU
bool "Marvell EBU PCIe controller"
depends on ARCH_MVEBU || ARCH_DOVE
+ depends on ARM
depends on OF
config PCIE_DW
diff --git a/drivers/pci/host/pcie-iproc.c b/drivers/pci/host/pcie-iproc.c
index 5816bce..a576aee 100644
--- a/drivers/pci/host/pcie-iproc.c
+++ b/drivers/pci/host/pcie-iproc.c
@@ -64,7 +64,6 @@
#define OARR_SIZE_CFG BIT(OARR_SIZE_CFG_SHIFT)
#define MAX_NUM_OB_WINDOWS 2
-#define MAX_NUM_PAXC_PF 4
#define IPROC_PCIE_REG_INVALID 0xffff
@@ -170,20 +169,6 @@
writel(val, pcie->base + offset + (window * 8));
}
-static inline bool iproc_pcie_device_is_valid(struct iproc_pcie *pcie,
- unsigned int slot,
- unsigned int fn)
-{
- if (slot > 0)
- return false;
-
- /* PAXC can only support limited number of functions */
- if (pcie->type == IPROC_PCIE_PAXC && fn >= MAX_NUM_PAXC_PF)
- return false;
-
- return true;
-}
-
/**
* Note: access to the configuration registers is protected at the higher layer
* by 'pci_lock' in drivers/pci/access.c
@@ -199,11 +184,11 @@
u32 val;
u16 offset;
- if (!iproc_pcie_device_is_valid(pcie, slot, fn))
- return NULL;
-
/* root complex access */
if (busno == 0) {
+ if (slot > 0 || fn > 0)
+ return NULL;
+
iproc_pcie_write_reg(pcie, IPROC_PCIE_CFG_IND_ADDR,
where & CFG_IND_ADDR_MASK);
offset = iproc_pcie_reg_offset(pcie, IPROC_PCIE_CFG_IND_DATA);
@@ -213,6 +198,14 @@
return (pcie->base + offset);
}
+ /*
+ * PAXC is connected to an internally emulated EP within the SoC. It
+ * allows only one device.
+ */
+ if (pcie->type == IPROC_PCIE_PAXC)
+ if (slot > 0)
+ return NULL;
+
/* EP device access */
val = (busno << CFG_ADDR_BUS_NUM_SHIFT) |
(slot << CFG_ADDR_DEV_NUM_SHIFT) |
diff --git a/drivers/pci/pcie/aer/aerdrv.c b/drivers/pci/pcie/aer/aerdrv.c
index 0bf82a2..48d21e0 100644
--- a/drivers/pci/pcie/aer/aerdrv.c
+++ b/drivers/pci/pcie/aer/aerdrv.c
@@ -262,7 +262,6 @@
rpc->rpd = dev;
INIT_WORK(&rpc->dpc_handler, aer_isr);
mutex_init(&rpc->rpc_mutex);
- init_waitqueue_head(&rpc->wait_release);
/* Use PCIe bus function to store rpc into PCIe device */
set_service_data(dev, rpc);
@@ -285,8 +284,7 @@
if (rpc->isr)
free_irq(dev->irq, dev);
- wait_event(rpc->wait_release, rpc->prod_idx == rpc->cons_idx);
-
+ flush_work(&rpc->dpc_handler);
aer_disable_rootport(rpc);
kfree(rpc);
set_service_data(dev, NULL);
diff --git a/drivers/pci/pcie/aer/aerdrv.h b/drivers/pci/pcie/aer/aerdrv.h
index 84420b7..945c939 100644
--- a/drivers/pci/pcie/aer/aerdrv.h
+++ b/drivers/pci/pcie/aer/aerdrv.h
@@ -72,7 +72,6 @@
* recovery on the same
* root port hierarchy
*/
- wait_queue_head_t wait_release;
};
struct aer_broadcast_data {
diff --git a/drivers/pci/pcie/aer/aerdrv_core.c b/drivers/pci/pcie/aer/aerdrv_core.c
index 7123925..521e39c 100644
--- a/drivers/pci/pcie/aer/aerdrv_core.c
+++ b/drivers/pci/pcie/aer/aerdrv_core.c
@@ -811,8 +811,6 @@
while (get_e_source(rpc, &e_src))
aer_isr_one_error(p_device, &e_src);
mutex_unlock(&rpc->rpc_mutex);
-
- wake_up(&rpc->wait_release);
}
/**
diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c
index c777b97..5f70fee 100644
--- a/drivers/pci/xen-pcifront.c
+++ b/drivers/pci/xen-pcifront.c
@@ -53,7 +53,7 @@
};
struct pcifront_sd {
- int domain;
+ struct pci_sysdata sd;
struct pcifront_device *pdev;
};
@@ -67,7 +67,9 @@
unsigned int domain, unsigned int bus,
struct pcifront_device *pdev)
{
- sd->domain = domain;
+ /* Because we do not expose that information via XenBus. */
+ sd->sd.node = first_online_node;
+ sd->sd.domain = domain;
sd->pdev = pdev;
}
@@ -468,8 +470,8 @@
dev_info(&pdev->xdev->dev, "Creating PCI Frontend Bus %04x:%02x\n",
domain, bus);
- bus_entry = kmalloc(sizeof(*bus_entry), GFP_KERNEL);
- sd = kmalloc(sizeof(*sd), GFP_KERNEL);
+ bus_entry = kzalloc(sizeof(*bus_entry), GFP_KERNEL);
+ sd = kzalloc(sizeof(*sd), GFP_KERNEL);
if (!bus_entry || !sd) {
err = -ENOMEM;
goto err_out;
diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig
index e7e117d..0124d17 100644
--- a/drivers/phy/Kconfig
+++ b/drivers/phy/Kconfig
@@ -224,6 +224,7 @@
config PHY_HI6220_USB
tristate "hi6220 USB PHY support"
+ depends on (ARCH_HISI && ARM64) || COMPILE_TEST
select GENERIC_PHY
select MFD_SYSCON
help
diff --git a/drivers/phy/phy-core.c b/drivers/phy/phy-core.c
index 8c7f27d..e7e574d 100644
--- a/drivers/phy/phy-core.c
+++ b/drivers/phy/phy-core.c
@@ -275,20 +275,21 @@
int phy_power_on(struct phy *phy)
{
- int ret;
+ int ret = 0;
if (!phy)
- return 0;
+ goto out;
if (phy->pwr) {
ret = regulator_enable(phy->pwr);
if (ret)
- return ret;
+ goto out;
}
ret = phy_pm_runtime_get_sync(phy);
if (ret < 0 && ret != -ENOTSUPP)
- return ret;
+ goto err_pm_sync;
+
ret = 0; /* Override possible ret == -ENOTSUPP */
mutex_lock(&phy->mutex);
@@ -296,19 +297,20 @@
ret = phy->ops->power_on(phy);
if (ret < 0) {
dev_err(&phy->dev, "phy poweron failed --> %d\n", ret);
- goto out;
+ goto err_pwr_on;
}
}
++phy->power_count;
mutex_unlock(&phy->mutex);
return 0;
-out:
+err_pwr_on:
mutex_unlock(&phy->mutex);
phy_pm_runtime_put_sync(phy);
+err_pm_sync:
if (phy->pwr)
regulator_disable(phy->pwr);
-
+out:
return ret;
}
EXPORT_SYMBOL_GPL(phy_power_on);
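
phy_power_on() now unwinds through a single ladder of labels so each acquired resource has exactly one release site, in reverse order of acquisition. The shape of that pattern, with printouts standing in for the regulator and runtime-PM calls:

#include <stdio.h>

static int power_on(int fail_pm, int fail_op)
{
	int ret;

	printf("regulator_enable\n");

	if (fail_pm) { ret = -1; goto err_pm_sync; }	/* pm get failed */
	printf("pm_runtime_get\n");

	if (fail_op) { ret = -2; goto err_pwr_on; }	/* ops->power_on failed */
	printf("powered on\n");
	return 0;

err_pwr_on:
	printf("pm_runtime_put\n");	/* undo the PM reference */
err_pm_sync:
	printf("regulator_disable\n");	/* undo the regulator, last acquired first released */
	return ret;
}

int main(void)
{
	power_on(0, 1);		/* fails late: both resources released */
	power_on(1, 0);		/* fails early: only the regulator released */
	return power_on(0, 0) ? 1 : 0;
}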
diff --git a/drivers/phy/phy-twl4030-usb.c b/drivers/phy/phy-twl4030-usb.c
index 4a3fc6e..840f3ea 100644
--- a/drivers/phy/phy-twl4030-usb.c
+++ b/drivers/phy/phy-twl4030-usb.c
@@ -715,6 +715,7 @@
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_set_autosuspend_delay(&pdev->dev, 2000);
pm_runtime_enable(&pdev->dev);
+ pm_runtime_get_sync(&pdev->dev);
/* Our job is to use irqs and status from the power module
* to keep the transceiver disabled when nothing's connected.
@@ -750,6 +751,7 @@
struct twl4030_usb *twl = platform_get_drvdata(pdev);
int val;
+ usb_remove_phy(&twl->phy);
pm_runtime_get_sync(twl->dev);
cancel_delayed_work(&twl->id_workaround_work);
device_remove_file(twl->dev, &dev_attr_vbus);
@@ -757,6 +759,13 @@
/* set transceiver mode to power on defaults */
twl4030_usb_set_mode(twl, -1);
+ /* idle ulpi before powering off */
+ if (cable_present(twl->linkstat))
+ pm_runtime_put_noidle(twl->dev);
+ pm_runtime_mark_last_busy(twl->dev);
+ pm_runtime_put_sync_suspend(twl->dev);
+ pm_runtime_disable(twl->dev);
+
/* autogate 60MHz ULPI clock,
* clear dpll clock request for i2c access,
* disable 32KHz
@@ -771,11 +780,6 @@
/* disable complete OTG block */
twl4030_usb_clear_bits(twl, POWER_CTRL, POWER_CTRL_OTG_ENAB);
- if (cable_present(twl->linkstat))
- pm_runtime_put_noidle(twl->dev);
- pm_runtime_mark_last_busy(twl->dev);
- pm_runtime_put(twl->dev);
-
return 0;
}
diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
index 16d48a4..e96e86d 100644
--- a/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
+++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common.c
@@ -347,6 +347,7 @@
ret = mtk_pconf_set_pull_select(pctl, pin, true, false, arg);
break;
case PIN_CONFIG_INPUT_ENABLE:
+ mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
break;
case PIN_CONFIG_OUTPUT:
@@ -354,6 +355,7 @@
ret = mtk_pmx_gpio_set_direction(pctldev, NULL, pin, false);
break;
case PIN_CONFIG_INPUT_SCHMITT_ENABLE:
+ mtk_pmx_gpio_set_direction(pctldev, NULL, pin, true);
ret = mtk_pconf_set_ies_smt(pctl, pin, arg, param);
break;
case PIN_CONFIG_DRIVE_STRENGTH:
diff --git a/drivers/pinctrl/mvebu/pinctrl-mvebu.c b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
index e4d4738..3ef798f 100644
--- a/drivers/pinctrl/mvebu/pinctrl-mvebu.c
+++ b/drivers/pinctrl/mvebu/pinctrl-mvebu.c
@@ -666,16 +666,19 @@
struct mvebu_mpp_ctrl_setting *set = &mode->settings[0];
struct mvebu_pinctrl_group *grp;
unsigned num_settings;
+ unsigned supp_settings;
- for (num_settings = 0; ; set++) {
+ for (num_settings = 0, supp_settings = 0; ; set++) {
if (!set->name)
break;
+ num_settings++;
+
/* skip unsupported settings for this variant */
if (pctl->variant && !(pctl->variant & set->variant))
continue;
- num_settings++;
+ supp_settings++;
/* find gpio/gpo/gpi settings */
if (strcmp(set->name, "gpio") == 0)
@@ -688,7 +691,7 @@
}
/* skip modes with no settings for this variant */
- if (!num_settings)
+ if (!supp_settings)
continue;
grp = mvebu_pinctrl_find_group_by_pid(pctl, mode->pid);
diff --git a/drivers/pinctrl/nomadik/pinctrl-abx500.c b/drivers/pinctrl/nomadik/pinctrl-abx500.c
index 085e601..1f7469c 100644
--- a/drivers/pinctrl/nomadik/pinctrl-abx500.c
+++ b/drivers/pinctrl/nomadik/pinctrl-abx500.c
@@ -191,6 +191,7 @@
dev_err(pct->dev, "%s write failed (%d)\n", __func__, ret);
}
+#ifdef CONFIG_DEBUG_FS
static int abx500_get_pull_updown(struct abx500_pinctrl *pct, int offset,
enum abx500_gpio_pull_updown *pull_updown)
{
@@ -226,6 +227,7 @@
return ret;
}
+#endif
static int abx500_set_pull_updown(struct abx500_pinctrl *pct,
int offset, enum abx500_gpio_pull_updown val)
@@ -468,6 +470,7 @@
return ret;
}
+#ifdef CONFIG_DEBUG_FS
static int abx500_get_mode(struct pinctrl_dev *pctldev, struct gpio_chip *chip,
unsigned gpio)
{
@@ -553,8 +556,6 @@
return ret;
}
-#ifdef CONFIG_DEBUG_FS
-
#include <linux/seq_file.h>
static void abx500_gpio_dbg_show_one(struct seq_file *s,
diff --git a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
index d90e205..216f227 100644
--- a/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
+++ b/drivers/pinctrl/pxa/pinctrl-pxa2xx.c
@@ -426,6 +426,7 @@
return 0;
}
+EXPORT_SYMBOL(pxa2xx_pinctrl_init);
int pxa2xx_pinctrl_exit(struct platform_device *pdev)
{
diff --git a/drivers/pinctrl/samsung/pinctrl-samsung.c b/drivers/pinctrl/samsung/pinctrl-samsung.c
index f67b1e9..5cc97f8 100644
--- a/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -514,25 +514,35 @@
.pin_config_group_set = samsung_pinconf_group_set,
};
-/* gpiolib gpio_set callback function */
-static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+/*
+ * samsung_gpio_set_value() should be called with "bank->slock" held
+ * to avoid a race condition.
+ */
+static void samsung_gpio_set_value(struct gpio_chip *gc,
+ unsigned offset, int value)
{
struct samsung_pin_bank *bank = gpiochip_get_data(gc);
const struct samsung_pin_bank_type *type = bank->type;
- unsigned long flags;
void __iomem *reg;
u32 data;
reg = bank->drvdata->virt_base + bank->pctl_offset;
- spin_lock_irqsave(&bank->slock, flags);
-
data = readl(reg + type->reg_offset[PINCFG_TYPE_DAT]);
data &= ~(1 << offset);
if (value)
data |= 1 << offset;
writel(data, reg + type->reg_offset[PINCFG_TYPE_DAT]);
+}
+/* gpiolib gpio_set callback function */
+static void samsung_gpio_set(struct gpio_chip *gc, unsigned offset, int value)
+{
+ struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+ unsigned long flags;
+
+ spin_lock_irqsave(&bank->slock, flags);
+ samsung_gpio_set_value(gc, offset, value);
spin_unlock_irqrestore(&bank->slock, flags);
}
@@ -553,6 +563,8 @@
}
/*
+ * samsung_gpio_set_direction() should be called with "bank->slock" held
+ * to avoid a race condition.
* The calls to gpio_direction_output() and gpio_direction_input()
* leads to this function call.
*/
@@ -564,7 +576,6 @@
struct samsung_pinctrl_drv_data *drvdata;
void __iomem *reg;
u32 data, mask, shift;
- unsigned long flags;
bank = gpiochip_get_data(gc);
type = bank->type;
@@ -581,31 +592,42 @@
reg += 4;
}
- spin_lock_irqsave(&bank->slock, flags);
-
data = readl(reg);
data &= ~(mask << shift);
if (!input)
data |= FUNC_OUTPUT << shift;
writel(data, reg);
- spin_unlock_irqrestore(&bank->slock, flags);
-
return 0;
}
/* gpiolib gpio_direction_input callback function. */
static int samsung_gpio_direction_input(struct gpio_chip *gc, unsigned offset)
{
- return samsung_gpio_set_direction(gc, offset, true);
+ struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&bank->slock, flags);
+ ret = samsung_gpio_set_direction(gc, offset, true);
+ spin_unlock_irqrestore(&bank->slock, flags);
+ return ret;
}
/* gpiolib gpio_direction_output callback function. */
static int samsung_gpio_direction_output(struct gpio_chip *gc, unsigned offset,
int value)
{
- samsung_gpio_set(gc, offset, value);
- return samsung_gpio_set_direction(gc, offset, false);
+ struct samsung_pin_bank *bank = gpiochip_get_data(gc);
+ unsigned long flags;
+ int ret;
+
+ spin_lock_irqsave(&bank->slock, flags);
+ samsung_gpio_set_value(gc, offset, value);
+ ret = samsung_gpio_set_direction(gc, offset, false);
+ spin_unlock_irqrestore(&bank->slock, flags);
+
+ return ret;
}
/*
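
The split above is the usual lock-held-helper pattern: a value-setting helper documented as requiring bank->slock, thin locked wrappers for gpiolib, and direction_output() doing value + direction in one critical section. A pthread sketch of the same shape (names illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t slock = PTHREAD_MUTEX_INITIALIZER;
static int pin_value, pin_is_output;

/* these two must be called with slock held */
static void gpio_set_value_locked(int v) { pin_value = v; }
static void gpio_set_dir_locked(int out) { pin_is_output = out; }

static void gpio_set(int v)
{
	pthread_mutex_lock(&slock);
	gpio_set_value_locked(v);
	pthread_mutex_unlock(&slock);
}

static void gpio_direction_output(int v)
{
	pthread_mutex_lock(&slock);	/* one critical section for both steps */
	gpio_set_value_locked(v);
	gpio_set_dir_locked(1);
	pthread_mutex_unlock(&slock);
}

int main(void)
{
	gpio_set(0);
	gpio_direction_output(1);
	printf("value=%d output=%d\n", pin_value, pin_is_output);
	return 0;
}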
diff --git a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
index 77d4cf0..11760bb 100644
--- a/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
+++ b/drivers/pinctrl/sunxi/pinctrl-sun8i-h3.c
@@ -492,6 +492,7 @@
.pins = sun8i_h3_pins,
.npins = ARRAY_SIZE(sun8i_h3_pins),
.irq_banks = 2,
+ .irq_read_needs_mux = true
};
static int sun8i_h3_pinctrl_probe(struct platform_device *pdev)
diff --git a/drivers/platform/x86/intel-hid.c b/drivers/platform/x86/intel-hid.c
index 20f0ad9..e20f23e 100644
--- a/drivers/platform/x86/intel-hid.c
+++ b/drivers/platform/x86/intel-hid.c
@@ -41,8 +41,7 @@
{ KE_KEY, 4, { KEY_HOME } },
{ KE_KEY, 5, { KEY_END } },
{ KE_KEY, 6, { KEY_PAGEUP } },
- { KE_KEY, 4, { KEY_PAGEDOWN } },
- { KE_KEY, 4, { KEY_HOME } },
+ { KE_KEY, 7, { KEY_PAGEDOWN } },
{ KE_KEY, 8, { KEY_RFKILL } },
{ KE_KEY, 9, { KEY_POWER } },
{ KE_KEY, 11, { KEY_SLEEP } },
diff --git a/drivers/platform/x86/intel_scu_ipcutil.c b/drivers/platform/x86/intel_scu_ipcutil.c
index 02bc5a6..aa45424 100644
--- a/drivers/platform/x86/intel_scu_ipcutil.c
+++ b/drivers/platform/x86/intel_scu_ipcutil.c
@@ -49,7 +49,7 @@
static int scu_reg_access(u32 cmd, struct scu_ipc_data *data)
{
- int count = data->count;
+ unsigned int count = data->count;
if (count == 0 || count == 3 || count > 4)
return -EINVAL;
diff --git a/drivers/power/bq27xxx_battery_i2c.c b/drivers/power/bq27xxx_battery_i2c.c
index 9429e66..8eafc6f 100644
--- a/drivers/power/bq27xxx_battery_i2c.c
+++ b/drivers/power/bq27xxx_battery_i2c.c
@@ -21,6 +21,9 @@
#include <linux/power/bq27xxx_battery.h>
+static DEFINE_IDR(battery_id);
+static DEFINE_MUTEX(battery_mutex);
+
static irqreturn_t bq27xxx_battery_irq_handler_thread(int irq, void *data)
{
struct bq27xxx_device_info *di = data;
@@ -70,19 +73,33 @@
{
struct bq27xxx_device_info *di;
int ret;
+ char *name;
+ int num;
+
+ /* Get new ID for the new battery device */
+ mutex_lock(&battery_mutex);
+ num = idr_alloc(&battery_id, client, 0, 0, GFP_KERNEL);
+ mutex_unlock(&battery_mutex);
+ if (num < 0)
+ return num;
+
+ name = devm_kasprintf(&client->dev, GFP_KERNEL, "%s-%d", id->name, num);
+ if (!name)
+ goto err_mem;
di = devm_kzalloc(&client->dev, sizeof(*di), GFP_KERNEL);
if (!di)
- return -ENOMEM;
+ goto err_mem;
+ di->id = num;
di->dev = &client->dev;
di->chip = id->driver_data;
- di->name = id->name;
+ di->name = name;
di->bus.read = bq27xxx_battery_i2c_read;
ret = bq27xxx_battery_setup(di);
if (ret)
- return ret;
+ goto err_failed;
/* Schedule polling after about 1 min */
schedule_delayed_work(&di->work, 60 * HZ);
@@ -103,6 +120,16 @@
}
return 0;
+
+err_mem:
+ ret = -ENOMEM;
+
+err_failed:
+ mutex_lock(&battery_mutex);
+ idr_remove(&battery_id, num);
+ mutex_unlock(&battery_mutex);
+
+ return ret;
}
static int bq27xxx_battery_i2c_remove(struct i2c_client *client)
@@ -111,6 +138,10 @@
bq27xxx_battery_teardown(di);
+ mutex_lock(&battery_mutex);
+ idr_remove(&battery_id, di->id);
+ mutex_unlock(&battery_mutex);
+
return 0;
}
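
The IDR gives concurrently probed batteries unique numeric suffixes, and every error path returns the ID so it can be reused. A miniature of that allocate/name/release flow, with a plain bitmap standing in for the kernel IDR:

#include <stdio.h>
#include <string.h>

#define MAX_IDS 8
static unsigned char ids[MAX_IDS];

static int id_alloc(void)	/* smallest free ID, like idr_alloc(..., 0, 0, ...) */
{
	for (int i = 0; i < MAX_IDS; i++)
		if (!ids[i]) { ids[i] = 1; return i; }
	return -1;
}

static void id_remove(int id) { ids[id] = 0; }

int main(void)
{
	char name[32];
	int a = id_alloc(), b = id_alloc();

	snprintf(name, sizeof(name), "bq27000-%d", b);	/* e.g. "bq27000-1" */
	puts(name);
	id_remove(a);			/* freed IDs are reused by the next probe */
	printf("reused id = %d\n", id_alloc());
	return 0;
}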
diff --git a/drivers/s390/block/dasd.c b/drivers/s390/block/dasd.c
index 41605da..c78db05 100644
--- a/drivers/s390/block/dasd.c
+++ b/drivers/s390/block/dasd.c
@@ -3035,6 +3035,7 @@
max = block->base->discipline->max_blocks << block->s2b_shift;
}
queue_flag_set_unlocked(QUEUE_FLAG_NONROT, block->request_queue);
+ block->request_queue->limits.max_dev_sectors = max;
blk_queue_logical_block_size(block->request_queue,
block->bp_block);
blk_queue_max_hw_sectors(block->request_queue, max);
diff --git a/drivers/s390/block/dasd_alias.c b/drivers/s390/block/dasd_alias.c
index 184b1db..286782c 100644
--- a/drivers/s390/block/dasd_alias.c
+++ b/drivers/s390/block/dasd_alias.c
@@ -264,8 +264,10 @@
spin_unlock_irqrestore(&lcu->lock, flags);
cancel_work_sync(&lcu->suc_data.worker);
spin_lock_irqsave(&lcu->lock, flags);
- if (device == lcu->suc_data.device)
+ if (device == lcu->suc_data.device) {
+ dasd_put_device(device);
lcu->suc_data.device = NULL;
+ }
}
was_pending = 0;
if (device == lcu->ruac_data.device) {
@@ -273,8 +275,10 @@
was_pending = 1;
cancel_delayed_work_sync(&lcu->ruac_data.dwork);
spin_lock_irqsave(&lcu->lock, flags);
- if (device == lcu->ruac_data.device)
+ if (device == lcu->ruac_data.device) {
+ dasd_put_device(device);
lcu->ruac_data.device = NULL;
+ }
}
private->lcu = NULL;
spin_unlock_irqrestore(&lcu->lock, flags);
@@ -549,8 +553,10 @@
if ((rc && (rc != -EOPNOTSUPP)) || (lcu->flags & NEED_UAC_UPDATE)) {
DBF_DEV_EVENT(DBF_WARNING, device, "could not update"
" alias data in lcu (rc = %d), retry later", rc);
- schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ);
+ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 30*HZ))
+ dasd_put_device(device);
} else {
+ dasd_put_device(device);
lcu->ruac_data.device = NULL;
lcu->flags &= ~UPDATE_PENDING;
}
@@ -593,8 +599,10 @@
*/
if (!usedev)
return -EINVAL;
+ dasd_get_device(usedev);
lcu->ruac_data.device = usedev;
- schedule_delayed_work(&lcu->ruac_data.dwork, 0);
+ if (!schedule_delayed_work(&lcu->ruac_data.dwork, 0))
+ dasd_put_device(usedev);
return 0;
}
@@ -723,7 +731,7 @@
ASCEBC((char *) &cqr->magic, 4);
ccw = cqr->cpaddr;
ccw->cmd_code = DASD_ECKD_CCW_RSCK;
- ccw->flags = 0 ;
+ ccw->flags = CCW_FLAG_SLI;
ccw->count = 16;
ccw->cda = (__u32)(addr_t) cqr->data;
((char *)cqr->data)[0] = reason;
@@ -930,6 +938,7 @@
/* 3. read new alias configuration */
_schedule_lcu_update(lcu, device);
lcu->suc_data.device = NULL;
+ dasd_put_device(device);
spin_unlock_irqrestore(&lcu->lock, flags);
}
@@ -989,6 +998,8 @@
}
lcu->suc_data.reason = reason;
lcu->suc_data.device = device;
+ dasd_get_device(device);
spin_unlock(&lcu->lock);
- schedule_work(&lcu->suc_data.worker);
+ if (!schedule_work(&lcu->suc_data.worker))
+ dasd_put_device(device);
};
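
The dasd_alias hunks pin the device across deferred work: take a reference before scheduling, and drop it immediately when schedule_work()/schedule_delayed_work() reports the work was already pending, because the queued instance runs - and puts - only once. A minimal single-threaded model of that accounting:

#include <stdbool.h>
#include <stdio.h>

struct device { int refcnt; };

static void get_device(struct device *d) { d->refcnt++; }
static void put_device(struct device *d) { d->refcnt--; }

static bool work_pending;

static bool schedule_work(void)
{
	if (work_pending)
		return false;	/* already queued: no extra execution follows */
	work_pending = true;
	return true;
}

static void work_fn(struct device *d)
{
	work_pending = false;
	put_device(d);		/* balances the get taken at schedule time */
}

int main(void)
{
	struct device dev = { .refcnt = 1 };

	get_device(&dev);
	if (!schedule_work())
		put_device(&dev);

	get_device(&dev);	/* second schedule while still pending */
	if (!schedule_work())
		put_device(&dev);	/* dropped right away */

	work_fn(&dev);
	printf("refcnt=%d\n", dev.refcnt);	/* back to 1 */
	return 0;
}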
diff --git a/drivers/scsi/device_handler/scsi_dh_rdac.c b/drivers/scsi/device_handler/scsi_dh_rdac.c
index 3613581..93880ed 100644
--- a/drivers/scsi/device_handler/scsi_dh_rdac.c
+++ b/drivers/scsi/device_handler/scsi_dh_rdac.c
@@ -562,7 +562,7 @@
/*
* Command Lock contention
*/
- err = SCSI_DH_RETRY;
+ err = SCSI_DH_IMM_RETRY;
break;
default:
break;
@@ -612,6 +612,8 @@
err = mode_select_handle_sense(sdev, h->sense);
if (err == SCSI_DH_RETRY && retry_cnt--)
goto retry;
+ if (err == SCSI_DH_IMM_RETRY)
+ goto retry;
}
if (err == SCSI_DH_OK) {
h->state = RDAC_STATE_ACTIVE;
diff --git a/drivers/scsi/hisi_sas/Kconfig b/drivers/scsi/hisi_sas/Kconfig
index b676618..d1dd161 100644
--- a/drivers/scsi/hisi_sas/Kconfig
+++ b/drivers/scsi/hisi_sas/Kconfig
@@ -1,6 +1,6 @@
config SCSI_HISI_SAS
tristate "HiSilicon SAS"
- depends on HAS_DMA
+ depends on HAS_DMA && HAS_IOMEM
depends on ARM64 || COMPILE_TEST
select SCSI_SAS_LIBSAS
select BLK_DEV_INTEGRITY
diff --git a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
index 057fdeb..eea24d7 100644
--- a/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
+++ b/drivers/scsi/hisi_sas/hisi_sas_v1_hw.c
@@ -1289,13 +1289,10 @@
goto out;
}
- if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK) {
- if (!(cmplt_hdr_data & CMPLT_HDR_CMD_CMPLT_MSK) ||
- !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK))
- ts->stat = SAS_DATA_OVERRUN;
- else
- slot_err_v1_hw(hisi_hba, task, slot);
+ if (cmplt_hdr_data & CMPLT_HDR_ERR_RCRD_XFRD_MSK &&
+ !(cmplt_hdr_data & CMPLT_HDR_RSPNS_XFRD_MSK)) {
+ slot_err_v1_hw(hisi_hba, task, slot);
goto out;
}
diff --git a/drivers/scsi/qla2xxx/qla_init.c b/drivers/scsi/qla2xxx/qla_init.c
index 52a8765..692a757 100644
--- a/drivers/scsi/qla2xxx/qla_init.c
+++ b/drivers/scsi/qla2xxx/qla_init.c
@@ -2204,7 +2204,7 @@
/* Clear outstanding commands array. */
for (que = 0; que < ha->max_req_queues; que++) {
req = ha->req_q_map[que];
- if (!req)
+ if (!req || !test_bit(que, ha->req_qid_map))
continue;
req->out_ptr = (void *)(req->ring + req->length);
*req->out_ptr = 0;
@@ -2221,7 +2221,7 @@
for (que = 0; que < ha->max_rsp_queues; que++) {
rsp = ha->rsp_q_map[que];
- if (!rsp)
+ if (!rsp || !test_bit(que, ha->rsp_qid_map))
continue;
rsp->in_ptr = (void *)(rsp->ring + rsp->length);
*rsp->in_ptr = 0;
@@ -4981,7 +4981,7 @@
for (i = 1; i < ha->max_rsp_queues; i++) {
rsp = ha->rsp_q_map[i];
- if (rsp) {
+ if (rsp && test_bit(i, ha->rsp_qid_map)) {
rsp->options &= ~BIT_0;
ret = qla25xx_init_rsp_que(base_vha, rsp);
if (ret != QLA_SUCCESS)
@@ -4996,8 +4996,8 @@
}
for (i = 1; i < ha->max_req_queues; i++) {
req = ha->req_q_map[i];
- if (req) {
- /* Clear outstanding commands array. */
+ if (req && test_bit(i, ha->req_qid_map)) {
+ /* Clear outstanding commands array. */
req->options &= ~BIT_0;
ret = qla25xx_init_req_que(base_vha, req);
if (ret != QLA_SUCCESS)
diff --git a/drivers/scsi/qla2xxx/qla_isr.c b/drivers/scsi/qla2xxx/qla_isr.c
index d4d65eb..4af9547 100644
--- a/drivers/scsi/qla2xxx/qla_isr.c
+++ b/drivers/scsi/qla2xxx/qla_isr.c
@@ -3063,9 +3063,9 @@
"MSI-X: Failed to enable support "
"-- %d/%d\n Retry with %d vectors.\n",
ha->msix_count, ret, ret);
+ ha->msix_count = ret;
+ ha->max_rsp_queues = ha->msix_count - 1;
}
- ha->msix_count = ret;
- ha->max_rsp_queues = ha->msix_count - 1;
ha->msix_entries = kzalloc(sizeof(struct qla_msix_entry) *
ha->msix_count, GFP_KERNEL);
if (!ha->msix_entries) {
diff --git a/drivers/scsi/qla2xxx/qla_mid.c b/drivers/scsi/qla2xxx/qla_mid.c
index c5dd594..cf7ba52 100644
--- a/drivers/scsi/qla2xxx/qla_mid.c
+++ b/drivers/scsi/qla2xxx/qla_mid.c
@@ -600,7 +600,7 @@
/* Delete request queues */
for (cnt = 1; cnt < ha->max_req_queues; cnt++) {
req = ha->req_q_map[cnt];
- if (req) {
+ if (req && test_bit(cnt, ha->req_qid_map)) {
ret = qla25xx_delete_req_que(vha, req);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x00ea,
@@ -614,7 +614,7 @@
/* Delete response queues */
for (cnt = 1; cnt < ha->max_rsp_queues; cnt++) {
rsp = ha->rsp_q_map[cnt];
- if (rsp) {
+ if (rsp && test_bit(cnt, ha->rsp_qid_map)) {
ret = qla25xx_delete_rsp_que(vha, rsp);
if (ret != QLA_SUCCESS) {
ql_log(ql_log_warn, vha, 0x00eb,
diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
index f1788db..f6c7ce3 100644
--- a/drivers/scsi/qla2xxx/qla_os.c
+++ b/drivers/scsi/qla2xxx/qla_os.c
@@ -409,6 +409,9 @@
int cnt;
for (cnt = 0; cnt < ha->max_req_queues; cnt++) {
+ if (!test_bit(cnt, ha->req_qid_map))
+ continue;
+
req = ha->req_q_map[cnt];
qla2x00_free_req_que(ha, req);
}
@@ -416,6 +419,9 @@
ha->req_q_map = NULL;
for (cnt = 0; cnt < ha->max_rsp_queues; cnt++) {
+ if (!test_bit(cnt, ha->rsp_qid_map))
+ continue;
+
rsp = ha->rsp_q_map[cnt];
qla2x00_free_rsp_que(ha, rsp);
}
diff --git a/drivers/scsi/qla2xxx/qla_target.c b/drivers/scsi/qla2xxx/qla_target.c
index 8075a4c..ee967be 100644
--- a/drivers/scsi/qla2xxx/qla_target.c
+++ b/drivers/scsi/qla2xxx/qla_target.c
@@ -105,7 +105,7 @@
static int qlt_issue_task_mgmt(struct qla_tgt_sess *sess, uint32_t lun,
int fn, void *iocb, int flags);
static void qlt_send_term_exchange(struct scsi_qla_host *ha, struct qla_tgt_cmd
- *cmd, struct atio_from_isp *atio, int ha_locked);
+ *cmd, struct atio_from_isp *atio, int ha_locked, int ul_abort);
static void qlt_reject_free_srr_imm(struct scsi_qla_host *ha,
struct qla_tgt_srr_imm *imm, int ha_lock);
static void qlt_abort_cmd_on_host_reset(struct scsi_qla_host *vha,
@@ -1756,7 +1756,7 @@
qlt_send_notify_ack(vha, &mcmd->orig_iocb.imm_ntfy,
0, 0, 0, 0, 0, 0);
else {
- if (mcmd->se_cmd.se_tmr_req->function == TMR_ABORT_TASK)
+ if (mcmd->orig_iocb.atio.u.raw.entry_type == ABTS_RECV_24XX)
qlt_24xx_send_abts_resp(vha, &mcmd->orig_iocb.abts,
mcmd->fc_tm_rsp, false);
else
@@ -2665,7 +2665,7 @@
/* no need to terminate. FW already freed exchange. */
qlt_abort_cmd_on_host_reset(cmd->vha, cmd);
else
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
return 0;
}
@@ -3173,7 +3173,8 @@
}
static void qlt_send_term_exchange(struct scsi_qla_host *vha,
- struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked)
+ struct qla_tgt_cmd *cmd, struct atio_from_isp *atio, int ha_locked,
+ int ul_abort)
{
unsigned long flags = 0;
int rc;
@@ -3193,8 +3194,7 @@
qlt_alloc_qfull_cmd(vha, atio, 0, 0);
done:
- if (cmd && (!cmd->aborted ||
- !cmd->cmd_sent_to_fw)) {
+ if (cmd && !ul_abort && !cmd->aborted) {
if (cmd->sg_mapped)
qlt_unmap_sg(vha, cmd);
vha->hw->tgt.tgt_ops->free_cmd(cmd);
@@ -3253,21 +3253,38 @@
}
-void qlt_abort_cmd(struct qla_tgt_cmd *cmd)
+int qlt_abort_cmd(struct qla_tgt_cmd *cmd)
{
struct qla_tgt *tgt = cmd->tgt;
struct scsi_qla_host *vha = tgt->vha;
struct se_cmd *se_cmd = &cmd->se_cmd;
+ unsigned long flags;
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf014,
"qla_target(%d): terminating exchange for aborted cmd=%p "
"(se_cmd=%p, tag=%llu)", vha->vp_idx, cmd, &cmd->se_cmd,
se_cmd->tag);
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if (cmd->aborted) {
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ /*
+ * It's normal to see 2 calls in this path:
+ * 1) XFER Rdy completion + CMD_T_ABORT
+ * 2) TCM TMR - drain_state_list
+ */
+ ql_dbg(ql_dbg_tgt_mgt, vha, 0xffff,
+ "multiple abort. %p transport_state %x, t_state %x,"
+ " se_cmd_flags %x \n", cmd, cmd->se_cmd.transport_state,
+ cmd->se_cmd.t_state,cmd->se_cmd.se_cmd_flags);
+ return EIO;
+ }
cmd->aborted = 1;
cmd->cmd_flags |= BIT_6;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 0);
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 0, 1);
+ return 0;
}
EXPORT_SYMBOL(qlt_abort_cmd);
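
qlt_abort_cmd() now checks and sets the aborted flag under the new per-command lock, so the second of two racing aborts (XFER-ready completion vs. a TMR drain) returns EIO instead of terminating the exchange twice. The guard in isolation (5 is EIO on Linux):

#include <pthread.h>
#include <stdio.h>

struct cmd {
	pthread_mutex_t lock;
	int aborted;
};

static int abort_cmd(struct cmd *c)
{
	pthread_mutex_lock(&c->lock);
	if (c->aborted) {
		pthread_mutex_unlock(&c->lock);
		return 5;		/* EIO: someone already aborted this cmd */
	}
	c->aborted = 1;
	pthread_mutex_unlock(&c->lock);

	printf("terminating exchange\n");	/* runs exactly once */
	return 0;
}

int main(void)
{
	struct cmd c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	printf("first:  %d\n", abort_cmd(&c));
	printf("second: %d\n", abort_cmd(&c));
	return 0;
}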
@@ -3282,6 +3299,9 @@
BUG_ON(cmd->cmd_in_wq);
+ if (cmd->sg_mapped)
+ qlt_unmap_sg(cmd->vha, cmd);
+
if (!cmd->q_full)
qlt_decr_num_pend_cmds(cmd->vha);
@@ -3399,7 +3419,7 @@
term = 1;
if (term)
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
return term;
}
@@ -3580,12 +3600,13 @@
case CTIO_PORT_LOGGED_OUT:
case CTIO_PORT_UNAVAILABLE:
{
- int logged_out = (status & 0xFFFF);
+ int logged_out =
+ (status & 0xFFFF) == CTIO_PORT_LOGGED_OUT;
+
ql_dbg(ql_dbg_tgt_mgt, vha, 0xf059,
"qla_target(%d): CTIO with %s status %x "
"received (state %x, se_cmd %p)\n", vha->vp_idx,
- (logged_out == CTIO_PORT_LOGGED_OUT) ?
- "PORT LOGGED OUT" : "PORT UNAVAILABLE",
+ logged_out ? "PORT LOGGED OUT" : "PORT UNAVAILABLE",
status, cmd->state, se_cmd);
if (logged_out && cmd->sess) {
@@ -3754,6 +3775,7 @@
goto out_term;
}
+ spin_lock_init(&cmd->cmd_lock);
cdb = &atio->u.isp24.fcp_cmnd.cdb[0];
cmd->se_cmd.tag = atio->u.isp24.exchange_addr;
cmd->unpacked_lun = scsilun_to_int(
@@ -3796,7 +3818,7 @@
*/
cmd->cmd_flags |= BIT_2;
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_term_exchange(vha, NULL, &cmd->atio, 1);
+ qlt_send_term_exchange(vha, NULL, &cmd->atio, 1, 0);
qlt_decr_num_pend_cmds(vha);
percpu_ida_free(&sess->se_sess->sess_tag_pool, cmd->se_cmd.map_tag);
@@ -3918,7 +3940,7 @@
out_term:
spin_lock_irqsave(&ha->hardware_lock, flags);
- qlt_send_term_exchange(vha, NULL, &op->atio, 1);
+ qlt_send_term_exchange(vha, NULL, &op->atio, 1, 0);
spin_unlock_irqrestore(&ha->hardware_lock, flags);
kfree(op);
@@ -3982,7 +4004,8 @@
cmd->cmd_in_wq = 1;
cmd->cmd_flags |= BIT_0;
- cmd->se_cmd.cpuid = -1;
+ cmd->se_cmd.cpuid = ha->msix_count ?
+ ha->tgt.rspq_vector_cpuid : WORK_CPU_UNBOUND;
spin_lock(&vha->cmd_list_lock);
list_add_tail(&cmd->cmd_list, &vha->qla_cmd_list);
@@ -3990,7 +4013,6 @@
INIT_WORK(&cmd->work, qlt_do_work);
if (ha->msix_count) {
- cmd->se_cmd.cpuid = ha->tgt.rspq_vector_cpuid;
if (cmd->atio.u.isp24.fcp_cmnd.rddata)
queue_work_on(smp_processor_id(), qla_tgt_wq,
&cmd->work);
@@ -4771,7 +4793,7 @@
dump_stack();
} else {
cmd->cmd_flags |= BIT_9;
- qlt_send_term_exchange(vha, cmd, &cmd->atio, 1);
+ qlt_send_term_exchange(vha, cmd, &cmd->atio, 1, 0);
}
spin_unlock_irqrestore(&ha->hardware_lock, flags);
}
@@ -4950,7 +4972,7 @@
sctio, sctio->srr_id);
list_del(&sctio->srr_list_entry);
qlt_send_term_exchange(vha, sctio->cmd,
- &sctio->cmd->atio, 1);
+ &sctio->cmd->atio, 1, 0);
kfree(sctio);
}
}
@@ -5123,7 +5145,7 @@
atio->u.isp24.fcp_hdr.s_id);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
if (!sess) {
- qlt_send_term_exchange(vha, NULL, atio, 1);
+ qlt_send_term_exchange(vha, NULL, atio, 1, 0);
return 0;
}
/* Sending marker isn't necessary, since we called from ISR */
@@ -5406,7 +5428,7 @@
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
qlt_send_busy(vha, atio, SAM_STAT_BUSY);
#else
- qlt_send_term_exchange(vha, NULL, atio, 1);
+ qlt_send_term_exchange(vha, NULL, atio, 1, 0);
#endif
if (!ha_locked)
@@ -5523,7 +5545,7 @@
#if 1 /* With TERM EXCHANGE some FC cards refuse to boot */
qlt_send_busy(vha, atio, 0);
#else
- qlt_send_term_exchange(vha, NULL, atio, 1);
+ qlt_send_term_exchange(vha, NULL, atio, 1, 0);
#endif
} else {
if (tgt->tgt_stop) {
@@ -5532,7 +5554,7 @@
"command to target, sending TERM "
"EXCHANGE for rsp\n");
qlt_send_term_exchange(vha, NULL,
- atio, 1);
+ atio, 1, 0);
} else {
ql_dbg(ql_dbg_tgt, vha, 0xe060,
"qla_target(%d): Unable to send "
@@ -5960,7 +5982,7 @@
return;
out_term:
- qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 0);
+ qlt_send_term_exchange(vha, NULL, &prm->tm_iocb2, 1, 0);
if (sess)
ha->tgt.tgt_ops->put_sess(sess);
spin_unlock_irqrestore(&ha->tgt.sess_lock, flags);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 71b2865..22a6a76 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -943,6 +943,36 @@
qlt_plogi_ack_t *plogi_link[QLT_PLOGI_LINK_MAX];
};
+typedef enum {
+ /*
+ * BIT_0 - Atio Arrival / schedule to work
+ * BIT_1 - qlt_do_work
+ * BIT_2 - qlt_do work failed
+ * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
+ * BIT_4 - read response/tcm_qla2xxx_queue_data_in
+ * BIT_5 - status response / tcm_qla2xxx_queue_status
+ * BIT_6 - tcm request to abort/Term exchange.
+ * pre_xmit_response->qlt_send_term_exchange
+ * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
+ * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
+ * BIT_9 - SRR received (qlt_handle_srr->qlt_send_term_exchange)
+ * BIT_10 - Data in - handle_data->tcm_qla2xxx_handle_data
+
+ * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
+ * BIT_13 - Bad completion -
+ * qlt_ctio_do_completion --> qlt_term_ctio_exchange
+ * BIT_14 - Back end data received/sent.
+ * BIT_15 - SRR prepare ctio
+ * BIT_16 - complete free
+ * BIT_17 - flush - qlt_abort_cmd_on_host_reset
+ * BIT_18 - completion w/abort status
+ * BIT_19 - completion w/unknown status
+ * BIT_20 - tcm_qla2xxx_free_cmd
+ */
+ CMD_FLAG_DATA_WORK = BIT_11,
+ CMD_FLAG_DATA_WORK_FREE = BIT_21,
+} cmd_flags_t;
+
struct qla_tgt_cmd {
struct se_cmd se_cmd;
struct qla_tgt_sess *sess;
@@ -952,6 +982,7 @@
/* Sense buffer that will be mapped into outgoing status */
unsigned char sense_buffer[TRANSPORT_SENSE_BUFFER];
+ spinlock_t cmd_lock;
/* to save extra sess dereferences */
unsigned int conf_compl_supported:1;
unsigned int sg_mapped:1;
@@ -986,30 +1017,8 @@
uint64_t jiffies_at_alloc;
uint64_t jiffies_at_free;
- /* BIT_0 - Atio Arrival / schedule to work
- * BIT_1 - qlt_do_work
- * BIT_2 - qlt_do work failed
- * BIT_3 - xfer rdy/tcm_qla2xxx_write_pending
- * BIT_4 - read respond/tcm_qla2xx_queue_data_in
- * BIT_5 - status respond / tcm_qla2xx_queue_status
- * BIT_6 - tcm request to abort/Term exchange.
- * pre_xmit_response->qlt_send_term_exchange
- * BIT_7 - SRR received (qlt_handle_srr->qlt_xmit_response)
- * BIT_8 - SRR received (qlt_handle_srr->qlt_rdy_to_xfer)
- * BIT_9 - SRR received (qla_handle_srr->qlt_send_term_exchange)
- * BIT_10 - Data in - hanlde_data->tcm_qla2xxx_handle_data
- * BIT_11 - Data actually going to TCM : tcm_qla2xx_handle_data_work
- * BIT_12 - good completion - qlt_ctio_do_completion -->free_cmd
- * BIT_13 - Bad completion -
- * qlt_ctio_do_completion --> qlt_term_ctio_exchange
- * BIT_14 - Back end data received/sent.
- * BIT_15 - SRR prepare ctio
- * BIT_16 - complete free
- * BIT_17 - flush - qlt_abort_cmd_on_host_reset
- * BIT_18 - completion w/abort status
- * BIT_19 - completion w/unknown status
- */
- uint32_t cmd_flags;
+
+ cmd_flags_t cmd_flags;
};
struct qla_tgt_sess_work_param {
@@ -1148,7 +1157,7 @@
extern void qlt_response_pkt_all_vps(struct scsi_qla_host *, response_t *);
extern int qlt_rdy_to_xfer(struct qla_tgt_cmd *);
extern int qlt_xmit_response(struct qla_tgt_cmd *, int, uint8_t);
-extern void qlt_abort_cmd(struct qla_tgt_cmd *);
+extern int qlt_abort_cmd(struct qla_tgt_cmd *);
extern void qlt_xmit_tm_rsp(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_mcmd(struct qla_tgt_mgmt_cmd *);
extern void qlt_free_cmd(struct qla_tgt_cmd *cmd);
diff --git a/drivers/scsi/qla2xxx/qla_tmpl.c b/drivers/scsi/qla2xxx/qla_tmpl.c
index ddbe2e7..c3e6225 100644
--- a/drivers/scsi/qla2xxx/qla_tmpl.c
+++ b/drivers/scsi/qla2xxx/qla_tmpl.c
@@ -395,6 +395,10 @@
if (ent->t263.queue_type == T263_QUEUE_TYPE_REQ) {
for (i = 0; i < vha->hw->max_req_queues; i++) {
struct req_que *req = vha->hw->req_q_map[i];
+
+ if (!test_bit(i, vha->hw->req_qid_map))
+ continue;
+
if (req || !buf) {
length = req ?
req->length : REQUEST_ENTRY_CNT_24XX;
@@ -408,6 +412,10 @@
} else if (ent->t263.queue_type == T263_QUEUE_TYPE_RSP) {
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+ if (!test_bit(i, vha->hw->rsp_qid_map))
+ continue;
+
if (rsp || !buf) {
length = rsp ?
rsp->length : RESPONSE_ENTRY_CNT_MQ;
@@ -634,6 +642,10 @@
if (ent->t274.queue_type == T274_QUEUE_TYPE_REQ_SHAD) {
for (i = 0; i < vha->hw->max_req_queues; i++) {
struct req_que *req = vha->hw->req_q_map[i];
+
+ if (!test_bit(i, vha->hw->req_qid_map))
+ continue;
+
if (req || !buf) {
qla27xx_insert16(i, buf, len);
qla27xx_insert16(1, buf, len);
@@ -645,6 +657,10 @@
} else if (ent->t274.queue_type == T274_QUEUE_TYPE_RSP_SHAD) {
for (i = 0; i < vha->hw->max_rsp_queues; i++) {
struct rsp_que *rsp = vha->hw->rsp_q_map[i];
+
+ if (!test_bit(i, vha->hw->rsp_qid_map))
+ continue;
+
if (rsp || !buf) {
qla27xx_insert16(i, buf, len);
qla27xx_insert16(1, buf, len);
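
All four qla_tmpl.c hunks above add the same guard: a queue slot is dumped only if its bit is set in the qid map, because req_q_map[]/rsp_q_map[] entries for never-allocated queues may be stale. A minimal sketch of the pattern with illustrative names (not the driver's types):

    #include <linux/bitops.h>

    static void walk_allocated(const unsigned long *qid_map, void **q_map, int max)
    {
        int i;

        for (i = 0; i < max; i++) {
            if (!test_bit(i, qid_map))
                continue;   /* slot was never allocated; skip it */
            /* q_map[i] is safe to dereference here */
        }
    }
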
diff --git a/drivers/scsi/qla2xxx/tcm_qla2xxx.c b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
index faf0a12..1808a01 100644
--- a/drivers/scsi/qla2xxx/tcm_qla2xxx.c
+++ b/drivers/scsi/qla2xxx/tcm_qla2xxx.c
@@ -298,6 +298,10 @@
{
cmd->vha->tgt_counters.core_qla_free_cmd++;
cmd->cmd_in_wq = 1;
+
+ BUG_ON(cmd->cmd_flags & BIT_20);
+ cmd->cmd_flags |= BIT_20;
+
INIT_WORK(&cmd->work, tcm_qla2xxx_complete_free);
queue_work_on(smp_processor_id(), tcm_qla2xxx_free_wq, &cmd->work);
}
@@ -374,6 +378,20 @@
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
+
+ if (cmd->aborted) {
+ /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
+ * can get ahead of this cmd, in which case it has
+ * already kick-started the free.
+ */
+ pr_debug("write_pending aborted cmd[%p] refcount %d "
+ "transport_state %x, t_state %x, se_cmd_flags %x\n",
+ cmd, cmd->se_cmd.cmd_kref.refcount.counter,
+ cmd->se_cmd.transport_state,
+ cmd->se_cmd.t_state,
+ cmd->se_cmd.se_cmd_flags);
+ return 0;
+ }
cmd->cmd_flags |= BIT_3;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -405,7 +423,7 @@
se_cmd->t_state == TRANSPORT_COMPLETE_QF_WP) {
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
wait_for_completion_timeout(&se_cmd->t_transport_stop_comp,
- 3 * HZ);
+ 50);
return 0;
}
spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
@@ -444,6 +462,9 @@
if (bidi)
flags |= TARGET_SCF_BIDI_OP;
+ if (se_cmd->cpuid != WORK_CPU_UNBOUND)
+ flags |= TARGET_SCF_USE_CPUID;
+
sess = cmd->sess;
if (!sess) {
pr_err("Unable to locate struct qla_tgt_sess from qla_tgt_cmd\n");
@@ -465,13 +486,25 @@
static void tcm_qla2xxx_handle_data_work(struct work_struct *work)
{
struct qla_tgt_cmd *cmd = container_of(work, struct qla_tgt_cmd, work);
+ unsigned long flags;
/*
* Ensure that the complete FCP WRITE payload has been received.
* Otherwise return an exception via CHECK_CONDITION status.
*/
cmd->cmd_in_wq = 0;
- cmd->cmd_flags |= BIT_11;
+
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ cmd->cmd_flags |= CMD_FLAG_DATA_WORK;
+ if (cmd->aborted) {
+ cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
+ tcm_qla2xxx_free_cmd(cmd);
+ return;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+
cmd->vha->tgt_counters.qla_core_ret_ctio++;
if (!cmd->write_data_transferred) {
/*
@@ -546,6 +579,20 @@
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
+ if (cmd->aborted) {
+ /* Cmd can loop during Q-full. tcm_qla2xxx_aborted_task
+ * can get ahead of this cmd, in which case it has
+ * already kick-started the free.
+ */
+ pr_debug("queue_data_in aborted cmd[%p] refcount %d "
+ "transport_state %x, t_state %x, se_cmd_flags %x\n",
+ cmd, cmd->se_cmd.cmd_kref.refcount.counter,
+ cmd->se_cmd.transport_state,
+ cmd->se_cmd.t_state,
+ cmd->se_cmd.se_cmd_flags);
+ return 0;
+ }
+
cmd->cmd_flags |= BIT_4;
cmd->bufflen = se_cmd->data_length;
cmd->dma_data_direction = target_reverse_dma_direction(se_cmd);
@@ -637,11 +684,34 @@
qlt_xmit_tm_rsp(mcmd);
}
+
+#define DATA_WORK_NOT_FREE(_flags) \
+ (((_flags) & (CMD_FLAG_DATA_WORK | CMD_FLAG_DATA_WORK_FREE)) == \
+ CMD_FLAG_DATA_WORK)
static void tcm_qla2xxx_aborted_task(struct se_cmd *se_cmd)
{
struct qla_tgt_cmd *cmd = container_of(se_cmd,
struct qla_tgt_cmd, se_cmd);
- qlt_abort_cmd(cmd);
+ unsigned long flags;
+
+ if (qlt_abort_cmd(cmd))
+ return;
+
+ spin_lock_irqsave(&cmd->cmd_lock, flags);
+ if ((cmd->state == QLA_TGT_STATE_NEW) ||
+ ((cmd->state == QLA_TGT_STATE_DATA_IN) &&
+ DATA_WORK_NOT_FREE(cmd->cmd_flags))) {
+
+ cmd->cmd_flags |= CMD_FLAG_DATA_WORK_FREE;
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
+ /* Cmd has not reached firmware.
+ * Use this trigger to free it. */
+ tcm_qla2xxx_free_cmd(cmd);
+ return;
+ }
+ spin_unlock_irqrestore(&cmd->cmd_lock, flags);
}
static void tcm_qla2xxx_clear_sess_lookup(struct tcm_qla2xxx_lport *,
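
The DATA_WORK_NOT_FREE() test above is what lets the data-work path and the abort path race safely: whichever side sets CMD_FLAG_DATA_WORK_FREE first (under cmd_lock) owns the single call to tcm_qla2xxx_free_cmd(). A small stand-alone demonstration of the mask logic, assuming only the two bit values from qla_target.h:

    #include <stdio.h>

    enum {
        CMD_FLAG_DATA_WORK      = 1u << 11, /* BIT_11 */
        CMD_FLAG_DATA_WORK_FREE = 1u << 21, /* BIT_21 */
    };

    #define DATA_WORK_NOT_FREE(f) \
        (((f) & (CMD_FLAG_DATA_WORK | CMD_FLAG_DATA_WORK_FREE)) == \
         CMD_FLAG_DATA_WORK)

    int main(void)
    {
        unsigned int flags = CMD_FLAG_DATA_WORK;

        printf("%d\n", DATA_WORK_NOT_FREE(flags)); /* 1: free still pending */
        flags |= CMD_FLAG_DATA_WORK_FREE;
        printf("%d\n", DATA_WORK_NOT_FREE(flags)); /* 0: free already claimed */
        return 0;
    }
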
diff --git a/drivers/scsi/scsi_devinfo.c b/drivers/scsi/scsi_devinfo.c
index 47b9d13..bbfbfd9 100644
--- a/drivers/scsi/scsi_devinfo.c
+++ b/drivers/scsi/scsi_devinfo.c
@@ -205,6 +205,8 @@
{"Intel", "Multi-Flex", NULL, BLIST_NO_RSOC},
{"iRiver", "iFP Mass Driver", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
{"LASOUND", "CDX7405", "3.10", BLIST_MAX5LUN | BLIST_SINGLELUN},
+ {"Marvell", "Console", NULL, BLIST_SKIP_VPD_PAGES},
+ {"Marvell", "91xx Config", "1.01", BLIST_SKIP_VPD_PAGES},
{"MATSHITA", "PD-1", NULL, BLIST_FORCELUN | BLIST_SINGLELUN},
{"MATSHITA", "DMC-LC5", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
{"MATSHITA", "DMC-LC40", NULL, BLIST_NOT_LOCKABLE | BLIST_INQUIRY_36},
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index 4f18a85..00bc7218 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -1272,16 +1272,18 @@
void scsi_remove_target(struct device *dev)
{
struct Scsi_Host *shost = dev_to_shost(dev->parent);
- struct scsi_target *starget;
+ struct scsi_target *starget, *last_target = NULL;
unsigned long flags;
restart:
spin_lock_irqsave(shost->host_lock, flags);
list_for_each_entry(starget, &shost->__targets, siblings) {
- if (starget->state == STARGET_DEL)
+ if (starget->state == STARGET_DEL ||
+ starget == last_target)
continue;
if (starget->dev.parent == dev || &starget->dev == dev) {
kref_get(&starget->reap_ref);
+ last_target = starget;
spin_unlock_irqrestore(shost->host_lock, flags);
__scsi_remove_target(starget);
scsi_target_reap(starget);
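
The last_target bookkeeping above addresses a livelock: the scan restarts after each reap, and without remembering the previous pick it could select the same not-yet-STARGET_DEL target forever. A hedged sketch of the restart-scan guard with illustrative types (reference counting omitted for brevity):

    #include <linux/list.h>
    #include <linux/spinlock.h>
    #include <linux/types.h>

    struct item { struct list_head siblings; bool dying; };

    static void remove_matching(struct list_head *head, spinlock_t *lock)
    {
        struct item *cur, *last = NULL;
    restart:
        spin_lock(lock);
        list_for_each_entry(cur, head, siblings) {
            if (cur->dying || cur == last)
                continue;   /* already handled on a previous pass */
            last = cur;
            spin_unlock(lock);
            /* ... teardown that may sleep ... */
            goto restart;
        }
        spin_unlock(lock);
    }
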
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index bb669d3..d749da7 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -761,7 +761,7 @@
break;
default:
- ret = BLKPREP_KILL;
+ ret = BLKPREP_INVALID;
goto out;
}
@@ -839,7 +839,7 @@
int ret;
if (sdkp->device->no_write_same)
- return BLKPREP_KILL;
+ return BLKPREP_INVALID;
BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
diff --git a/drivers/scsi/storvsc_drv.c b/drivers/scsi/storvsc_drv.c
index 55627d0..292c04e 100644
--- a/drivers/scsi/storvsc_drv.c
+++ b/drivers/scsi/storvsc_drv.c
@@ -42,6 +42,7 @@
#include <scsi/scsi_devinfo.h>
#include <scsi/scsi_dbg.h>
#include <scsi/scsi_transport_fc.h>
+#include <scsi/scsi_transport.h>
/*
* All wire protocol details (storage protocol between the guest and the host)
@@ -477,19 +478,18 @@
struct storvsc_scan_work {
struct work_struct work;
struct Scsi_Host *host;
- uint lun;
+ u8 lun;
+ u8 tgt_id;
};
static void storvsc_device_scan(struct work_struct *work)
{
struct storvsc_scan_work *wrk;
- uint lun;
struct scsi_device *sdev;
wrk = container_of(work, struct storvsc_scan_work, work);
- lun = wrk->lun;
- sdev = scsi_device_lookup(wrk->host, 0, 0, lun);
+ sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
if (!sdev)
goto done;
scsi_rescan_device(&sdev->sdev_gendev);
@@ -540,7 +540,7 @@
if (!scsi_host_get(wrk->host))
goto done;
- sdev = scsi_device_lookup(wrk->host, 0, 0, wrk->lun);
+ sdev = scsi_device_lookup(wrk->host, 0, wrk->tgt_id, wrk->lun);
if (sdev) {
scsi_remove_device(sdev);
@@ -940,6 +940,7 @@
wrk->host = host;
wrk->lun = vm_srb->lun;
+ wrk->tgt_id = vm_srb->target_id;
INIT_WORK(&wrk->work, process_err_fn);
schedule_work(&wrk->work);
}
@@ -1770,6 +1771,11 @@
fc_transport_template = fc_attach_transport(&fc_transport_functions);
if (!fc_transport_template)
return -ENODEV;
+
+ /*
+ * Install Hyper-V specific timeout handler.
+ */
+ fc_transport_template->eh_timed_out = storvsc_eh_timed_out;
#endif
ret = vmbus_driver_register(&storvsc_drv);
diff --git a/drivers/sh/pm_runtime.c b/drivers/sh/pm_runtime.c
index 91a00301..a9bac3b 100644
--- a/drivers/sh/pm_runtime.c
+++ b/drivers/sh/pm_runtime.c
@@ -34,7 +34,7 @@
static int __init sh_pm_runtime_init(void)
{
- if (IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
+ if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_ARCH_SHMOBILE)) {
if (!of_find_compatible_node(NULL, NULL,
"renesas,cpg-mstp-clocks"))
return 0;
diff --git a/drivers/spi/spi-atmel.c b/drivers/spi/spi-atmel.c
index aebad36..8feac59 100644
--- a/drivers/spi/spi-atmel.c
+++ b/drivers/spi/spi-atmel.c
@@ -1571,6 +1571,7 @@
as->use_cs_gpios = true;
if (atmel_spi_is_v2(as) &&
+ pdev->dev.of_node &&
!of_get_property(pdev->dev.of_node, "cs-gpios", NULL)) {
as->use_cs_gpios = false;
master->num_chipselect = 4;
diff --git a/drivers/spi/spi-bcm2835aux.c b/drivers/spi/spi-bcm2835aux.c
index 7de6f84..ecc73c0 100644
--- a/drivers/spi/spi-bcm2835aux.c
+++ b/drivers/spi/spi-bcm2835aux.c
@@ -73,8 +73,8 @@
/* Bitfields in CNTL1 */
#define BCM2835_AUX_SPI_CNTL1_CSHIGH 0x00000700
-#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000080
-#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000040
+#define BCM2835_AUX_SPI_CNTL1_TXEMPTY 0x00000080
+#define BCM2835_AUX_SPI_CNTL1_IDLE 0x00000040
#define BCM2835_AUX_SPI_CNTL1_MSBF_IN 0x00000002
#define BCM2835_AUX_SPI_CNTL1_KEEP_IN 0x00000001
diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c
index 7fd6a4c..7cb0c19 100644
--- a/drivers/spi/spi-fsl-espi.c
+++ b/drivers/spi/spi-fsl-espi.c
@@ -84,7 +84,7 @@
/* SPCOM register values */
#define SPCOM_CS(x) ((x) << 30)
#define SPCOM_TRANLEN(x) ((x) << 0)
-#define SPCOM_TRANLEN_MAX 0xFFFF /* Max transaction length */
+#define SPCOM_TRANLEN_MAX 0x10000 /* Max transaction length */
#define AUTOSUSPEND_TIMEOUT 2000
@@ -233,7 +233,7 @@
reinit_completion(&mpc8xxx_spi->done);
/* Set SPCOM[CS] and SPCOM[TRANLEN] field */
- if ((t->len - 1) > SPCOM_TRANLEN_MAX) {
+ if (t->len > SPCOM_TRANLEN_MAX) {
dev_err(mpc8xxx_spi->dev, "Transaction length (%d)"
" beyond the SPCOM[TRANLEN] field\n", t->len);
return -EINVAL;
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index d98c33c..6a4ff27 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -929,7 +929,7 @@
tx->sgl, tx->nents, DMA_MEM_TO_DEV,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_tx)
- goto no_dma;
+ goto tx_nodma;
desc_tx->callback = spi_imx_dma_tx_callback;
desc_tx->callback_param = (void *)spi_imx;
@@ -941,7 +941,7 @@
rx->sgl, rx->nents, DMA_DEV_TO_MEM,
DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
if (!desc_rx)
- goto no_dma;
+ goto rx_nodma;
desc_rx->callback = spi_imx_dma_rx_callback;
desc_rx->callback_param = (void *)spi_imx;
@@ -1008,7 +1008,9 @@
return ret;
-no_dma:
+rx_nodma:
+ dmaengine_terminate_all(master->dma_tx);
+tx_nodma:
pr_warn_once("%s %s: DMA not available, falling back to PIO\n",
dev_driver_string(&master->dev),
dev_name(&master->dev));
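
The relabelled error path above follows the usual goto-unwind rule: each label tears down exactly what had been set up when the jump was taken, so a failed RX prep now terminates the already-prepared TX descriptor. A skeletal sketch (setup_tx/setup_rx/teardown_tx are hypothetical stand-ins for the dmaengine calls):

    #include <linux/errno.h>

    static int setup_tx(void);
    static int setup_rx(void);
    static void teardown_tx(void);

    static int setup_both(void)
    {
        if (setup_tx() < 0)
            goto tx_nodma;      /* nothing established yet */
        if (setup_rx() < 0)
            goto rx_nodma;      /* TX is live and must be torn down */
        return 0;

    rx_nodma:
        teardown_tx();
    tx_nodma:
        return -EAGAIN;
    }
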
diff --git a/drivers/spi/spi-loopback-test.c b/drivers/spi/spi-loopback-test.c
index 894616f..cf4bb36 100644
--- a/drivers/spi/spi-loopback-test.c
+++ b/drivers/spi/spi-loopback-test.c
@@ -761,6 +761,7 @@
test.iterate_transfer_mask = 1;
/* count number of transfers with tx/rx_buf != NULL */
+ rx_count = tx_count = 0;
for (i = 0; i < test.transfer_count; i++) {
if (test.transfers[i].tx_buf)
tx_count++;
diff --git a/drivers/spi/spi-omap2-mcspi.c b/drivers/spi/spi-omap2-mcspi.c
index 7273820..0caa3c8 100644
--- a/drivers/spi/spi-omap2-mcspi.c
+++ b/drivers/spi/spi-omap2-mcspi.c
@@ -1490,6 +1490,8 @@
return status;
disable_pm:
+ pm_runtime_dont_use_autosuspend(&pdev->dev);
+ pm_runtime_put_sync(&pdev->dev);
pm_runtime_disable(&pdev->dev);
free_master:
spi_master_put(master);
@@ -1501,6 +1503,7 @@
struct spi_master *master = platform_get_drvdata(pdev);
struct omap2_mcspi *mcspi = spi_master_get_devdata(master);
+ pm_runtime_dont_use_autosuspend(mcspi->dev);
pm_runtime_put_sync(mcspi->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 3327c49..713c63d9 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -898,7 +898,7 @@
da->unmap_zeroes_data = flag;
pr_debug("dev[%p]: SE Device Thin Provisioning LBPRZ bit: %d\n",
da->da_dev, flag);
- return 0;
+ return count;
}
/*
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index cacd97a..da457e2 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -828,6 +828,50 @@
return dev;
}
+/*
+ * Check if the underlying struct block_device request_queue supports
+ * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
+ * in ATA; if so, TPE=1 needs to be set.
+ */
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+ struct request_queue *q, int block_size)
+{
+ if (!blk_queue_discard(q))
+ return false;
+
+ attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
+ block_size;
+ /*
+ * Currently hardcoded to 1 in Linux/SCSI code.
+ */
+ attrib->max_unmap_block_desc_count = 1;
+ attrib->unmap_granularity = q->limits.discard_granularity / block_size;
+ attrib->unmap_granularity_alignment = q->limits.discard_alignment /
+ block_size;
+ attrib->unmap_zeroes_data = q->limits.discard_zeroes_data;
+ return true;
+}
+EXPORT_SYMBOL(target_configure_unmap_from_queue);
+
+/*
+ * Convert from the blocksize advertised to the initiator to the 512 byte
+ * units unconditionally used by the Linux block layer.
+ */
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb)
+{
+ switch (dev->dev_attrib.block_size) {
+ case 4096:
+ return lb << 3;
+ case 2048:
+ return lb << 2;
+ case 1024:
+ return lb << 1;
+ default:
+ return lb;
+ }
+}
+EXPORT_SYMBOL(target_to_linux_sector);
+
int target_configure_device(struct se_device *dev)
{
struct se_hba *hba = dev->se_hba;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index e319570..75f0f08 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -160,25 +160,11 @@
" block_device blocks: %llu logical_block_size: %d\n",
dev_size, div_u64(dev_size, fd_dev->fd_block_size),
fd_dev->fd_block_size);
- /*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
- */
- if (blk_queue_discard(q)) {
- dev->dev_attrib.max_unmap_lba_count =
- q->limits.max_discard_sectors;
- /*
- * Currently hardcoded to 1 in Linux/SCSI code..
- */
- dev->dev_attrib.max_unmap_block_desc_count = 1;
- dev->dev_attrib.unmap_granularity =
- q->limits.discard_granularity >> 9;
- dev->dev_attrib.unmap_granularity_alignment =
- q->limits.discard_alignment;
+
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+ fd_dev->fd_block_size))
pr_debug("IFILE: BLOCK Discard support available,"
- " disabled by default\n");
- }
+ " disabled by default\n");
/*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -490,9 +476,12 @@
if (S_ISBLK(inode->i_mode)) {
/* The backend is block device, use discard */
struct block_device *bdev = inode->i_bdev;
+ struct se_device *dev = cmd->se_dev;
- ret = blkdev_issue_discard(bdev, lba,
- nolb, GFP_KERNEL, 0);
+ ret = blkdev_issue_discard(bdev,
+ target_to_linux_sector(dev, lba),
+ target_to_linux_sector(dev, nolb),
+ GFP_KERNEL, 0);
if (ret < 0) {
pr_warn("FILEIO: blkdev_issue_discard() failed: %d\n",
ret);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 5a2899f..abe4eb9 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -121,29 +121,11 @@
dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
dev->dev_attrib.hw_queue_depth = q->nr_requests;
- /*
- * Check if the underlying struct block_device request_queue supports
- * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
- * in ATA and we need to set TPE=1
- */
- if (blk_queue_discard(q)) {
- dev->dev_attrib.max_unmap_lba_count =
- q->limits.max_discard_sectors;
-
- /*
- * Currently hardcoded to 1 in Linux/SCSI code..
- */
- dev->dev_attrib.max_unmap_block_desc_count = 1;
- dev->dev_attrib.unmap_granularity =
- q->limits.discard_granularity >> 9;
- dev->dev_attrib.unmap_granularity_alignment =
- q->limits.discard_alignment;
- dev->dev_attrib.unmap_zeroes_data =
- q->limits.discard_zeroes_data;
-
+ if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
+ dev->dev_attrib.hw_block_size))
pr_debug("IBLOCK: BLOCK Discard support available,"
- " disabled by default\n");
- }
+ " disabled by default\n");
+
/*
* Enable write same emulation for IBLOCK and use 0xFFFF as
* the smaller WRITE_SAME(10) only has a two-byte block count.
@@ -415,9 +397,13 @@
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
+ struct se_device *dev = cmd->se_dev;
int ret;
- ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
+ ret = blkdev_issue_discard(bdev,
+ target_to_linux_sector(dev, lba),
+ target_to_linux_sector(dev, nolb),
+ GFP_KERNEL, 0);
if (ret < 0) {
pr_err("blkdev_issue_discard() failed: %d\n", ret);
return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -433,8 +419,10 @@
struct scatterlist *sg;
struct bio *bio;
struct bio_list list;
- sector_t block_lba = cmd->t_task_lba;
- sector_t sectors = sbc_get_write_same_sectors(cmd);
+ struct se_device *dev = cmd->se_dev;
+ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
+ sector_t sectors = target_to_linux_sector(dev,
+ sbc_get_write_same_sectors(cmd));
if (cmd->prot_op) {
pr_err("WRITE_SAME: Protection information with IBLOCK"
@@ -648,12 +636,12 @@
enum dma_data_direction data_direction)
{
struct se_device *dev = cmd->se_dev;
+ sector_t block_lba = target_to_linux_sector(dev, cmd->t_task_lba);
struct iblock_req *ibr;
struct bio *bio, *bio_start;
struct bio_list list;
struct scatterlist *sg;
u32 sg_num = sgl_nents;
- sector_t block_lba;
unsigned bio_cnt;
int rw = 0;
int i;
@@ -679,24 +667,6 @@
rw = READ;
}
- /*
- * Convert the blocksize advertised to the initiator to the 512 byte
- * units unconditionally used by the Linux block layer.
- */
- if (dev->dev_attrib.block_size == 4096)
- block_lba = (cmd->t_task_lba << 3);
- else if (dev->dev_attrib.block_size == 2048)
- block_lba = (cmd->t_task_lba << 2);
- else if (dev->dev_attrib.block_size == 1024)
- block_lba = (cmd->t_task_lba << 1);
- else if (dev->dev_attrib.block_size == 512)
- block_lba = cmd->t_task_lba;
- else {
- pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
- " %u\n", dev->dev_attrib.block_size);
- return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
- }
-
ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
if (!ibr)
goto fail;
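
Worked example for the new target_to_linux_sector() helper: with a 4096-byte exported block size, LBA 100 becomes 100 << 3 = 800 block-layer sectors, while a 512-byte block size passes through unchanged. A stand-alone version of the same switch:

    #include <stdio.h>

    typedef unsigned long long sector_t;

    /* Mirrors target_to_linux_sector(): initiator block size -> 512-byte units. */
    static sector_t to_linux_sector(unsigned int block_size, sector_t lb)
    {
        switch (block_size) {
        case 4096: return lb << 3;
        case 2048: return lb << 2;
        case 1024: return lb << 1;
        default:   return lb;   /* 512-byte blocks are already native */
        }
    }

    int main(void)
    {
        printf("%llu\n", to_linux_sector(4096, 100)); /* 800 */
        printf("%llu\n", to_linux_sector(512, 100));  /* 100 */
        return 0;
    }
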
diff --git a/drivers/target/target_core_internal.h b/drivers/target/target_core_internal.h
index dae0750c..db4412f 100644
--- a/drivers/target/target_core_internal.h
+++ b/drivers/target/target_core_internal.h
@@ -141,7 +141,6 @@
int transport_dump_vpd_assoc(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident_type(struct t10_vpd *, unsigned char *, int);
int transport_dump_vpd_ident(struct t10_vpd *, unsigned char *, int);
-bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags);
void transport_clear_lun_ref(struct se_lun *);
void transport_send_task_abort(struct se_cmd *);
sense_reason_t target_cmd_size_check(struct se_cmd *cmd, unsigned int size);
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index fcdcb11..82a663b 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -68,23 +68,25 @@
if (dev) {
spin_lock_irqsave(&dev->se_tmr_lock, flags);
- list_del(&tmr->tmr_list);
+ list_del_init(&tmr->tmr_list);
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
}
kfree(tmr);
}
-static void core_tmr_handle_tas_abort(
- struct se_node_acl *tmr_nacl,
- struct se_cmd *cmd,
- int tas)
+static void core_tmr_handle_tas_abort(struct se_cmd *cmd, int tas)
{
- bool remove = true;
+ unsigned long flags;
+ bool remove = true, send_tas;
/*
* TASK ABORTED status (TAS) bit support
*/
- if ((tmr_nacl && (tmr_nacl != cmd->se_sess->se_node_acl)) && tas) {
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ send_tas = (cmd->transport_state & CMD_T_TAS);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
+ if (send_tas) {
remove = false;
transport_send_task_abort(cmd);
}
@@ -107,6 +109,46 @@
return 1;
}
+static bool __target_check_io_state(struct se_cmd *se_cmd,
+ struct se_session *tmr_sess, int tas)
+{
+ struct se_session *sess = se_cmd->se_sess;
+
+ assert_spin_locked(&sess->sess_cmd_lock);
+ WARN_ON_ONCE(!irqs_disabled());
+ /*
+ * If the command has already reached CMD_T_COMPLETE within
+ * target_complete_cmd(), or CMD_T_FABRIC_STOP due to shutdown,
+ * this se_cmd has been passed to the fabric driver and will
+ * not be aborted.
+ *
+ * Otherwise, obtain a local se_cmd->cmd_kref now for TMR
+ * ABORT_TASK + LUN_RESET CMD_T_ABORTED processing, provided
+ * se_cmd->cmd_kref has not already dropped to zero.
+ */
+ spin_lock(&se_cmd->t_state_lock);
+ if (se_cmd->transport_state & (CMD_T_COMPLETE | CMD_T_FABRIC_STOP)) {
+ pr_debug("Attempted to abort io tag: %llu already complete or"
+ " fabric stop, skipping\n", se_cmd->tag);
+ spin_unlock(&se_cmd->t_state_lock);
+ return false;
+ }
+ if (sess->sess_tearing_down || se_cmd->cmd_wait_set) {
+ pr_debug("Attempted to abort io tag: %llu already shutdown,"
+ " skipping\n", se_cmd->tag);
+ spin_unlock(&se_cmd->t_state_lock);
+ return false;
+ }
+ se_cmd->transport_state |= CMD_T_ABORTED;
+
+ if ((tmr_sess != se_cmd->se_sess) && tas)
+ se_cmd->transport_state |= CMD_T_TAS;
+
+ spin_unlock(&se_cmd->t_state_lock);
+
+ return kref_get_unless_zero(&se_cmd->cmd_kref);
+}
+
void core_tmr_abort_task(
struct se_device *dev,
struct se_tmr_req *tmr,
@@ -130,34 +172,22 @@
if (tmr->ref_task_tag != ref_tag)
continue;
- if (!kref_get_unless_zero(&se_cmd->cmd_kref))
- continue;
-
printk("ABORT_TASK: Found referenced %s task_tag: %llu\n",
se_cmd->se_tfo->get_fabric_name(), ref_tag);
- spin_lock(&se_cmd->t_state_lock);
- if (se_cmd->transport_state & CMD_T_COMPLETE) {
- printk("ABORT_TASK: ref_tag: %llu already complete,"
- " skipping\n", ref_tag);
- spin_unlock(&se_cmd->t_state_lock);
+ if (!__target_check_io_state(se_cmd, se_sess, 0)) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
-
target_put_sess_cmd(se_cmd);
-
goto out;
}
- se_cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock(&se_cmd->t_state_lock);
-
list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
cancel_work_sync(&se_cmd->work);
transport_wait_for_tasks(se_cmd);
- target_put_sess_cmd(se_cmd);
transport_cmd_finish_abort(se_cmd, true);
+ target_put_sess_cmd(se_cmd);
printk("ABORT_TASK: Sending TMR_FUNCTION_COMPLETE for"
" ref_tag: %llu\n", ref_tag);
@@ -178,9 +208,11 @@
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_tmr_list);
+ struct se_session *sess;
struct se_tmr_req *tmr_p, *tmr_pp;
struct se_cmd *cmd;
unsigned long flags;
+ bool rc;
/*
* Release all pending and outgoing TMRs aside from the received
* LUN_RESET tmr..
@@ -206,17 +238,39 @@
if (target_check_cdb_and_preempt(preempt_and_abort_list, cmd))
continue;
+ sess = cmd->se_sess;
+ if (WARN_ON_ONCE(!sess))
+ continue;
+
+ spin_lock(&sess->sess_cmd_lock);
spin_lock(&cmd->t_state_lock);
- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
+ if (!(cmd->transport_state & CMD_T_ACTIVE) ||
+ (cmd->transport_state & CMD_T_FABRIC_STOP)) {
spin_unlock(&cmd->t_state_lock);
+ spin_unlock(&sess->sess_cmd_lock);
continue;
}
if (cmd->t_state == TRANSPORT_ISTATE_PROCESSING) {
spin_unlock(&cmd->t_state_lock);
+ spin_unlock(&sess->sess_cmd_lock);
continue;
}
+ if (sess->sess_tearing_down || cmd->cmd_wait_set) {
+ spin_unlock(&cmd->t_state_lock);
+ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+ }
+ cmd->transport_state |= CMD_T_ABORTED;
spin_unlock(&cmd->t_state_lock);
+ rc = kref_get_unless_zero(&cmd->cmd_kref);
+ if (!rc) {
+ printk("LUN_RESET TMR: non-zero kref_get_unless_zero\n");
+ spin_unlock(&sess->sess_cmd_lock);
+ continue;
+ }
+ spin_unlock(&sess->sess_cmd_lock);
+
list_move_tail(&tmr_p->tmr_list, &drain_tmr_list);
}
spin_unlock_irqrestore(&dev->se_tmr_lock, flags);
@@ -230,20 +284,26 @@
(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
tmr_p->function, tmr_p->response, cmd->t_state);
+ cancel_work_sync(&cmd->work);
+ transport_wait_for_tasks(cmd);
+
transport_cmd_finish_abort(cmd, 1);
+ target_put_sess_cmd(cmd);
}
}
static void core_tmr_drain_state_list(
struct se_device *dev,
struct se_cmd *prout_cmd,
- struct se_node_acl *tmr_nacl,
+ struct se_session *tmr_sess,
int tas,
struct list_head *preempt_and_abort_list)
{
LIST_HEAD(drain_task_list);
+ struct se_session *sess;
struct se_cmd *cmd, *next;
unsigned long flags;
+ int rc;
/*
* Complete outstanding commands with TASK_ABORTED SAM status.
@@ -282,6 +342,16 @@
if (prout_cmd == cmd)
continue;
+ sess = cmd->se_sess;
+ if (WARN_ON_ONCE(!sess))
+ continue;
+
+ spin_lock(&sess->sess_cmd_lock);
+ rc = __target_check_io_state(cmd, tmr_sess, tas);
+ spin_unlock(&sess->sess_cmd_lock);
+ if (!rc)
+ continue;
+
list_move_tail(&cmd->state_list, &drain_task_list);
cmd->state_active = false;
}
@@ -289,7 +359,7 @@
while (!list_empty(&drain_task_list)) {
cmd = list_entry(drain_task_list.next, struct se_cmd, state_list);
- list_del(&cmd->state_list);
+ list_del_init(&cmd->state_list);
pr_debug("LUN_RESET: %s cmd: %p"
" ITT/CmdSN: 0x%08llx/0x%08x, i_state: %d, t_state: %d"
@@ -313,16 +383,11 @@
* loop above, but we do it down here given that
* cancel_work_sync may block.
*/
- if (cmd->t_state == TRANSPORT_COMPLETE)
- cancel_work_sync(&cmd->work);
+ cancel_work_sync(&cmd->work);
+ transport_wait_for_tasks(cmd);
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- target_stop_cmd(cmd, &flags);
-
- cmd->transport_state |= CMD_T_ABORTED;
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- core_tmr_handle_tas_abort(tmr_nacl, cmd, tas);
+ core_tmr_handle_tas_abort(cmd, tas);
+ target_put_sess_cmd(cmd);
}
}
@@ -334,6 +399,7 @@
{
struct se_node_acl *tmr_nacl = NULL;
struct se_portal_group *tmr_tpg = NULL;
+ struct se_session *tmr_sess = NULL;
int tas;
/*
* TASK_ABORTED status bit, this is configurable via ConfigFS
@@ -352,8 +418,9 @@
* or struct se_device passthrough..
*/
if (tmr && tmr->task_cmd && tmr->task_cmd->se_sess) {
- tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
- tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
+ tmr_sess = tmr->task_cmd->se_sess;
+ tmr_nacl = tmr_sess->se_node_acl;
+ tmr_tpg = tmr_sess->se_tpg;
if (tmr_nacl && tmr_tpg) {
pr_debug("LUN_RESET: TMR caller fabric: %s"
" initiator port %s\n",
@@ -366,7 +433,7 @@
dev->transport->name, tas);
core_tmr_drain_tmr_list(dev, tmr, preempt_and_abort_list);
- core_tmr_drain_state_list(dev, prout_cmd, tmr_nacl, tas,
+ core_tmr_drain_state_list(dev, prout_cmd, tmr_sess, tas,
preempt_and_abort_list);
/*
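
kref_get_unless_zero() is the load-bearing call in the new __target_check_io_state(): it takes a reference only if the count has not already hit zero, so the TMR paths can never resurrect a command that is mid-release. The generic shape of the idiom, as a hedged sketch assuming an object with an embedded kref:

    #include <linux/kernel.h>
    #include <linux/kref.h>
    #include <linux/slab.h>

    struct obj { struct kref ref; };

    static void obj_release(struct kref *ref)
    {
        kfree(container_of(ref, struct obj, ref));
    }

    /* Returns a pinned object, or NULL if it is already being torn down. */
    static struct obj *obj_tryget(struct obj *o)
    {
        return kref_get_unless_zero(&o->ref) ? o : NULL;
    }

    static void obj_put(struct obj *o)
    {
        kref_put(&o->ref, obj_release);
    }
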
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index 9f3608e..867bc6d 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -534,9 +534,6 @@
}
EXPORT_SYMBOL(transport_deregister_session);
-/*
- * Called with cmd->t_state_lock held.
- */
static void target_remove_from_state_list(struct se_cmd *cmd)
{
struct se_device *dev = cmd->se_dev;
@@ -561,10 +558,6 @@
{
unsigned long flags;
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (write_pending)
- cmd->t_state = TRANSPORT_WRITE_PENDING;
-
if (remove_from_lists) {
target_remove_from_state_list(cmd);
@@ -574,6 +567,10 @@
cmd->se_lun = NULL;
}
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (write_pending)
+ cmd->t_state = TRANSPORT_WRITE_PENDING;
+
/*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
@@ -627,6 +624,8 @@
void transport_cmd_finish_abort(struct se_cmd *cmd, int remove)
{
+ bool ack_kref = (cmd->se_cmd_flags & SCF_ACK_KREF);
+
if (cmd->se_cmd_flags & SCF_SE_LUN_CMD)
transport_lun_remove_cmd(cmd);
/*
@@ -638,7 +637,7 @@
if (transport_cmd_check_stop_to_fabric(cmd))
return;
- if (remove)
+ if (remove && ack_kref)
transport_put_cmd(cmd);
}
@@ -694,19 +693,10 @@
}
/*
- * See if we are waiting to complete for an exception condition.
- */
- if (cmd->transport_state & CMD_T_REQUEST_STOP) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- complete(&cmd->task_stop_comp);
- return;
- }
-
- /*
* Check for case where an explicit ABORT_TASK has been received
* and transport_wait_for_tasks() will be waiting for completion..
*/
- if (cmd->transport_state & CMD_T_ABORTED &&
+ if (cmd->transport_state & CMD_T_ABORTED ||
cmd->transport_state & CMD_T_STOP) {
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
complete_all(&cmd->t_transport_stop_comp);
@@ -721,10 +711,10 @@
cmd->transport_state |= (CMD_T_COMPLETE | CMD_T_ACTIVE);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- if (cmd->cpuid == -1)
- queue_work(target_completion_wq, &cmd->work);
- else
+ if (cmd->se_cmd_flags & SCF_USE_CPUID)
queue_work_on(cmd->cpuid, target_completion_wq, &cmd->work);
+ else
+ queue_work(target_completion_wq, &cmd->work);
}
EXPORT_SYMBOL(target_complete_cmd);
@@ -1203,7 +1193,6 @@
INIT_LIST_HEAD(&cmd->state_list);
init_completion(&cmd->t_transport_stop_comp);
init_completion(&cmd->cmd_wait_comp);
- init_completion(&cmd->task_stop_comp);
spin_lock_init(&cmd->t_state_lock);
kref_init(&cmd->cmd_kref);
cmd->transport_state = CMD_T_DEV_ACTIVE;
@@ -1437,6 +1426,12 @@
*/
transport_init_se_cmd(se_cmd, se_tpg->se_tpg_tfo, se_sess,
data_length, data_dir, task_attr, sense);
+
+ if (flags & TARGET_SCF_USE_CPUID)
+ se_cmd->se_cmd_flags |= SCF_USE_CPUID;
+ else
+ se_cmd->cpuid = WORK_CPU_UNBOUND;
+
if (flags & TARGET_SCF_UNKNOWN_SIZE)
se_cmd->unknown_data_length = 1;
/*
@@ -1635,33 +1630,6 @@
EXPORT_SYMBOL(target_submit_tmr);
/*
- * If the cmd is active, request it to be stopped and sleep until it
- * has completed.
- */
-bool target_stop_cmd(struct se_cmd *cmd, unsigned long *flags)
- __releases(&cmd->t_state_lock)
- __acquires(&cmd->t_state_lock)
-{
- bool was_active = false;
-
- if (cmd->transport_state & CMD_T_BUSY) {
- cmd->transport_state |= CMD_T_REQUEST_STOP;
- spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
-
- pr_debug("cmd %p waiting to complete\n", cmd);
- wait_for_completion(&cmd->task_stop_comp);
- pr_debug("cmd %p stopped successfully\n", cmd);
-
- spin_lock_irqsave(&cmd->t_state_lock, *flags);
- cmd->transport_state &= ~CMD_T_REQUEST_STOP;
- cmd->transport_state &= ~CMD_T_BUSY;
- was_active = true;
- }
-
- return was_active;
-}
-
-/*
* Handle SAM-esque emulation for generic transport request failures.
*/
void transport_generic_request_failure(struct se_cmd *cmd,
@@ -1859,19 +1827,21 @@
return true;
}
+static int __transport_check_aborted_status(struct se_cmd *, int);
+
void target_execute_cmd(struct se_cmd *cmd)
{
/*
- * If the received CDB has aleady been aborted stop processing it here.
- */
- if (transport_check_aborted_status(cmd, 1))
- return;
-
- /*
* Determine if frontend context caller is requesting the stopping of
* this command for frontend exceptions.
+ *
+ * If the received CDB has already been aborted, stop processing it here.
*/
spin_lock_irq(&cmd->t_state_lock);
+ if (__transport_check_aborted_status(cmd, 1)) {
+ spin_unlock_irq(&cmd->t_state_lock);
+ return;
+ }
if (cmd->transport_state & CMD_T_STOP) {
pr_debug("%s:%d CMD_T_STOP for ITT: 0x%08llx\n",
__func__, __LINE__, cmd->tag);
@@ -2222,28 +2192,6 @@
}
/**
- * transport_release_cmd - free a command
- * @cmd: command to free
- *
- * This routine unconditionally frees a command, and reference counting
- * or list removal must be done in the caller.
- */
-static int transport_release_cmd(struct se_cmd *cmd)
-{
- BUG_ON(!cmd->se_tfo);
-
- if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
- core_tmr_release_req(cmd->se_tmr_req);
- if (cmd->t_task_cdb != cmd->__t_task_cdb)
- kfree(cmd->t_task_cdb);
- /*
- * If this cmd has been setup with target_get_sess_cmd(), drop
- * the kref and call ->release_cmd() in kref callback.
- */
- return target_put_sess_cmd(cmd);
-}
-
-/**
* transport_put_cmd - release a reference to a command
* @cmd: command to release
*
@@ -2251,8 +2199,12 @@
*/
static int transport_put_cmd(struct se_cmd *cmd)
{
- transport_free_pages(cmd);
- return transport_release_cmd(cmd);
+ BUG_ON(!cmd->se_tfo);
+ /*
+ * If this cmd has been setup with target_get_sess_cmd(), drop
+ * the kref and call ->release_cmd() in kref callback.
+ */
+ return target_put_sess_cmd(cmd);
}
void *transport_kmap_data_sg(struct se_cmd *cmd)
@@ -2450,34 +2402,58 @@
}
}
-int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+static bool
+__transport_wait_for_tasks(struct se_cmd *, bool, bool *, bool *,
+ unsigned long *flags);
+
+static void target_wait_free_cmd(struct se_cmd *cmd, bool *aborted, bool *tas)
{
unsigned long flags;
+
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ __transport_wait_for_tasks(cmd, true, aborted, tas, &flags);
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+}
+
+int transport_generic_free_cmd(struct se_cmd *cmd, int wait_for_tasks)
+{
int ret = 0;
+ bool aborted = false, tas = false;
if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD)) {
if (wait_for_tasks && (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
- transport_wait_for_tasks(cmd);
+ target_wait_free_cmd(cmd, &aborted, &tas);
- ret = transport_release_cmd(cmd);
+ if (!aborted || tas)
+ ret = transport_put_cmd(cmd);
} else {
if (wait_for_tasks)
- transport_wait_for_tasks(cmd);
+ target_wait_free_cmd(cmd, &aborted, &tas);
/*
* Handle WRITE failure case where transport_generic_new_cmd()
* has already added se_cmd to state_list, but fabric has
* failed command before I/O submission.
*/
- if (cmd->state_active) {
- spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->state_active)
target_remove_from_state_list(cmd);
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- }
if (cmd->se_lun)
transport_lun_remove_cmd(cmd);
- ret = transport_put_cmd(cmd);
+ if (!aborted || tas)
+ ret = transport_put_cmd(cmd);
+ }
+ /*
+ * If the task has been internally aborted due to TMR ABORT_TASK
+ * or LUN_RESET, target_core_tmr.c is responsible for performing
+ * the remaining calls to target_put_sess_cmd(), and not the
+ * callers of this function.
+ */
+ if (aborted) {
+ pr_debug("Detected CMD_T_ABORTED for ITT: %llu\n", cmd->tag);
+ wait_for_completion(&cmd->cmd_wait_comp);
+ cmd->se_tfo->release_cmd(cmd);
+ ret = 1;
}
return ret;
}
@@ -2517,26 +2493,46 @@
}
EXPORT_SYMBOL(target_get_sess_cmd);
+static void target_free_cmd_mem(struct se_cmd *cmd)
+{
+ transport_free_pages(cmd);
+
+ if (cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)
+ core_tmr_release_req(cmd->se_tmr_req);
+ if (cmd->t_task_cdb != cmd->__t_task_cdb)
+ kfree(cmd->t_task_cdb);
+}
+
static void target_release_cmd_kref(struct kref *kref)
{
struct se_cmd *se_cmd = container_of(kref, struct se_cmd, cmd_kref);
struct se_session *se_sess = se_cmd->se_sess;
unsigned long flags;
+ bool fabric_stop;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (list_empty(&se_cmd->se_cmd_list)) {
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
return;
}
- if (se_sess->sess_tearing_down && se_cmd->cmd_wait_set) {
+
+ spin_lock(&se_cmd->t_state_lock);
+ fabric_stop = (se_cmd->transport_state & CMD_T_FABRIC_STOP);
+ spin_unlock(&se_cmd->t_state_lock);
+
+ if (se_cmd->cmd_wait_set || fabric_stop) {
+ list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ target_free_cmd_mem(se_cmd);
complete(&se_cmd->cmd_wait_comp);
return;
}
- list_del(&se_cmd->se_cmd_list);
+ list_del_init(&se_cmd->se_cmd_list);
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
+ target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
}
@@ -2548,6 +2544,7 @@
struct se_session *se_sess = se_cmd->se_sess;
if (!se_sess) {
+ target_free_cmd_mem(se_cmd);
se_cmd->se_tfo->release_cmd(se_cmd);
return 1;
}
@@ -2564,6 +2561,7 @@
{
struct se_cmd *se_cmd;
unsigned long flags;
+ int rc;
spin_lock_irqsave(&se_sess->sess_cmd_lock, flags);
if (se_sess->sess_tearing_down) {
@@ -2573,8 +2571,15 @@
se_sess->sess_tearing_down = 1;
list_splice_init(&se_sess->sess_cmd_list, &se_sess->sess_wait_list);
- list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list)
- se_cmd->cmd_wait_set = 1;
+ list_for_each_entry(se_cmd, &se_sess->sess_wait_list, se_cmd_list) {
+ rc = kref_get_unless_zero(&se_cmd->cmd_kref);
+ if (rc) {
+ se_cmd->cmd_wait_set = 1;
+ spin_lock(&se_cmd->t_state_lock);
+ se_cmd->transport_state |= CMD_T_FABRIC_STOP;
+ spin_unlock(&se_cmd->t_state_lock);
+ }
+ }
spin_unlock_irqrestore(&se_sess->sess_cmd_lock, flags);
}
@@ -2587,15 +2592,25 @@
{
struct se_cmd *se_cmd, *tmp_cmd;
unsigned long flags;
+ bool tas;
list_for_each_entry_safe(se_cmd, tmp_cmd,
&se_sess->sess_wait_list, se_cmd_list) {
- list_del(&se_cmd->se_cmd_list);
+ list_del_init(&se_cmd->se_cmd_list);
pr_debug("Waiting for se_cmd: %p t_state: %d, fabric state:"
" %d\n", se_cmd, se_cmd->t_state,
se_cmd->se_tfo->get_cmd_state(se_cmd));
+ spin_lock_irqsave(&se_cmd->t_state_lock, flags);
+ tas = (se_cmd->transport_state & CMD_T_TAS);
+ spin_unlock_irqrestore(&se_cmd->t_state_lock, flags);
+
+ if (!target_put_sess_cmd(se_cmd)) {
+ if (tas)
+ target_put_sess_cmd(se_cmd);
+ }
+
wait_for_completion(&se_cmd->cmd_wait_comp);
pr_debug("After cmd_wait_comp: se_cmd: %p t_state: %d"
" fabric state: %d\n", se_cmd, se_cmd->t_state,
@@ -2617,6 +2632,58 @@
wait_for_completion(&lun->lun_ref_comp);
}
+static bool
+__transport_wait_for_tasks(struct se_cmd *cmd, bool fabric_stop,
+ bool *aborted, bool *tas, unsigned long *flags)
+ __releases(&cmd->t_state_lock)
+ __acquires(&cmd->t_state_lock)
+{
+
+ assert_spin_locked(&cmd->t_state_lock);
+ WARN_ON_ONCE(!irqs_disabled());
+
+ if (fabric_stop)
+ cmd->transport_state |= CMD_T_FABRIC_STOP;
+
+ if (cmd->transport_state & CMD_T_ABORTED)
+ *aborted = true;
+
+ if (cmd->transport_state & CMD_T_TAS)
+ *tas = true;
+
+ if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+ return false;
+
+ if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
+ !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB))
+ return false;
+
+ if (!(cmd->transport_state & CMD_T_ACTIVE))
+ return false;
+
+ if (fabric_stop && *aborted)
+ return false;
+
+ cmd->transport_state |= CMD_T_STOP;
+
+ pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d,"
+ " t_state: %d, CMD_T_STOP\n", cmd, cmd->tag,
+ cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
+
+ spin_unlock_irqrestore(&cmd->t_state_lock, *flags);
+
+ wait_for_completion(&cmd->t_transport_stop_comp);
+
+ spin_lock_irqsave(&cmd->t_state_lock, *flags);
+ cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
+
+ pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->"
+ "t_transport_stop_comp) for ITT: 0x%08llx\n", cmd->tag);
+
+ return true;
+}
+
/**
* transport_wait_for_tasks - wait for completion to occur
* @cmd: command to wait
@@ -2627,43 +2694,13 @@
bool transport_wait_for_tasks(struct se_cmd *cmd)
{
unsigned long flags;
+ bool ret, aborted = false, tas = false;
spin_lock_irqsave(&cmd->t_state_lock, flags);
- if (!(cmd->se_cmd_flags & SCF_SE_LUN_CMD) &&
- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return false;
- }
-
- if (!(cmd->se_cmd_flags & SCF_SUPPORTED_SAM_OPCODE) &&
- !(cmd->se_cmd_flags & SCF_SCSI_TMR_CDB)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return false;
- }
-
- if (!(cmd->transport_state & CMD_T_ACTIVE)) {
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- return false;
- }
-
- cmd->transport_state |= CMD_T_STOP;
-
- pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08llx i_state: %d, t_state: %d, CMD_T_STOP\n",
- cmd, cmd->tag, cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
-
+ ret = __transport_wait_for_tasks(cmd, false, &aborted, &tas, &flags);
spin_unlock_irqrestore(&cmd->t_state_lock, flags);
- wait_for_completion(&cmd->t_transport_stop_comp);
-
- spin_lock_irqsave(&cmd->t_state_lock, flags);
- cmd->transport_state &= ~(CMD_T_ACTIVE | CMD_T_STOP);
-
- pr_debug("wait_for_tasks: Stopped wait_for_completion(&cmd->t_transport_stop_comp) for ITT: 0x%08llx\n",
- cmd->tag);
-
- spin_unlock_irqrestore(&cmd->t_state_lock, flags);
-
- return true;
+ return ret;
}
EXPORT_SYMBOL(transport_wait_for_tasks);
@@ -2845,28 +2882,49 @@
}
EXPORT_SYMBOL(transport_send_check_condition_and_sense);
-int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+static int __transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+ __releases(&cmd->t_state_lock)
+ __acquires(&cmd->t_state_lock)
{
+ assert_spin_locked(&cmd->t_state_lock);
+ WARN_ON_ONCE(!irqs_disabled());
+
if (!(cmd->transport_state & CMD_T_ABORTED))
return 0;
-
/*
* If cmd has been aborted but either no status is to be sent or it has
* already been sent, just return
*/
- if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS))
+ if (!send_status || !(cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS)) {
+ if (send_status)
+ cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
return 1;
+ }
- pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB: 0x%02x ITT: 0x%08llx\n",
- cmd->t_task_cdb[0], cmd->tag);
+ pr_debug("Sending delayed SAM_STAT_TASK_ABORTED status for CDB:"
+ " 0x%02x ITT: 0x%08llx\n", cmd->t_task_cdb[0], cmd->tag);
cmd->se_cmd_flags &= ~SCF_SEND_DELAYED_TAS;
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
trace_target_cmd_complete(cmd);
+
+ spin_unlock_irq(&cmd->t_state_lock);
cmd->se_tfo->queue_status(cmd);
+ spin_lock_irq(&cmd->t_state_lock);
return 1;
}
+
+int transport_check_aborted_status(struct se_cmd *cmd, int send_status)
+{
+ int ret;
+
+ spin_lock_irq(&cmd->t_state_lock);
+ ret = __transport_check_aborted_status(cmd, send_status);
+ spin_unlock_irq(&cmd->t_state_lock);
+
+ return ret;
+}
EXPORT_SYMBOL(transport_check_aborted_status);
void transport_send_task_abort(struct se_cmd *cmd)
@@ -2888,11 +2946,17 @@
*/
if (cmd->data_direction == DMA_TO_DEVICE) {
if (cmd->se_tfo->write_pending_status(cmd) != 0) {
- cmd->transport_state |= CMD_T_ABORTED;
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->se_cmd_flags & SCF_SEND_DELAYED_TAS) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ goto send_abort;
+ }
cmd->se_cmd_flags |= SCF_SEND_DELAYED_TAS;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
return;
}
}
+send_abort:
cmd->scsi_status = SAM_STAT_TASK_ABORTED;
transport_lun_remove_cmd(cmd);
@@ -2909,8 +2973,17 @@
struct se_cmd *cmd = container_of(work, struct se_cmd, work);
struct se_device *dev = cmd->se_dev;
struct se_tmr_req *tmr = cmd->se_tmr_req;
+ unsigned long flags;
int ret;
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ tmr->response = TMR_FUNCTION_REJECTED;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ goto check_stop;
+ }
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
switch (tmr->function) {
case TMR_ABORT_TASK:
core_tmr_abort_task(dev, tmr, cmd->se_sess);
@@ -2943,9 +3016,17 @@
break;
}
+ spin_lock_irqsave(&cmd->t_state_lock, flags);
+ if (cmd->transport_state & CMD_T_ABORTED) {
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+ goto check_stop;
+ }
cmd->t_state = TRANSPORT_ISTATE_PROCESSING;
+ spin_unlock_irqrestore(&cmd->t_state_lock, flags);
+
cmd->se_tfo->queue_tm_rsp(cmd);
+check_stop:
transport_cmd_check_stop_to_fabric(cmd);
}
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index dd600e5..94f5154 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -903,7 +903,7 @@
info->version = __stringify(TCMU_MAILBOX_VERSION);
info->mem[0].name = "tcm-user command & data buffer";
- info->mem[0].addr = (phys_addr_t) udev->mb_addr;
+ info->mem[0].addr = (phys_addr_t)(uintptr_t)udev->mb_addr;
info->mem[0].size = TCMU_RING_SIZE;
info->mem[0].memtype = UIO_MEM_VIRTUAL;
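
The double cast above silences a "cast from pointer to integer of different size" warning on 32-bit builds where phys_addr_t is 64-bit (e.g. LPAE): the pointer first becomes a same-sized uintptr_t, which then widens as an ordinary integer conversion. A user-space analogue of the idiom:

    #include <stdint.h>

    typedef uint64_t phys_addr_t;   /* stands in for a 64-bit phys_addr_t */

    static phys_addr_t addr_value(void *p)
    {
        return (phys_addr_t)(uintptr_t)p; /* pointer -> same-size int -> widen */
    }
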
diff --git a/drivers/thermal/Kconfig b/drivers/thermal/Kconfig
index 8cc4ac6..7c92c09 100644
--- a/drivers/thermal/Kconfig
+++ b/drivers/thermal/Kconfig
@@ -195,7 +195,7 @@
passive trip is crossed.
config SPEAR_THERMAL
- bool "SPEAr thermal sensor driver"
+ tristate "SPEAr thermal sensor driver"
depends on PLAT_SPEAR || COMPILE_TEST
depends on OF
help
@@ -237,8 +237,8 @@
framework.
config DB8500_THERMAL
- bool "DB8500 thermal management"
- depends on ARCH_U8500
+ tristate "DB8500 thermal management"
+ depends on MFD_DB8500_PRCMU
default y
help
Adds DB8500 thermal management implementation according to the thermal
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index e3fbc5a..6ceac4f 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -377,26 +377,28 @@
* get_load() - get load for a cpu since last updated
* @cpufreq_device: &struct cpufreq_cooling_device for this cpu
* @cpu: cpu number
+ * @cpu_idx: index of the cpu in cpufreq_device->allowed_cpus
*
* Return: The average load of cpu @cpu in percentage since this
* function was last called.
*/
-static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu)
+static u32 get_load(struct cpufreq_cooling_device *cpufreq_device, int cpu,
+ int cpu_idx)
{
u32 load;
u64 now, now_idle, delta_time, delta_idle;
now_idle = get_cpu_idle_time(cpu, &now, 0);
- delta_idle = now_idle - cpufreq_device->time_in_idle[cpu];
- delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu];
+ delta_idle = now_idle - cpufreq_device->time_in_idle[cpu_idx];
+ delta_time = now - cpufreq_device->time_in_idle_timestamp[cpu_idx];
if (delta_time <= delta_idle)
load = 0;
else
load = div64_u64(100 * (delta_time - delta_idle), delta_time);
- cpufreq_device->time_in_idle[cpu] = now_idle;
- cpufreq_device->time_in_idle_timestamp[cpu] = now;
+ cpufreq_device->time_in_idle[cpu_idx] = now_idle;
+ cpufreq_device->time_in_idle_timestamp[cpu_idx] = now;
return load;
}
@@ -598,7 +600,7 @@
u32 load;
if (cpu_online(cpu))
- load = get_load(cpufreq_device, cpu);
+ load = get_load(cpufreq_device, cpu, i);
else
load = 0;
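
The cpu/cpu_idx split matters because time_in_idle[] is allocated with one slot per CPU in the cooling device's mask, not per global CPU id. Worked example: with allowed_cpus = {4, 5}, CPU 5's statistics live at index 1; indexing by the raw id 5 would read past a two-entry array, which is the bug fixed above.

    #include <stdio.h>

    int main(void)
    {
        int allowed[] = { 4, 5 };   /* allowed_cpus, as a list */
        int i;

        /* stats arrays are dense: slot i belongs to allowed[i] */
        for (i = 0; i < 2; i++)
            printf("cpu %d -> time_in_idle[%d]\n", allowed[i], i);
        return 0;
    }
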
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index be4eedc..9043f8f 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -475,14 +475,10 @@
sensor_np = of_node_get(dev->of_node);
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
struct of_phandle_args sensor_specs;
int ret, id;
- /* Check whether child is enabled or not */
- if (!of_device_is_available(child))
- continue;
-
/* For now, thermal framework supports only 1 sensor per zone */
ret = of_parse_phandle_with_args(child, "thermal-sensors",
"#thermal-sensor-cells",
@@ -881,16 +877,12 @@
return 0; /* Run successfully on systems without thermal DT */
}
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
struct thermal_zone_device *zone;
struct thermal_zone_params *tzp;
int i, mask = 0;
u32 prop;
- /* Check whether child is enabled or not */
- if (!of_device_is_available(child))
- continue;
-
tz = thermal_of_build_thermal_zone(child);
if (IS_ERR(tz)) {
pr_err("failed to build thermal zone %s: %ld\n",
@@ -968,13 +960,9 @@
return;
}
- for_each_child_of_node(np, child) {
+ for_each_available_child_of_node(np, child) {
struct thermal_zone_device *zone;
- /* Check whether child is enabled or not */
- if (!of_device_is_available(child))
- continue;
-
zone = thermal_zone_get_zone_by_name(child->name);
if (IS_ERR(zone))
continue;
diff --git a/drivers/thermal/rcar_thermal.c b/drivers/thermal/rcar_thermal.c
index 44b9c48..0e735ac 100644
--- a/drivers/thermal/rcar_thermal.c
+++ b/drivers/thermal/rcar_thermal.c
@@ -23,6 +23,7 @@
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
+#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/reboot.h>
@@ -75,8 +76,10 @@
#define rcar_has_irq_support(priv) ((priv)->common->base)
#define rcar_id_to_shift(priv) ((priv)->id * 8)
+#define USE_OF_THERMAL 1
static const struct of_device_id rcar_thermal_dt_ids[] = {
{ .compatible = "renesas,rcar-thermal", },
+ { .compatible = "renesas,rcar-gen2-thermal", .data = (void *)USE_OF_THERMAL },
{},
};
MODULE_DEVICE_TABLE(of, rcar_thermal_dt_ids);
@@ -200,9 +203,9 @@
return ret;
}
-static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
+static int rcar_thermal_get_current_temp(struct rcar_thermal_priv *priv,
+ int *temp)
{
- struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
int tmp;
int ret;
@@ -226,6 +229,20 @@
return 0;
}
+static int rcar_thermal_of_get_temp(void *data, int *temp)
+{
+ struct rcar_thermal_priv *priv = data;
+
+ return rcar_thermal_get_current_temp(priv, temp);
+}
+
+static int rcar_thermal_get_temp(struct thermal_zone_device *zone, int *temp)
+{
+ struct rcar_thermal_priv *priv = rcar_zone_to_priv(zone);
+
+ return rcar_thermal_get_current_temp(priv, temp);
+}
+
static int rcar_thermal_get_trip_type(struct thermal_zone_device *zone,
int trip, enum thermal_trip_type *type)
{
@@ -282,6 +299,10 @@
return 0;
}
+static const struct thermal_zone_of_device_ops rcar_thermal_zone_of_ops = {
+ .get_temp = rcar_thermal_of_get_temp,
+};
+
static struct thermal_zone_device_ops rcar_thermal_zone_ops = {
.get_temp = rcar_thermal_get_temp,
.get_trip_type = rcar_thermal_get_trip_type,
@@ -318,14 +339,20 @@
priv = container_of(work, struct rcar_thermal_priv, work.work);
- rcar_thermal_get_temp(priv->zone, &cctemp);
+ ret = rcar_thermal_get_current_temp(priv, &cctemp);
+ if (ret < 0)
+ return;
+
ret = rcar_thermal_update_temp(priv);
if (ret < 0)
return;
rcar_thermal_irq_enable(priv);
- rcar_thermal_get_temp(priv->zone, &nctemp);
+ ret = rcar_thermal_get_current_temp(priv, &nctemp);
+ if (ret < 0)
+ return;
+
if (nctemp != cctemp)
thermal_zone_device_update(priv->zone);
}
@@ -403,6 +430,8 @@
struct rcar_thermal_priv *priv;
struct device *dev = &pdev->dev;
struct resource *res, *irq;
+ const struct of_device_id *of_id = of_match_device(rcar_thermal_dt_ids, dev);
+ unsigned long of_data = (unsigned long)of_id->data;
int mres = 0;
int i;
int ret = -ENODEV;
@@ -463,7 +492,13 @@
if (ret < 0)
goto error_unregister;
- priv->zone = thermal_zone_device_register("rcar_thermal",
+ if (of_data == USE_OF_THERMAL)
+ priv->zone = thermal_zone_of_sensor_register(
+ dev, i, priv,
+ &rcar_thermal_zone_of_ops);
+ else
+ priv->zone = thermal_zone_device_register(
+ "rcar_thermal",
1, 0, priv,
&rcar_thermal_zone_ops, NULL, 0,
idle);
diff --git a/drivers/thermal/spear_thermal.c b/drivers/thermal/spear_thermal.c
index 534dd91..81b35aa 100644
--- a/drivers/thermal/spear_thermal.c
+++ b/drivers/thermal/spear_thermal.c
@@ -54,8 +54,7 @@
.get_temp = thermal_get_temp,
};
-#ifdef CONFIG_PM
-static int spear_thermal_suspend(struct device *dev)
+static int __maybe_unused spear_thermal_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -72,7 +71,7 @@
return 0;
}
-static int spear_thermal_resume(struct device *dev)
+static int __maybe_unused spear_thermal_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct thermal_zone_device *spear_thermal = platform_get_drvdata(pdev);
@@ -94,7 +93,6 @@
return 0;
}
-#endif
static SIMPLE_DEV_PM_OPS(spear_thermal_pm_ops, spear_thermal_suspend,
spear_thermal_resume);
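
SIMPLE_DEV_PM_OPS() references its callbacks only when CONFIG_PM_SLEEP is set, so the old #ifdef CONFIG_PM guard can go; __maybe_unused keeps -Wunused-function quiet in the !PM_SLEEP build. Minimal shape of the pattern (the foo_* names are illustrative):

    #include <linux/device.h>
    #include <linux/pm.h>

    static int __maybe_unused foo_suspend(struct device *dev) { return 0; }
    static int __maybe_unused foo_resume(struct device *dev)  { return 0; }

    static SIMPLE_DEV_PM_OPS(foo_pm_ops, foo_suspend, foo_resume);
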
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index b311004..2348fa6 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -681,7 +681,14 @@
/* this is called once with whichever end is closed last */
static void pty_unix98_shutdown(struct tty_struct *tty)
{
- devpts_kill_index(tty->driver_data, tty->index);
+ struct inode *ptmx_inode;
+
+ if (tty->driver->subtype == PTY_TYPE_MASTER)
+ ptmx_inode = tty->driver_data;
+ else
+ ptmx_inode = tty->link->driver_data;
+ devpts_kill_index(ptmx_inode, tty->index);
+ devpts_del_ref(ptmx_inode);
}
static const struct tty_operations ptm_unix98_ops = {
@@ -773,6 +780,18 @@
set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
tty->driver_data = inode;
+ /*
+ * In the case where all references to ptmx inode are dropped and we
+ * still have /dev/tty opened pointing to the master/slave pair (ptmx
+ * is closed/released before /dev/tty), we must make sure that the inode
+ * is still valid when we call the final pty_unix98_shutdown, thus we
+ * hold an additional reference to the ptmx inode. For the same /dev/tty
+ * last close case, we also need to make sure the super_block isn't
+ * destroyed (devpts instance unmounted) before /dev/tty is closed,
+ * since devpts_kill_index is called on its release.
+ */
+ devpts_add_ref(inode);
+
tty_add_file(tty, filp);
slave_inode = devpts_pty_new(inode,
diff --git a/drivers/tty/serial/8250/8250_pci.c b/drivers/tty/serial/8250/8250_pci.c
index e71ec78..7cd6f9a 100644
--- a/drivers/tty/serial/8250/8250_pci.c
+++ b/drivers/tty/serial/8250/8250_pci.c
@@ -1941,6 +1941,7 @@
#define PCIE_VENDOR_ID_WCH 0x1c00
#define PCIE_DEVICE_ID_WCH_CH382_2S1P 0x3250
#define PCIE_DEVICE_ID_WCH_CH384_4S 0x3470
+#define PCIE_DEVICE_ID_WCH_CH382_2S 0x3253
#define PCI_VENDOR_ID_PERICOM 0x12D8
#define PCI_DEVICE_ID_PERICOM_PI7C9X7951 0x7951
@@ -2637,6 +2638,14 @@
.subdevice = PCI_ANY_ID,
.setup = pci_wch_ch353_setup,
},
+ /* WCH CH382 2S card (16850 clone) */
+ {
+ .vendor = PCIE_VENDOR_ID_WCH,
+ .device = PCIE_DEVICE_ID_WCH_CH382_2S,
+ .subvendor = PCI_ANY_ID,
+ .subdevice = PCI_ANY_ID,
+ .setup = pci_wch_ch38x_setup,
+ },
/* WCH CH382 2S1P card (16850 clone) */
{
.vendor = PCIE_VENDOR_ID_WCH,
@@ -2955,6 +2964,7 @@
pbn_fintek_4,
pbn_fintek_8,
pbn_fintek_12,
+ pbn_wch382_2,
pbn_wch384_4,
pbn_pericom_PI7C9X7951,
pbn_pericom_PI7C9X7952,
@@ -3775,6 +3785,13 @@
.base_baud = 115200,
.first_offset = 0x40,
},
+ [pbn_wch382_2] = {
+ .flags = FL_BASE0,
+ .num_ports = 2,
+ .base_baud = 115200,
+ .uart_offset = 8,
+ .first_offset = 0xC0,
+ },
[pbn_wch384_4] = {
.flags = FL_BASE0,
.num_ports = 4,
@@ -5574,6 +5591,10 @@
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_b0_bt_2_115200 },
+ { PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH382_2S,
+ PCI_ANY_ID, PCI_ANY_ID,
+ 0, 0, pbn_wch382_2 },
+
{ PCIE_VENDOR_ID_WCH, PCIE_DEVICE_ID_WCH_CH384_4S,
PCI_ANY_ID, PCI_ANY_ID,
0, 0, pbn_wch384_4 },
diff --git a/drivers/tty/serial/omap-serial.c b/drivers/tty/serial/omap-serial.c
index b645f92..fa49eb1 100644
--- a/drivers/tty/serial/omap-serial.c
+++ b/drivers/tty/serial/omap-serial.c
@@ -1165,7 +1165,7 @@
#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
-static void wait_for_xmitr(struct uart_omap_port *up)
+static void __maybe_unused wait_for_xmitr(struct uart_omap_port *up)
{
unsigned int status, tmout = 10000;
@@ -1343,7 +1343,7 @@
/* Enable or disable the rs485 support */
static int
-serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485conf)
+serial_omap_config_rs485(struct uart_port *port, struct serial_rs485 *rs485)
{
struct uart_omap_port *up = to_uart_omap_port(port);
unsigned int mode;
@@ -1356,8 +1356,12 @@
up->ier = 0;
serial_out(up, UART_IER, 0);
+ /* Clamp the delays to [0, 100ms] */
+ rs485->delay_rts_before_send = min(rs485->delay_rts_before_send, 100U);
+ rs485->delay_rts_after_send = min(rs485->delay_rts_after_send, 100U);
+
/* store new config */
- port->rs485 = *rs485conf;
+ port->rs485 = *rs485;
/*
* Just as a precaution, only allow rs485
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 5cec01c..a7eacef 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -2066,13 +2066,12 @@
if (tty) {
mutex_unlock(&tty_mutex);
retval = tty_lock_interruptible(tty);
+ tty_kref_put(tty); /* drop kref from tty_driver_lookup_tty() */
if (retval) {
if (retval == -EINTR)
retval = -ERESTARTSYS;
goto err_unref;
}
- /* safe to drop the kref from tty_driver_lookup_tty() */
- tty_kref_put(tty);
retval = tty_reopen(tty);
if (retval < 0) {
tty_unlock(tty);
diff --git a/drivers/tty/tty_mutex.c b/drivers/tty/tty_mutex.c
index d2f3c4c..dfa9ec0 100644
--- a/drivers/tty/tty_mutex.c
+++ b/drivers/tty/tty_mutex.c
@@ -21,10 +21,15 @@
int tty_lock_interruptible(struct tty_struct *tty)
{
+ int ret;
+
if (WARN(tty->magic != TTY_MAGIC, "L Bad %p\n", tty))
return -EIO;
tty_kref_get(tty);
- return mutex_lock_interruptible(&tty->legacy_mutex);
+ ret = mutex_lock_interruptible(&tty->legacy_mutex);
+ if (ret)
+ tty_kref_put(tty);
+ return ret;
}
void __lockfunc tty_unlock(struct tty_struct *tty)
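
[Together with the tty_io.c hunk above, the point of this change is that a helper which takes a reference must release it on every failure path; previously an interrupted mutex_lock_interruptible() leaked one kref per -EINTR. A standalone sketch of the fixed shape, with toy types rather than the tty layer:]

    #include <errno.h>

    struct tty { int refs; };

    /* Stand-in for mutex_lock_interruptible(): fails when a signal is
     * pending. */
    static int lock_interruptible(struct tty *t, int signal_pending)
    {
            (void)t;
            return signal_pending ? -EINTR : 0;
    }

    static int tty_lock_interruptible(struct tty *t, int signal_pending)
    {
            int ret;

            t->refs++;                      /* tty_kref_get() */
            ret = lock_interruptible(t, signal_pending);
            if (ret)
                    t->refs--;              /* the reference the old code leaked */
            return ret;
    }

    int main(void)
    {
            struct tty t = { 1 };

            tty_lock_interruptible(&t, 1);  /* interrupted: must not leak a ref */
            return t.refs == 1 ? 0 : 1;
    }
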
diff --git a/drivers/usb/chipidea/ci_hdrc_pci.c b/drivers/usb/chipidea/ci_hdrc_pci.c
index b59195e..b635ab6 100644
--- a/drivers/usb/chipidea/ci_hdrc_pci.c
+++ b/drivers/usb/chipidea/ci_hdrc_pci.c
@@ -85,8 +85,8 @@
/* register a nop PHY */
ci->phy = usb_phy_generic_register();
- if (!ci->phy)
- return -ENOMEM;
+ if (IS_ERR(ci->phy))
+ return PTR_ERR(ci->phy);
memset(res, 0, sizeof(res));
res[0].start = pci_resource_start(pdev, 0);
diff --git a/drivers/usb/chipidea/debug.c b/drivers/usb/chipidea/debug.c
index a4f7db2..df47110 100644
--- a/drivers/usb/chipidea/debug.c
+++ b/drivers/usb/chipidea/debug.c
@@ -100,6 +100,9 @@
if (sscanf(buf, "%u", &mode) != 1)
return -EINVAL;
+ if (mode > 255)
+ return -EBADRQC;
+
pm_runtime_get_sync(ci->dev);
spin_lock_irqsave(&ci->lock, flags);
ret = hw_port_test_set(ci, mode);
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 350dcd9..51b43691 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -5401,6 +5401,7 @@
}
bos = udev->bos;
+ udev->bos = NULL;
for (i = 0; i < SET_CONFIG_TRIES; ++i) {
@@ -5493,11 +5494,8 @@
usb_set_usb2_hardware_lpm(udev, 1);
usb_unlocked_enable_lpm(udev);
usb_enable_ltm(udev);
- /* release the new BOS descriptor allocated by hub_port_init() */
- if (udev->bos != bos) {
- usb_release_bos_descriptor(udev);
- udev->bos = bos;
- }
+ usb_release_bos_descriptor(udev);
+ udev->bos = bos;
return 0;
re_enumerate:
diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig
index fd95ba6..f0decc0 100644
--- a/drivers/usb/dwc2/Kconfig
+++ b/drivers/usb/dwc2/Kconfig
@@ -1,5 +1,6 @@
config USB_DWC2
tristate "DesignWare USB2 DRD Core Support"
+ depends on HAS_DMA
depends on USB || USB_GADGET
help
Say Y here if your system has a Dual Role Hi-Speed USB
diff --git a/drivers/usb/dwc2/core.c b/drivers/usb/dwc2/core.c
index e991d55..46c4ba7 100644
--- a/drivers/usb/dwc2/core.c
+++ b/drivers/usb/dwc2/core.c
@@ -619,6 +619,12 @@
__func__, hsotg->dr_mode);
break;
}
+
+ /*
+ * NOTE: This delay is required on some Rockchip SoC-based
+ * platforms.
+ */
+ msleep(50);
}
/*
diff --git a/drivers/usb/dwc2/hcd_ddma.c b/drivers/usb/dwc2/hcd_ddma.c
index 36606fc..a41274a 100644
--- a/drivers/usb/dwc2/hcd_ddma.c
+++ b/drivers/usb/dwc2/hcd_ddma.c
@@ -1174,14 +1174,11 @@
failed = dwc2_update_non_isoc_urb_state_ddma(hsotg, chan, qtd, dma_desc,
halt_status, n_bytes,
xfer_done);
- if (*xfer_done && urb->status != -EINPROGRESS)
- failed = 1;
-
- if (failed) {
+ if (failed || (*xfer_done && urb->status != -EINPROGRESS)) {
dwc2_host_complete(hsotg, qtd, urb->status);
dwc2_hcd_qtd_unlink_and_free(hsotg, qtd, qh);
- dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x status=%08x\n",
- failed, *xfer_done, urb->status);
+ dev_vdbg(hsotg->dev, "failed=%1x xfer_done=%1x\n",
+ failed, *xfer_done);
return failed;
}
@@ -1236,21 +1233,23 @@
list_for_each_safe(qtd_item, qtd_tmp, &qh->qtd_list) {
int i;
+ int qtd_desc_count;
qtd = list_entry(qtd_item, struct dwc2_qtd, qtd_list_entry);
xfer_done = 0;
+ qtd_desc_count = qtd->n_desc;
- for (i = 0; i < qtd->n_desc; i++) {
+ for (i = 0; i < qtd_desc_count; i++) {
if (dwc2_process_non_isoc_desc(hsotg, chan, chnum, qtd,
desc_num, halt_status,
- &xfer_done)) {
- qtd = NULL;
- break;
- }
+ &xfer_done))
+ goto stop_scan;
+
desc_num++;
}
}
+stop_scan:
if (qh->ep_type != USB_ENDPOINT_XFER_CONTROL) {
/*
* Resetting the data toggle for bulk and interrupt endpoints
@@ -1258,7 +1257,7 @@
*/
if (halt_status == DWC2_HC_XFER_STALL)
qh->data_toggle = DWC2_HC_PID_DATA0;
- else if (qtd)
+ else
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
}
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index f825380..cadba8b 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -525,11 +525,19 @@
u32 pid = (hctsiz & TSIZ_SC_MC_PID_MASK) >> TSIZ_SC_MC_PID_SHIFT;
if (chan->ep_type != USB_ENDPOINT_XFER_CONTROL) {
+ if (WARN(!chan || !chan->qh,
+ "chan->qh must be specified for non-control eps\n"))
+ return;
+
if (pid == TSIZ_SC_MC_PID_DATA0)
chan->qh->data_toggle = DWC2_HC_PID_DATA0;
else
chan->qh->data_toggle = DWC2_HC_PID_DATA1;
} else {
+ if (WARN(!qtd,
+ "qtd must be specified for control eps\n"))
+ return;
+
if (pid == TSIZ_SC_MC_PID_DATA0)
qtd->data_toggle = DWC2_HC_PID_DATA0;
else
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 2913068..e4f8b90 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -856,7 +856,6 @@
unsigned pullups_connected:1;
unsigned resize_fifos:1;
unsigned setup_packet_pending:1;
- unsigned start_config_issued:1;
unsigned three_stage_setup:1;
unsigned usb3_lpm_capable:1;
diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c
index 3a9354a..8d6b75c 100644
--- a/drivers/usb/dwc3/ep0.c
+++ b/drivers/usb/dwc3/ep0.c
@@ -555,7 +555,6 @@
int ret;
u32 reg;
- dwc->start_config_issued = false;
cfg = le16_to_cpu(ctrl->wValue);
switch (state) {
@@ -737,10 +736,6 @@
dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_ISOCH_DELAY");
ret = dwc3_ep0_set_isoch_delay(dwc, ctrl);
break;
- case USB_REQ_SET_INTERFACE:
- dwc3_trace(trace_dwc3_ep0, "USB_REQ_SET_INTERFACE");
- dwc->start_config_issued = false;
- /* Fall through */
default:
dwc3_trace(trace_dwc3_ep0, "Forwarding to gadget driver");
ret = dwc3_ep0_delegate_req(dwc, ctrl);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7d1dd82..2363bad 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -385,24 +385,66 @@
dep->trb_pool_dma = 0;
}
+static int dwc3_gadget_set_xfer_resource(struct dwc3 *dwc, struct dwc3_ep *dep);
+
+/**
+ * dwc3_gadget_start_config - Configure EP resources
+ * @dwc: pointer to our controller context structure
+ * @dep: endpoint that is being enabled
+ *
+ * The assignment of transfer resources cannot perfectly follow the
+ * databook, because the controller driver does not have all knowledge
+ * of the configuration in advance. It is given this information
+ * piecemeal by the composite gadget framework after every
+ * SET_CONFIGURATION and SET_INTERFACE. Trying to follow the databook
+ * programming model in this scenario can cause errors, for two
+ * reasons:
+ *
+ * 1) The databook says to do DEPSTARTCFG for every SET_CONFIGURATION
+ * and SET_INTERFACE (8.1.5). This is incorrect in the scenario of
+ * multiple interfaces.
+ *
+ * 2) The databook does not mention doing more DEPXFERCFG for new
+ * endpoint on alt setting (8.1.6).
+ *
+ * The following simplified method is used instead:
+ *
+ * All hardware endpoints can be assigned a transfer resource and this
+ * setting will stay persistent until either a core reset or
+ * hibernation. So whenever we do a DEPSTARTCFG(0) we can go ahead and
+ * do DEPXFERCFG for every hardware endpoint as well. We are
+ * guaranteed that there are as many transfer resources as endpoints.
+ *
+ * This function is called for each endpoint when it is being enabled
+ * but is triggered only when called for EP0-out, which always happens
+ * first, and which should only happen in one of the above conditions.
+ */
static int dwc3_gadget_start_config(struct dwc3 *dwc, struct dwc3_ep *dep)
{
struct dwc3_gadget_ep_cmd_params params;
u32 cmd;
+ int i;
+ int ret;
+
+ if (dep->number)
+ return 0;
memset(&params, 0x00, sizeof(params));
+ cmd = DWC3_DEPCMD_DEPSTARTCFG;
- if (dep->number != 1) {
- cmd = DWC3_DEPCMD_DEPSTARTCFG;
- /* XferRscIdx == 0 for ep0 and 2 for the remaining */
- if (dep->number > 1) {
- if (dwc->start_config_issued)
- return 0;
- dwc->start_config_issued = true;
- cmd |= DWC3_DEPCMD_PARAM(2);
- }
+	ret = dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+ if (ret)
+ return ret;
-	return dwc3_send_gadget_ep_cmd(dwc, 0, cmd, &params);
+ for (i = 0; i < DWC3_ENDPOINTS_NUM; i++) {
+ struct dwc3_ep *dep = dwc->eps[i];
+
+ if (!dep)
+ continue;
+
+ ret = dwc3_gadget_set_xfer_resource(dwc, dep);
+ if (ret)
+ return ret;
}
return 0;
@@ -516,10 +558,6 @@
struct dwc3_trb *trb_st_hw;
struct dwc3_trb *trb_link;
- ret = dwc3_gadget_set_xfer_resource(dwc, dep);
- if (ret)
- return ret;
-
dep->endpoint.desc = desc;
dep->comp_desc = comp_desc;
dep->type = usb_endpoint_type(desc);
@@ -1636,8 +1674,6 @@
}
dwc3_writel(dwc->regs, DWC3_DCFG, reg);
- dwc->start_config_issued = false;
-
/* Start with SuperSpeed Default */
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
@@ -2237,7 +2273,6 @@
dwc3_writel(dwc->regs, DWC3_DCTL, reg);
dwc3_disconnect_gadget(dwc);
- dwc->start_config_issued = false;
dwc->gadget.speed = USB_SPEED_UNKNOWN;
dwc->setup_packet_pending = false;
@@ -2288,7 +2323,6 @@
dwc3_stop_active_transfers(dwc);
dwc3_clear_stall_all_ep(dwc);
- dwc->start_config_issued = false;
/* Reset device address to zero */
reg = dwc3_readl(dwc->regs, DWC3_DCFG);
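
[The kernel-doc comment added to dwc3_gadget_start_config explains the simplified model: issue the start-config command once when EP0-out is enabled and assign a transfer resource to every endpoint in the same pass, dropping the fragile start_config_issued tracking. A condensed standalone sketch of that control flow, with illustrative names rather than the dwc3 command interface:]

    #define NUM_EPS 8

    struct ep { int num; int has_xfer_resource; };

    static int send_start_config_cmd(void) { return 0; }  /* DEPSTARTCFG(0) */

    static int set_xfer_resource(struct ep *e)             /* DEPXFERCFG */
    {
            e->has_xfer_resource = 1;
            return 0;
    }

    /* Called for every endpoint being enabled; only the EP0 call performs
     * the one-time configuration, so SET_CONFIGURATION/SET_INTERFACE
     * ordering no longer needs to be tracked with a flag. */
    static int start_config(struct ep *eps, const struct ep *enabled)
    {
            int i, ret;

            if (enabled->num)
                    return 0;

            ret = send_start_config_cmd();
            if (ret)
                    return ret;

            for (i = 0; i < NUM_EPS; i++) {
                    ret = set_xfer_resource(&eps[i]);
                    if (ret)
                            return ret;
            }
            return 0;
    }

    int main(void)
    {
            struct ep eps[NUM_EPS] = { { 0, 0 } };

            return start_config(eps, &eps[0]);
    }
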
diff --git a/drivers/usb/gadget/legacy/inode.c b/drivers/usb/gadget/legacy/inode.c
index 7e179f8..87fb0fd 100644
--- a/drivers/usb/gadget/legacy/inode.c
+++ b/drivers/usb/gadget/legacy/inode.c
@@ -130,7 +130,8 @@
setup_can_stall : 1,
setup_out_ready : 1,
setup_out_error : 1,
- setup_abort : 1;
+ setup_abort : 1,
+ gadget_registered : 1;
unsigned setup_wLength;
/* the rest is basically write-once */
@@ -1179,7 +1180,8 @@
/* closing ep0 === shutdown all */
- usb_gadget_unregister_driver (&gadgetfs_driver);
+ if (dev->gadget_registered)
+ usb_gadget_unregister_driver (&gadgetfs_driver);
/* at this point "good" hardware has disconnected the
* device from USB; the host won't see it any more.
@@ -1847,6 +1849,7 @@
* kick in after the ep0 descriptor is closed.
*/
value = len;
+ dev->gadget_registered = true;
}
return value;
diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c
index 53c0692..93d28cb 100644
--- a/drivers/usb/gadget/udc/fsl_qe_udc.c
+++ b/drivers/usb/gadget/udc/fsl_qe_udc.c
@@ -2340,7 +2340,7 @@
{
struct qe_udc *udc;
struct device_node *np = ofdev->dev.of_node;
- unsigned int tmp_addr = 0;
+ unsigned long tmp_addr = 0;
struct usb_device_para __iomem *usbpram;
unsigned int i;
u64 size;
diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h
index 4dff60d..0d32052 100644
--- a/drivers/usb/gadget/udc/net2280.h
+++ b/drivers/usb/gadget/udc/net2280.h
@@ -369,9 +369,20 @@
static const u32 ep_enhanced[9] = { 0x10, 0x60, 0x30, 0x80,
0x50, 0x20, 0x70, 0x40, 0x90 };
- if (ep->dev->enhanced_mode)
+ if (ep->dev->enhanced_mode) {
reg = ep_enhanced[ep->num];
- else{
+ switch (ep->dev->gadget.speed) {
+ case USB_SPEED_SUPER:
+ reg += 2;
+ break;
+ case USB_SPEED_FULL:
+ reg += 1;
+ break;
+ case USB_SPEED_HIGH:
+ default:
+ break;
+ }
+ } else {
reg = (ep->num + 1) * 0x10;
if (ep->dev->gadget.speed != USB_SPEED_HIGH)
reg += 1;
diff --git a/drivers/usb/gadget/udc/udc-core.c b/drivers/usb/gadget/udc/udc-core.c
index fd73a3ea..b86a6f0 100644
--- a/drivers/usb/gadget/udc/udc-core.c
+++ b/drivers/usb/gadget/udc/udc-core.c
@@ -413,9 +413,10 @@
if (!driver->udc_name || strcmp(driver->udc_name,
dev_name(&udc->dev)) == 0) {
ret = udc_bind_to_driver(udc, driver);
+ if (ret != -EPROBE_DEFER)
+ list_del(&driver->pending);
if (ret)
goto err4;
- list_del(&driver->pending);
break;
}
}
diff --git a/drivers/usb/musb/musb_host.c b/drivers/usb/musb/musb_host.c
index 795a45b..58487a4 100644
--- a/drivers/usb/musb/musb_host.c
+++ b/drivers/usb/musb/musb_host.c
@@ -662,7 +662,7 @@
csr &= ~(MUSB_TXCSR_AUTOSET | MUSB_TXCSR_DMAMODE);
csr |= MUSB_TXCSR_DMAENAB; /* against programmer's guide */
}
- channel->desired_mode = mode;
+ channel->desired_mode = *mode;
musb_writew(epio, MUSB_TXCSR, csr);
return 0;
@@ -2003,10 +2003,8 @@
qh->offset,
urb->transfer_buffer_length);
- done = musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh,
- urb, xfer_len,
- iso_err);
- if (done)
+ if (musb_rx_dma_in_inventra_cppi41(c, hw_ep, qh, urb,
+ xfer_len, iso_err))
goto finish;
else
dev_err(musb->controller, "error: rx_dma failed\n");
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index 970a30e..72b387d 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -757,14 +757,8 @@
otg->host = host;
dev_dbg(otg->usb_phy->dev, "host driver registered w/ tranceiver\n");
- /*
- * Kick the state machine work, if peripheral is not supported
- * or peripheral is already registered with us.
- */
- if (motg->pdata->mode == USB_DR_MODE_HOST || otg->gadget) {
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
- }
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ schedule_work(&motg->sm_work);
return 0;
}
@@ -827,14 +821,8 @@
dev_dbg(otg->usb_phy->dev,
"peripheral driver registered w/ tranceiver\n");
- /*
- * Kick the state machine work, if host is not supported
- * or host is already registered with us.
- */
- if (motg->pdata->mode == USB_DR_MODE_PERIPHERAL || otg->host) {
- pm_runtime_get_sync(otg->usb_phy->dev);
- schedule_work(&motg->sm_work);
- }
+ pm_runtime_get_sync(otg->usb_phy->dev);
+ schedule_work(&motg->sm_work);
return 0;
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index 987813b..7c319e7 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -163,6 +163,8 @@
{ USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */
{ USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */
{ USB_DEVICE(0x18EF, 0xE025) }, /* ELV Marble Sound Board 1 */
+ { USB_DEVICE(0x1901, 0x0190) }, /* GE B850 CP2105 Recorder interface */
+ { USB_DEVICE(0x1901, 0x0193) }, /* GE B650 CP2104 PMC interface */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
{ USB_DEVICE(0x1BA4, 0x0002) }, /* Silicon Labs 358x factory default */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index db86e51..8849439a 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -315,6 +315,7 @@
#define TOSHIBA_PRODUCT_G450 0x0d45
#define ALINK_VENDOR_ID 0x1e0e
+#define SIMCOM_PRODUCT_SIM7100E 0x9001 /* Yes, ALINK_VENDOR_ID */
#define ALINK_PRODUCT_PH300 0x9100
#define ALINK_PRODUCT_3GU 0x9200
@@ -607,6 +608,10 @@
.reserved = BIT(3) | BIT(4),
};
+static const struct option_blacklist_info simcom_sim7100e_blacklist = {
+ .reserved = BIT(5) | BIT(6),
+};
+
static const struct option_blacklist_info telit_le910_blacklist = {
.sendsetup = BIT(0),
.reserved = BIT(1) | BIT(2),
@@ -1122,6 +1127,8 @@
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC650) },
{ USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */
+ { USB_DEVICE_AND_INTERFACE_INFO(QUALCOMM_VENDOR_ID, 0x6001, 0xff, 0xff, 0xff), /* 4G LTE usb-modem U901 */
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */
{ USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */
@@ -1645,6 +1652,8 @@
{ USB_DEVICE(ALINK_VENDOR_ID, 0x9000) },
{ USB_DEVICE(ALINK_VENDOR_ID, ALINK_PRODUCT_PH300) },
{ USB_DEVICE_AND_INTERFACE_INFO(ALINK_VENDOR_ID, ALINK_PRODUCT_3GU, 0xff, 0xff, 0xff) },
+ { USB_DEVICE(ALINK_VENDOR_ID, SIMCOM_PRODUCT_SIM7100E),
+ .driver_info = (kernel_ulong_t)&simcom_sim7100e_blacklist },
{ USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200),
.driver_info = (kernel_ulong_t)&alcatel_x200_blacklist
},
diff --git a/drivers/video/fbdev/da8xx-fb.c b/drivers/video/fbdev/da8xx-fb.c
index 0081725..6b2a06d 100644
--- a/drivers/video/fbdev/da8xx-fb.c
+++ b/drivers/video/fbdev/da8xx-fb.c
@@ -152,7 +152,7 @@
struct da8xx_fb_par {
struct device *dev;
- resource_size_t p_palette_base;
+ dma_addr_t p_palette_base;
unsigned char *v_palette_base;
dma_addr_t vram_phys;
unsigned long vram_size;
@@ -1428,7 +1428,7 @@
par->vram_virt = dma_alloc_coherent(NULL,
par->vram_size,
- (resource_size_t *) &par->vram_phys,
+ &par->vram_phys,
GFP_KERNEL | GFP_DMA);
if (!par->vram_virt) {
dev_err(&device->dev,
@@ -1448,7 +1448,7 @@
/* allocate palette buffer */
par->v_palette_base = dma_zalloc_coherent(NULL, PALETTE_SIZE,
- (resource_size_t *)&par->p_palette_base,
+ &par->p_palette_base,
GFP_KERNEL | GFP_DMA);
if (!par->v_palette_base) {
dev_err(&device->dev,
diff --git a/drivers/video/fbdev/exynos/s6e8ax0.c b/drivers/video/fbdev/exynos/s6e8ax0.c
index 95873f2..de2f3e7 100644
--- a/drivers/video/fbdev/exynos/s6e8ax0.c
+++ b/drivers/video/fbdev/exynos/s6e8ax0.c
@@ -829,8 +829,7 @@
return 0;
}
-#ifdef CONFIG_PM
-static int s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
+static int __maybe_unused s6e8ax0_suspend(struct mipi_dsim_lcd_device *dsim_dev)
{
struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
@@ -843,7 +842,7 @@
return 0;
}
-static int s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
+static int __maybe_unused s6e8ax0_resume(struct mipi_dsim_lcd_device *dsim_dev)
{
struct s6e8ax0 *lcd = dev_get_drvdata(&dsim_dev->dev);
@@ -855,10 +854,6 @@
return 0;
}
-#else
-#define s6e8ax0_suspend NULL
-#define s6e8ax0_resume NULL
-#endif
static struct mipi_dsim_lcd_driver s6e8ax0_dsim_ddi_driver = {
.name = "s6e8ax0",
@@ -867,8 +862,8 @@
.power_on = s6e8ax0_power_on,
.set_sequence = s6e8ax0_set_sequence,
.probe = s6e8ax0_probe,
- .suspend = s6e8ax0_suspend,
- .resume = s6e8ax0_resume,
+ .suspend = IS_ENABLED(CONFIG_PM) ? s6e8ax0_suspend : NULL,
+ .resume = IS_ENABLED(CONFIG_PM) ? s6e8ax0_resume : NULL,
};
static int s6e8ax0_init(void)
diff --git a/drivers/video/fbdev/imxfb.c b/drivers/video/fbdev/imxfb.c
index cee8860..bb2f1e8 100644
--- a/drivers/video/fbdev/imxfb.c
+++ b/drivers/video/fbdev/imxfb.c
@@ -902,6 +902,21 @@
goto failed_getclock;
}
+ /*
+	 * The LCDC controller does not have an enable bit. It starts
+	 * running as soon as its clocks are enabled. If the clocks are
+	 * enabled while the controller has not yet been programmed with
+	 * proper register values (enabled by the bootloader, for example),
+	 * it ends up in an undefined state.
+	 * To avoid this, enable and then disable the LCDC IPG clock to
+	 * force a 'reset' of the LCDC block.
+ */
+ ret = clk_prepare_enable(fbi->clk_ipg);
+ if (ret)
+ goto failed_getclock;
+ clk_disable_unprepare(fbi->clk_ipg);
+
fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
if (IS_ERR(fbi->clk_ahb)) {
ret = PTR_ERR(fbi->clk_ahb);
diff --git a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
index de54a47..b6f83d5 100644
--- a/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
+++ b/drivers/video/fbdev/mmp/hw/mmp_ctrl.c
@@ -503,8 +503,7 @@
ctrl->reg_base = devm_ioremap_nocache(ctrl->dev,
res->start, resource_size(res));
if (ctrl->reg_base == NULL) {
- dev_err(ctrl->dev, "%s: res %x - %x map failed\n", __func__,
- res->start, res->end);
+ dev_err(ctrl->dev, "%s: res %pR map failed\n", __func__, res);
ret = -ENOMEM;
goto failed;
}
diff --git a/drivers/video/fbdev/ocfb.c b/drivers/video/fbdev/ocfb.c
index c9293ae..a970edc2 100644
--- a/drivers/video/fbdev/ocfb.c
+++ b/drivers/video/fbdev/ocfb.c
@@ -123,11 +123,11 @@
/* Horizontal timings */
ocfb_writereg(fbdev, OCFB_HTIM, (var->hsync_len - 1) << 24 |
- (var->right_margin - 1) << 16 | (var->xres - 1));
+ (var->left_margin - 1) << 16 | (var->xres - 1));
/* Vertical timings */
ocfb_writereg(fbdev, OCFB_VTIM, (var->vsync_len - 1) << 24 |
- (var->lower_margin - 1) << 16 | (var->yres - 1));
+ (var->upper_margin - 1) << 16 | (var->yres - 1));
/* Total length of frame */
hlen = var->left_margin + var->right_margin + var->hsync_len +
diff --git a/drivers/xen/xen-pciback/pciback_ops.c b/drivers/xen/xen-pciback/pciback_ops.c
index 73dafdc..fb02214 100644
--- a/drivers/xen/xen-pciback/pciback_ops.c
+++ b/drivers/xen/xen-pciback/pciback_ops.c
@@ -227,8 +227,9 @@
/*
* PCI_COMMAND_MEMORY must be enabled, otherwise we may not be able
* to access the BARs where the MSI-X entries reside.
+	 * For VF devices, however, the PF's PCI_COMMAND must be checked instead.
*/
- pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ pci_read_config_word(pci_physfn(dev), PCI_COMMAND, &cmd);
if (dev->msi_enabled || !(cmd & PCI_COMMAND_MEMORY))
return -ENXIO;
@@ -332,6 +333,9 @@
struct xen_pcibk_dev_data *dev_data = NULL;
struct xen_pci_op *op = &pdev->op;
int test_intx = 0;
+#ifdef CONFIG_PCI_MSI
+ unsigned int nr = 0;
+#endif
*op = pdev->sh_info->op;
barrier();
@@ -360,6 +364,7 @@
op->err = xen_pcibk_disable_msi(pdev, dev, op);
break;
case XEN_PCI_OP_enable_msix:
+ nr = op->value;
op->err = xen_pcibk_enable_msix(pdev, dev, op);
break;
case XEN_PCI_OP_disable_msix:
@@ -382,7 +387,7 @@
if (op->cmd == XEN_PCI_OP_enable_msix && op->err == 0) {
unsigned int i;
- for (i = 0; i < op->value; i++)
+ for (i = 0; i < nr; i++)
pdev->sh_info->op.msix_entries[i].vector =
op->msix_entries[i].vector;
}
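
[The nr = op->value snapshot above closes a time-of-check/time-of-use window: op lives in a page shared with the untrusted frontend, so the backend must latch the entry count once and use only the local copy for the later vector write-back. A standalone sketch of the pattern, with toy structures rather than the Xen interface:]

    struct shared_op {                      /* mapped from the frontend */
            volatile unsigned int value;
            unsigned int msix_entries[64];
    };

    /* Latch the guest-supplied count once; every later bound uses the
     * snapshot, never a fresh read of the shared field. */
    static unsigned int snapshot_count(const struct shared_op *op)
    {
            unsigned int nr = op->value;

            if (nr > 64)
                    nr = 64;                /* validate the local copy */
            return nr;
    }

    static void report_vectors(struct shared_op *op, unsigned int nr,
                               const unsigned int *vectors)
    {
            unsigned int i;

            for (i = 0; i < nr; i++)        /* bounded by the snapshot */
                    op->msix_entries[i] = vectors[i];
    }

    int main(void)
    {
            struct shared_op op = { 3, { 0 } };
            unsigned int vectors[64] = { 10, 11, 12 };
            unsigned int nr = snapshot_count(&op);

            op.value = 64;                  /* a later guest write changes the
                                             * shared field, not our bound */
            report_vectors(&op, nr, vectors);
            return 0;
    }
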
diff --git a/drivers/xen/xen-scsiback.c b/drivers/xen/xen-scsiback.c
index ad4eb10..c46ee18 100644
--- a/drivers/xen/xen-scsiback.c
+++ b/drivers/xen/xen-scsiback.c
@@ -849,15 +849,31 @@
}
/*
+ Check for a translation entry being present
+*/
+static struct v2p_entry *scsiback_chk_translation_entry(
+ struct vscsibk_info *info, struct ids_tuple *v)
+{
+ struct list_head *head = &(info->v2p_entry_lists);
+ struct v2p_entry *entry;
+
+ list_for_each_entry(entry, head, l)
+ if ((entry->v.chn == v->chn) &&
+ (entry->v.tgt == v->tgt) &&
+ (entry->v.lun == v->lun))
+ return entry;
+
+ return NULL;
+}
+
+/*
Add a new translation entry
*/
static int scsiback_add_translation_entry(struct vscsibk_info *info,
char *phy, struct ids_tuple *v)
{
int err = 0;
- struct v2p_entry *entry;
struct v2p_entry *new;
- struct list_head *head = &(info->v2p_entry_lists);
unsigned long flags;
char *lunp;
unsigned long long unpacked_lun;
@@ -917,15 +933,10 @@
spin_lock_irqsave(&info->v2p_lock, flags);
/* Check double assignment to identical virtual ID */
- list_for_each_entry(entry, head, l) {
- if ((entry->v.chn == v->chn) &&
- (entry->v.tgt == v->tgt) &&
- (entry->v.lun == v->lun)) {
- pr_warn("Virtual ID is already used. Assignment was not performed.\n");
- err = -EEXIST;
- goto out;
- }
-
+ if (scsiback_chk_translation_entry(info, v)) {
+ pr_warn("Virtual ID is already used. Assignment was not performed.\n");
+ err = -EEXIST;
+ goto out;
}
/* Create a new translation entry and add to the list */
@@ -933,18 +944,18 @@
new->v = *v;
new->tpg = tpg;
new->lun = unpacked_lun;
- list_add_tail(&new->l, head);
+ list_add_tail(&new->l, &info->v2p_entry_lists);
out:
spin_unlock_irqrestore(&info->v2p_lock, flags);
out_free:
- mutex_lock(&tpg->tv_tpg_mutex);
- tpg->tv_tpg_fe_count--;
- mutex_unlock(&tpg->tv_tpg_mutex);
-
- if (err)
+ if (err) {
+ mutex_lock(&tpg->tv_tpg_mutex);
+ tpg->tv_tpg_fe_count--;
+ mutex_unlock(&tpg->tv_tpg_mutex);
kfree(new);
+ }
return err;
}
@@ -956,39 +967,40 @@
}
/*
- Delete the translation entry specfied
+ Delete the translation entry specified
*/
static int scsiback_del_translation_entry(struct vscsibk_info *info,
struct ids_tuple *v)
{
struct v2p_entry *entry;
- struct list_head *head = &(info->v2p_entry_lists);
unsigned long flags;
+ int ret = 0;
spin_lock_irqsave(&info->v2p_lock, flags);
/* Find out the translation entry specified */
- list_for_each_entry(entry, head, l) {
- if ((entry->v.chn == v->chn) &&
- (entry->v.tgt == v->tgt) &&
- (entry->v.lun == v->lun)) {
- goto found;
- }
- }
+ entry = scsiback_chk_translation_entry(info, v);
+ if (entry)
+ __scsiback_del_translation_entry(entry);
+ else
+ ret = -ENOENT;
spin_unlock_irqrestore(&info->v2p_lock, flags);
- return 1;
-
-found:
- /* Delete the translation entry specfied */
- __scsiback_del_translation_entry(entry);
-
- spin_unlock_irqrestore(&info->v2p_lock, flags);
- return 0;
+ return ret;
}
static void scsiback_do_add_lun(struct vscsibk_info *info, const char *state,
char *phy, struct ids_tuple *vir, int try)
{
+ struct v2p_entry *entry;
+ unsigned long flags;
+
+ if (try) {
+ spin_lock_irqsave(&info->v2p_lock, flags);
+ entry = scsiback_chk_translation_entry(info, vir);
+ spin_unlock_irqrestore(&info->v2p_lock, flags);
+ if (entry)
+ return;
+ }
if (!scsiback_add_translation_entry(info, phy, vir)) {
if (xenbus_printf(XBT_NIL, info->dev->nodename, state,
"%d", XenbusStateInitialised)) {
diff --git a/drivers/xen/xenbus/xenbus_dev_frontend.c b/drivers/xen/xenbus/xenbus_dev_frontend.c
index 9433e46..912b64e 100644
--- a/drivers/xen/xenbus/xenbus_dev_frontend.c
+++ b/drivers/xen/xenbus/xenbus_dev_frontend.c
@@ -188,6 +188,8 @@
if (len == 0)
return 0;
+ if (len > XENSTORE_PAYLOAD_MAX)
+ return -EINVAL;
rb = kmalloc(sizeof(*rb) + len, GFP_KERNEL);
if (rb == NULL)
diff --git a/fs/affs/file.c b/fs/affs/file.c
index 0548c53..22fc7c8 100644
--- a/fs/affs/file.c
+++ b/fs/affs/file.c
@@ -511,8 +511,6 @@
pr_debug("%s(%lu, %ld, 0, %d)\n", __func__, inode->i_ino,
page->index, to);
BUG_ON(to > PAGE_CACHE_SIZE);
- kmap(page);
- data = page_address(page);
bsize = AFFS_SB(sb)->s_data_blksize;
tmp = page->index << PAGE_CACHE_SHIFT;
bidx = tmp / bsize;
@@ -524,14 +522,15 @@
return PTR_ERR(bh);
tmp = min(bsize - boff, to - pos);
BUG_ON(pos + tmp > to || tmp > bsize);
+ data = kmap_atomic(page);
memcpy(data + pos, AFFS_DATA(bh) + boff, tmp);
+ kunmap_atomic(data);
affs_brelse(bh);
bidx++;
pos += tmp;
boff = 0;
}
flush_dcache_page(page);
- kunmap(page);
return 0;
}
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index 051ea48..7d914c6 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -653,7 +653,7 @@
if ((current->flags & PF_RANDOMIZE) &&
!(current->personality & ADDR_NO_RANDOMIZE)) {
- random_variable = (unsigned long) get_random_int();
+ random_variable = get_random_long();
random_variable &= STACK_RND_MASK;
random_variable <<= PAGE_SHIFT;
}
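
[get_random_long() matters here because random_variable is masked and then shifted left by PAGE_SHIFT; on 64-bit configurations where STACK_RND_MASK covers more than 32 significant bits, a 32-bit random source leaves the top randomization bits permanently zero. A small arithmetic sketch with an illustrative 40-bit mask, not any particular architecture's value:]

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT     12
    #define STACK_RND_MASK 0xffffffffffULL  /* illustrative 40-bit mask */

    int main(void)
    {
            uint64_t best32 = (uint32_t)-1;  /* best case from a 32-bit source */
            uint64_t best64 = (uint64_t)-1;  /* from get_random_long() */

            /* The 32-bit source can never set bits 32..39 of the mask. */
            printf("32-bit source max: %#llx\n",
                   (unsigned long long)((best32 & STACK_RND_MASK) << PAGE_SHIFT));
            printf("64-bit source max: %#llx\n",
                   (unsigned long long)((best64 & STACK_RND_MASK) << PAGE_SHIFT));
            return 0;
    }
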
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 39b3a17..826b164 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1201,7 +1201,11 @@
bdev->bd_disk = disk;
bdev->bd_queue = disk->queue;
bdev->bd_contains = bdev;
- bdev->bd_inode->i_flags = disk->fops->direct_access ? S_DAX : 0;
+ if (IS_ENABLED(CONFIG_BLK_DEV_DAX) && disk->fops->direct_access)
+ bdev->bd_inode->i_flags = S_DAX;
+ else
+ bdev->bd_inode->i_flags = 0;
+
if (!partno) {
ret = -ENXIO;
bdev->bd_part = disk_get_part(disk, partno);
@@ -1693,13 +1697,24 @@
return try_to_free_buffers(page);
}
+static int blkdev_writepages(struct address_space *mapping,
+ struct writeback_control *wbc)
+{
+ if (dax_mapping(mapping)) {
+ struct block_device *bdev = I_BDEV(mapping->host);
+
+ return dax_writeback_mapping_range(mapping, bdev, wbc);
+ }
+ return generic_writepages(mapping, wbc);
+}
+
static const struct address_space_operations def_blk_aops = {
.readpage = blkdev_readpage,
.readpages = blkdev_readpages,
.writepage = blkdev_writepage,
.write_begin = blkdev_write_begin,
.write_end = blkdev_write_end,
- .writepages = generic_writepages,
+ .writepages = blkdev_writepages,
.releasepage = blkdev_releasepage,
.direct_IO = blkdev_direct_IO,
.is_dirty_writeback = buffer_check_dirty_writeback,
diff --git a/fs/btrfs/backref.c b/fs/btrfs/backref.c
index b90cd37..f6dac40 100644
--- a/fs/btrfs/backref.c
+++ b/fs/btrfs/backref.c
@@ -1406,7 +1406,8 @@
read_extent_buffer(eb, dest + bytes_left,
name_off, name_len);
if (eb != eb_in) {
- btrfs_tree_read_unlock_blocking(eb);
+ if (!path->skip_locking)
+ btrfs_tree_read_unlock_blocking(eb);
free_extent_buffer(eb);
}
ret = btrfs_find_item(fs_root, path, parent, 0,
@@ -1426,9 +1427,10 @@
eb = path->nodes[0];
/* make sure we can use eb after releasing the path */
if (eb != eb_in) {
- atomic_inc(&eb->refs);
- btrfs_tree_read_lock(eb);
- btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ if (!path->skip_locking)
+ btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
+ path->nodes[0] = NULL;
+ path->locks[0] = 0;
}
btrfs_release_path(path);
iref = btrfs_item_ptr(eb, slot, struct btrfs_inode_ref);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index c473c42..3346cd8 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -637,11 +637,7 @@
faili = nr_pages - 1;
cb->nr_pages = nr_pages;
- /* In the parent-locked case, we only locked the range we are
- * interested in. In all other cases, we can opportunistically
- * cache decompressed data that goes beyond the requested range. */
- if (!(bio_flags & EXTENT_BIO_PARENT_LOCKED))
- add_ra_bio_pages(inode, em_start + em_len, cb);
+ add_ra_bio_pages(inode, em_start + em_len, cb);
/* include any pages we added in add_ra-bio_pages */
uncompressed_len = bio->bi_vcnt * PAGE_CACHE_SIZE;
diff --git a/fs/btrfs/delayed-inode.c b/fs/btrfs/delayed-inode.c
index 0be47e4..b57daa8 100644
--- a/fs/btrfs/delayed-inode.c
+++ b/fs/btrfs/delayed-inode.c
@@ -1689,7 +1689,7 @@
*
*/
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- struct list_head *ins_list)
+ struct list_head *ins_list, bool *emitted)
{
struct btrfs_dir_item *di;
struct btrfs_delayed_item *curr, *next;
@@ -1733,6 +1733,7 @@
if (over)
return 1;
+ *emitted = true;
}
return 0;
}
diff --git a/fs/btrfs/delayed-inode.h b/fs/btrfs/delayed-inode.h
index f70119f..0167853 100644
--- a/fs/btrfs/delayed-inode.h
+++ b/fs/btrfs/delayed-inode.h
@@ -144,7 +144,7 @@
int btrfs_should_delete_dir_index(struct list_head *del_list,
u64 index);
int btrfs_readdir_delayed_dir_index(struct dir_context *ctx,
- struct list_head *ins_list);
+ struct list_head *ins_list, bool *emitted);
/* for init */
int __init btrfs_delayed_inode_init(void);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 2e7c97a..392592d 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -2897,12 +2897,11 @@
struct block_device *bdev;
int ret;
int nr = 0;
- int parent_locked = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
size_t pg_offset = 0;
size_t iosize;
size_t disk_io_size;
size_t blocksize = inode->i_sb->s_blocksize;
- unsigned long this_bio_flag = *bio_flags & EXTENT_BIO_PARENT_LOCKED;
+ unsigned long this_bio_flag = 0;
set_page_extent_mapped(page);
@@ -2942,18 +2941,16 @@
kunmap_atomic(userpage);
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
- if (!parent_locked)
- unlock_extent_cached(tree, cur,
- cur + iosize - 1,
- &cached, GFP_NOFS);
+ unlock_extent_cached(tree, cur,
+ cur + iosize - 1,
+ &cached, GFP_NOFS);
break;
}
em = __get_extent_map(inode, page, pg_offset, cur,
end - cur + 1, get_extent, em_cached);
if (IS_ERR_OR_NULL(em)) {
SetPageError(page);
- if (!parent_locked)
- unlock_extent(tree, cur, end);
+ unlock_extent(tree, cur, end);
break;
}
extent_offset = cur - em->start;
@@ -3038,12 +3035,9 @@
set_extent_uptodate(tree, cur, cur + iosize - 1,
&cached, GFP_NOFS);
- if (parent_locked)
- free_extent_state(cached);
- else
- unlock_extent_cached(tree, cur,
- cur + iosize - 1,
- &cached, GFP_NOFS);
+ unlock_extent_cached(tree, cur,
+ cur + iosize - 1,
+ &cached, GFP_NOFS);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -3052,8 +3046,7 @@
if (test_range_bit(tree, cur, cur_end,
EXTENT_UPTODATE, 1, NULL)) {
check_page_uptodate(tree, page);
- if (!parent_locked)
- unlock_extent(tree, cur, cur + iosize - 1);
+ unlock_extent(tree, cur, cur + iosize - 1);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -3063,8 +3056,7 @@
*/
if (block_start == EXTENT_MAP_INLINE) {
SetPageError(page);
- if (!parent_locked)
- unlock_extent(tree, cur, cur + iosize - 1);
+ unlock_extent(tree, cur, cur + iosize - 1);
cur = cur + iosize;
pg_offset += iosize;
continue;
@@ -3083,8 +3075,7 @@
*bio_flags = this_bio_flag;
} else {
SetPageError(page);
- if (!parent_locked)
- unlock_extent(tree, cur, cur + iosize - 1);
+ unlock_extent(tree, cur, cur + iosize - 1);
}
cur = cur + iosize;
pg_offset += iosize;
@@ -3213,20 +3204,6 @@
return ret;
}
-int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
- get_extent_t *get_extent, int mirror_num)
-{
- struct bio *bio = NULL;
- unsigned long bio_flags = EXTENT_BIO_PARENT_LOCKED;
- int ret;
-
- ret = __do_readpage(tree, page, get_extent, NULL, &bio, mirror_num,
- &bio_flags, READ, NULL);
- if (bio)
- ret = submit_one_bio(READ, bio, mirror_num, bio_flags);
- return ret;
-}
-
static noinline void update_nr_written(struct page *page,
struct writeback_control *wbc,
unsigned long nr_written)
diff --git a/fs/btrfs/extent_io.h b/fs/btrfs/extent_io.h
index 0377413..880d529 100644
--- a/fs/btrfs/extent_io.h
+++ b/fs/btrfs/extent_io.h
@@ -29,7 +29,6 @@
*/
#define EXTENT_BIO_COMPRESSED 1
#define EXTENT_BIO_TREE_LOG 2
-#define EXTENT_BIO_PARENT_LOCKED 4
#define EXTENT_BIO_FLAG_SHIFT 16
/* these are bit numbers for test/set bit */
@@ -210,8 +209,6 @@
int try_lock_extent(struct extent_io_tree *tree, u64 start, u64 end);
int extent_read_full_page(struct extent_io_tree *tree, struct page *page,
get_extent_t *get_extent, int mirror_num);
-int extent_read_full_page_nolock(struct extent_io_tree *tree, struct page *page,
- get_extent_t *get_extent, int mirror_num);
int __init extent_io_init(void);
void extent_io_exit(void);
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 5f06eb1..d96f5cf 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -5717,6 +5717,7 @@
char *name_ptr;
int name_len;
int is_curr = 0; /* ctx->pos points to the current index? */
+ bool emitted;
/* FIXME, use a real flag for deciding about the key type */
if (root->fs_info->tree_root == root)
@@ -5745,6 +5746,7 @@
if (ret < 0)
goto err;
+ emitted = false;
while (1) {
leaf = path->nodes[0];
slot = path->slots[0];
@@ -5824,6 +5826,7 @@
if (over)
goto nopos;
+ emitted = true;
di_len = btrfs_dir_name_len(leaf, di) +
btrfs_dir_data_len(leaf, di) + sizeof(*di);
di_cur += di_len;
@@ -5836,11 +5839,20 @@
if (key_type == BTRFS_DIR_INDEX_KEY) {
if (is_curr)
ctx->pos++;
- ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list);
+ ret = btrfs_readdir_delayed_dir_index(ctx, &ins_list, &emitted);
if (ret)
goto nopos;
}
+ /*
+ * If we haven't emitted any dir entry, we must not touch ctx->pos as
+ * it was was set to the termination value in previous call. We assume
+ * that "." and ".." were emitted if we reach this point and set the
+ * termination value as well for an empty directory.
+ */
+ if (ctx->pos > 2 && !emitted)
+ goto nopos;
+
/* Reached end of directory/root. Bump pos past the last item. */
ctx->pos++;
@@ -7974,6 +7986,7 @@
kfree(dip);
+ dio_bio->bi_error = bio->bi_error;
dio_end_io(dio_bio, bio->bi_error);
if (io_bio->end_io)
@@ -8028,6 +8041,7 @@
kfree(dip);
+ dio_bio->bi_error = bio->bi_error;
dio_end_io(dio_bio, bio->bi_error);
bio_put(bio);
}
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index 952172c..48aee98 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -2794,24 +2794,29 @@
static struct page *extent_same_get_page(struct inode *inode, pgoff_t index)
{
struct page *page;
- struct extent_io_tree *tree = &BTRFS_I(inode)->io_tree;
page = grab_cache_page(inode->i_mapping, index);
if (!page)
- return NULL;
+ return ERR_PTR(-ENOMEM);
if (!PageUptodate(page)) {
- if (extent_read_full_page_nolock(tree, page, btrfs_get_extent,
- 0))
- return NULL;
+ int ret;
+
+ ret = btrfs_readpage(NULL, page);
+ if (ret)
+ return ERR_PTR(ret);
lock_page(page);
if (!PageUptodate(page)) {
unlock_page(page);
page_cache_release(page);
- return NULL;
+ return ERR_PTR(-EIO);
+ }
+ if (page->mapping != inode->i_mapping) {
+ unlock_page(page);
+ page_cache_release(page);
+ return ERR_PTR(-EAGAIN);
}
}
- unlock_page(page);
return page;
}
@@ -2823,17 +2828,31 @@
pgoff_t index = off >> PAGE_CACHE_SHIFT;
for (i = 0; i < num_pages; i++) {
+again:
pages[i] = extent_same_get_page(inode, index + i);
- if (!pages[i])
- return -ENOMEM;
+ if (IS_ERR(pages[i])) {
+ int err = PTR_ERR(pages[i]);
+
+ if (err == -EAGAIN)
+ goto again;
+ pages[i] = NULL;
+ return err;
+ }
}
return 0;
}
-static inline void lock_extent_range(struct inode *inode, u64 off, u64 len)
+static int lock_extent_range(struct inode *inode, u64 off, u64 len,
+ bool retry_range_locking)
{
- /* do any pending delalloc/csum calc on src, one way or
- another, and lock file content */
+ /*
+ * Do any pending delalloc/csum calculations on inode, one way or
+ * another, and lock file content.
+ * The locking order is:
+ *
+ * 1) pages
+ * 2) range in the inode's io tree
+ */
while (1) {
struct btrfs_ordered_extent *ordered;
lock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
@@ -2851,8 +2870,11 @@
unlock_extent(&BTRFS_I(inode)->io_tree, off, off + len - 1);
if (ordered)
btrfs_put_ordered_extent(ordered);
+ if (!retry_range_locking)
+ return -EAGAIN;
btrfs_wait_ordered_range(inode, off, len);
}
+ return 0;
}
static void btrfs_double_inode_unlock(struct inode *inode1, struct inode *inode2)
@@ -2877,15 +2899,24 @@
unlock_extent(&BTRFS_I(inode2)->io_tree, loff2, loff2 + len - 1);
}
-static void btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
- struct inode *inode2, u64 loff2, u64 len)
+static int btrfs_double_extent_lock(struct inode *inode1, u64 loff1,
+ struct inode *inode2, u64 loff2, u64 len,
+ bool retry_range_locking)
{
+ int ret;
+
if (inode1 < inode2) {
swap(inode1, inode2);
swap(loff1, loff2);
}
- lock_extent_range(inode1, loff1, len);
- lock_extent_range(inode2, loff2, len);
+ ret = lock_extent_range(inode1, loff1, len, retry_range_locking);
+ if (ret)
+ return ret;
+ ret = lock_extent_range(inode2, loff2, len, retry_range_locking);
+ if (ret)
+ unlock_extent(&BTRFS_I(inode1)->io_tree, loff1,
+ loff1 + len - 1);
+ return ret;
}
struct cmp_pages {
@@ -2901,11 +2932,15 @@
for (i = 0; i < cmp->num_pages; i++) {
pg = cmp->src_pages[i];
- if (pg)
+ if (pg) {
+ unlock_page(pg);
page_cache_release(pg);
+ }
pg = cmp->dst_pages[i];
- if (pg)
+ if (pg) {
+ unlock_page(pg);
page_cache_release(pg);
+ }
}
kfree(cmp->src_pages);
kfree(cmp->dst_pages);
@@ -2966,6 +3001,8 @@
src_page = cmp->src_pages[i];
dst_page = cmp->dst_pages[i];
+ ASSERT(PageLocked(src_page));
+ ASSERT(PageLocked(dst_page));
addr = kmap_atomic(src_page);
dst_addr = kmap_atomic(dst_page);
@@ -3078,14 +3115,46 @@
goto out_unlock;
}
+again:
ret = btrfs_cmp_data_prepare(src, loff, dst, dst_loff, olen, &cmp);
if (ret)
goto out_unlock;
if (same_inode)
- lock_extent_range(src, same_lock_start, same_lock_len);
+ ret = lock_extent_range(src, same_lock_start, same_lock_len,
+ false);
else
- btrfs_double_extent_lock(src, loff, dst, dst_loff, len);
+ ret = btrfs_double_extent_lock(src, loff, dst, dst_loff, len,
+ false);
+ /*
+ * If one of the inodes has dirty pages in the respective range or
+	 * ordered extents, we need to flush delalloc and wait for all ordered
+ * extents in the range. We must unlock the pages and the ranges in the
+ * io trees to avoid deadlocks when flushing delalloc (requires locking
+ * pages) and when waiting for ordered extents to complete (they require
+ * range locking).
+ */
+ if (ret == -EAGAIN) {
+ /*
+ * Ranges in the io trees already unlocked. Now unlock all
+ * pages before waiting for all IO to complete.
+ */
+ btrfs_cmp_data_free(&cmp);
+ if (same_inode) {
+ btrfs_wait_ordered_range(src, same_lock_start,
+ same_lock_len);
+ } else {
+ btrfs_wait_ordered_range(src, loff, len);
+ btrfs_wait_ordered_range(dst, dst_loff, len);
+ }
+ goto again;
+ }
+ ASSERT(ret == 0);
+ if (WARN_ON(ret)) {
+ /* ranges in the io trees already unlocked */
+ btrfs_cmp_data_free(&cmp);
+ return ret;
+ }
/* pass original length for comparison so we stay within i_size */
ret = btrfs_cmp_data(src, loff, dst, dst_loff, olen, &cmp);
@@ -3795,9 +3864,15 @@
u64 lock_start = min_t(u64, off, destoff);
u64 lock_len = max_t(u64, off, destoff) + len - lock_start;
- lock_extent_range(src, lock_start, lock_len);
+ ret = lock_extent_range(src, lock_start, lock_len, true);
} else {
- btrfs_double_extent_lock(src, off, inode, destoff, len);
+ ret = btrfs_double_extent_lock(src, off, inode, destoff, len,
+ true);
+ }
+ ASSERT(ret == 0);
+ if (WARN_ON(ret)) {
+ /* ranges in the io trees already unlocked */
+ goto out_unlock;
}
ret = btrfs_clone(src, inode, off, olen, len, destoff, 0);
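
[The dedupe/clone paths above turn lock_extent_range() into a fallible operation so callers can drop both the page locks and the io-tree ranges, wait for pending ordered extents, and retry from scratch, instead of blocking while still holding the page locks. A standalone sketch of that unlock-everything-then-retry shape, with toy state rather than the btrfs locking API:]

    #include <errno.h>

    /* -EAGAIN models "ordered extents pending in the range". */
    static int try_lock_range(int *pending_io)
    {
            return *pending_io ? -EAGAIN : 0;
    }

    static void wait_for_ordered_extents(int *pending_io)
    {
            *pending_io = 0;        /* flushing completes the pending IO */
    }

    static int lock_for_dedupe(int *pending_io)
    {
            int ret;

            for (;;) {
                    /* 1) lock pages ... */
                    ret = try_lock_range(pending_io);
                    if (ret != -EAGAIN)
                            return ret;     /* 0 on success, other errors out */
                    /* 2) unlock pages before waiting, avoiding the deadlock
                     *    with delalloc flushing (needs the pages) and ordered
                     *    extent completion (needs the range). */
                    wait_for_ordered_extents(pending_io);
            }
    }

    int main(void)
    {
            int pending = 1;

            return lock_for_dedupe(&pending);
    }
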
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 7dc886c..e956cba 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -175,7 +175,7 @@
* string to the length of the original string to allow for worst case.
*/
md_len = strlen(sb_mountdata) + INET6_ADDRSTRLEN;
- mountdata = kzalloc(md_len + 1, GFP_KERNEL);
+ mountdata = kzalloc(md_len + sizeof("ip=") + 1, GFP_KERNEL);
if (mountdata == NULL) {
rc = -ENOMEM;
goto compose_mount_options_err;
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index afa09fc..e682b36 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -714,7 +714,7 @@
ses->auth_key.response = kmalloc(baselen + tilen, GFP_KERNEL);
if (!ses->auth_key.response) {
- rc = ENOMEM;
+ rc = -ENOMEM;
ses->auth_key.len = 0;
goto setup_ntlmv2_rsp_ret;
}
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 4fbd92d..a763cd3 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -2999,8 +2999,7 @@
if (ses_init_buf) {
ses_init_buf->trailer.session_req.called_len = 32;
- if (server->server_RFC1001_name &&
- server->server_RFC1001_name[0] != 0)
+ if (server->server_RFC1001_name[0] != 0)
rfc1002mangle(ses_init_buf->trailer.
session_req.called_name,
server->server_RFC1001_name,
diff --git a/fs/dax.c b/fs/dax.c
index fc2e314..7111724 100644
--- a/fs/dax.c
+++ b/fs/dax.c
@@ -79,15 +79,14 @@
}
/*
- * dax_clear_blocks() is called from within transaction context from XFS,
+ * dax_clear_sectors() is called from within transaction context from XFS,
* and hence this means the stack from this point must follow GFP_NOFS
* semantics for all operations.
*/
-int dax_clear_blocks(struct inode *inode, sector_t block, long _size)
+int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size)
{
- struct block_device *bdev = inode->i_sb->s_bdev;
struct blk_dax_ctl dax = {
- .sector = block << (inode->i_blkbits - 9),
+ .sector = _sector,
.size = _size,
};
@@ -109,7 +108,7 @@
wmb_pmem();
return 0;
}
-EXPORT_SYMBOL_GPL(dax_clear_blocks);
+EXPORT_SYMBOL_GPL(dax_clear_sectors);
/* the clear_pmem() calls are ordered by a wmb_pmem() in the caller */
static void dax_new_buf(void __pmem *addr, unsigned size, unsigned first,
@@ -485,11 +484,10 @@
* end]. This is required by data integrity operations to ensure file data is
* on persistent storage prior to completion of the operation.
*/
-int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
- loff_t end)
+int dax_writeback_mapping_range(struct address_space *mapping,
+ struct block_device *bdev, struct writeback_control *wbc)
{
struct inode *inode = mapping->host;
- struct block_device *bdev = inode->i_sb->s_bdev;
pgoff_t start_index, end_index, pmd_index;
pgoff_t indices[PAGEVEC_SIZE];
struct pagevec pvec;
@@ -500,8 +498,11 @@
if (WARN_ON_ONCE(inode->i_blkbits != PAGE_SHIFT))
return -EIO;
- start_index = start >> PAGE_CACHE_SHIFT;
- end_index = end >> PAGE_CACHE_SHIFT;
+ if (!mapping->nrexceptional || wbc->sync_mode != WB_SYNC_ALL)
+ return 0;
+
+ start_index = wbc->range_start >> PAGE_CACHE_SHIFT;
+ end_index = wbc->range_end >> PAGE_CACHE_SHIFT;
pmd_index = DAX_PMD_INDEX(start_index);
rcu_read_lock();
diff --git a/fs/devpts/inode.c b/fs/devpts/inode.c
index 1f107fd..655f21f 100644
--- a/fs/devpts/inode.c
+++ b/fs/devpts/inode.c
@@ -575,6 +575,26 @@
mutex_unlock(&allocated_ptys_lock);
}
+/*
+ * pty code needs to hold extra references in case of last /dev/tty close
+ */
+
+void devpts_add_ref(struct inode *ptmx_inode)
+{
+ struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+ atomic_inc(&sb->s_active);
+ ihold(ptmx_inode);
+}
+
+void devpts_del_ref(struct inode *ptmx_inode)
+{
+ struct super_block *sb = pts_sb_from_inode(ptmx_inode);
+
+ iput(ptmx_inode);
+ deactivate_super(sb);
+}
+
/**
* devpts_pty_new -- create a new inode in /dev/pts/
* @ptmx_inode: inode of the master
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 1b2f7ff..d6a9012 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -472,8 +472,8 @@
dio->io_error = -EIO;
if (dio->is_async && dio->rw == READ && dio->should_dirty) {
- bio_check_pages_dirty(bio); /* transfers ownership */
err = bio->bi_error;
+ bio_check_pages_dirty(bio); /* transfers ownership */
} else {
bio_for_each_segment_all(bvec, bio, i) {
struct page *page = bvec->bv_page;
diff --git a/fs/efivarfs/file.c b/fs/efivarfs/file.c
index c424e48..d48e0d2 100644
--- a/fs/efivarfs/file.c
+++ b/fs/efivarfs/file.c
@@ -10,6 +10,7 @@
#include <linux/efi.h>
#include <linux/fs.h>
#include <linux/slab.h>
+#include <linux/mount.h>
#include "internal.h"
@@ -103,9 +104,78 @@
return size;
}
+static int
+efivarfs_ioc_getxflags(struct file *file, void __user *arg)
+{
+ struct inode *inode = file->f_mapping->host;
+ unsigned int i_flags;
+ unsigned int flags = 0;
+
+ i_flags = inode->i_flags;
+ if (i_flags & S_IMMUTABLE)
+ flags |= FS_IMMUTABLE_FL;
+
+ if (copy_to_user(arg, &flags, sizeof(flags)))
+ return -EFAULT;
+ return 0;
+}
+
+static int
+efivarfs_ioc_setxflags(struct file *file, void __user *arg)
+{
+ struct inode *inode = file->f_mapping->host;
+ unsigned int flags;
+ unsigned int i_flags = 0;
+ int error;
+
+ if (!inode_owner_or_capable(inode))
+ return -EACCES;
+
+ if (copy_from_user(&flags, arg, sizeof(flags)))
+ return -EFAULT;
+
+ if (flags & ~FS_IMMUTABLE_FL)
+ return -EOPNOTSUPP;
+
+ if (!capable(CAP_LINUX_IMMUTABLE))
+ return -EPERM;
+
+ if (flags & FS_IMMUTABLE_FL)
+ i_flags |= S_IMMUTABLE;
+
+
+ error = mnt_want_write_file(file);
+ if (error)
+ return error;
+
+ inode_lock(inode);
+ inode_set_flags(inode, i_flags, S_IMMUTABLE);
+ inode_unlock(inode);
+
+ mnt_drop_write_file(file);
+
+ return 0;
+}
+
+long
+efivarfs_file_ioctl(struct file *file, unsigned int cmd, unsigned long p)
+{
+ void __user *arg = (void __user *)p;
+
+ switch (cmd) {
+ case FS_IOC_GETFLAGS:
+ return efivarfs_ioc_getxflags(file, arg);
+ case FS_IOC_SETFLAGS:
+ return efivarfs_ioc_setxflags(file, arg);
+ }
+
+ return -ENOTTY;
+}
+
const struct file_operations efivarfs_file_operations = {
.open = simple_open,
.read = efivarfs_file_read,
.write = efivarfs_file_write,
.llseek = no_llseek,
+ .unlocked_ioctl = efivarfs_file_ioctl,
};
diff --git a/fs/efivarfs/inode.c b/fs/efivarfs/inode.c
index 3381b9d..e2ab6d0 100644
--- a/fs/efivarfs/inode.c
+++ b/fs/efivarfs/inode.c
@@ -15,7 +15,8 @@
#include "internal.h"
struct inode *efivarfs_get_inode(struct super_block *sb,
- const struct inode *dir, int mode, dev_t dev)
+ const struct inode *dir, int mode,
+ dev_t dev, bool is_removable)
{
struct inode *inode = new_inode(sb);
@@ -23,6 +24,7 @@
inode->i_ino = get_next_ino();
inode->i_mode = mode;
inode->i_atime = inode->i_mtime = inode->i_ctime = CURRENT_TIME;
+ inode->i_flags = is_removable ? 0 : S_IMMUTABLE;
switch (mode & S_IFMT) {
case S_IFREG:
inode->i_fop = &efivarfs_file_operations;
@@ -102,22 +104,17 @@
static int efivarfs_create(struct inode *dir, struct dentry *dentry,
umode_t mode, bool excl)
{
- struct inode *inode;
+ struct inode *inode = NULL;
struct efivar_entry *var;
int namelen, i = 0, err = 0;
+ bool is_removable = false;
if (!efivarfs_valid_name(dentry->d_name.name, dentry->d_name.len))
return -EINVAL;
- inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0);
- if (!inode)
- return -ENOMEM;
-
var = kzalloc(sizeof(struct efivar_entry), GFP_KERNEL);
- if (!var) {
- err = -ENOMEM;
- goto out;
- }
+ if (!var)
+ return -ENOMEM;
/* length of the variable name itself: remove GUID and separator */
namelen = dentry->d_name.len - EFI_VARIABLE_GUID_LEN - 1;
@@ -125,6 +122,16 @@
efivarfs_hex_to_guid(dentry->d_name.name + namelen + 1,
&var->var.VendorGuid);
+ if (efivar_variable_is_removable(var->var.VendorGuid,
+ dentry->d_name.name, namelen))
+ is_removable = true;
+
+ inode = efivarfs_get_inode(dir->i_sb, dir, mode, 0, is_removable);
+ if (!inode) {
+ err = -ENOMEM;
+ goto out;
+ }
+
for (i = 0; i < namelen; i++)
var->var.VariableName[i] = dentry->d_name.name[i];
@@ -138,7 +145,8 @@
out:
if (err) {
kfree(var);
- iput(inode);
+ if (inode)
+ iput(inode);
}
return err;
}
diff --git a/fs/efivarfs/internal.h b/fs/efivarfs/internal.h
index b5ff16a..b450518 100644
--- a/fs/efivarfs/internal.h
+++ b/fs/efivarfs/internal.h
@@ -15,7 +15,8 @@
extern const struct inode_operations efivarfs_dir_inode_operations;
extern bool efivarfs_valid_name(const char *str, int len);
extern struct inode *efivarfs_get_inode(struct super_block *sb,
- const struct inode *dir, int mode, dev_t dev);
+ const struct inode *dir, int mode, dev_t dev,
+ bool is_removable);
extern struct list_head efivarfs_list;
diff --git a/fs/efivarfs/super.c b/fs/efivarfs/super.c
index b8a564f..dd029d1 100644
--- a/fs/efivarfs/super.c
+++ b/fs/efivarfs/super.c
@@ -118,8 +118,9 @@
struct dentry *dentry, *root = sb->s_root;
unsigned long size = 0;
char *name;
- int len, i;
+ int len;
int err = -ENOMEM;
+ bool is_removable = false;
entry = kzalloc(sizeof(*entry), GFP_KERNEL);
if (!entry)
@@ -128,15 +129,17 @@
memcpy(entry->var.VariableName, name16, name_size);
memcpy(&(entry->var.VendorGuid), &vendor, sizeof(efi_guid_t));
- len = ucs2_strlen(entry->var.VariableName);
+ len = ucs2_utf8size(entry->var.VariableName);
/* name, plus '-', plus GUID, plus NUL*/
name = kmalloc(len + 1 + EFI_VARIABLE_GUID_LEN + 1, GFP_KERNEL);
if (!name)
goto fail;
- for (i = 0; i < len; i++)
- name[i] = entry->var.VariableName[i] & 0xFF;
+ ucs2_as_utf8(name, entry->var.VariableName, len);
+
+ if (efivar_variable_is_removable(entry->var.VendorGuid, name, len))
+ is_removable = true;
name[len] = '-';
@@ -144,7 +147,8 @@
name[len + EFI_VARIABLE_GUID_LEN+1] = '\0';
- inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0);
+ inode = efivarfs_get_inode(sb, d_inode(root), S_IFREG | 0644, 0,
+ is_removable);
if (!inode)
goto fail_name;
@@ -200,7 +204,7 @@
sb->s_d_op = &efivarfs_d_ops;
sb->s_time_gran = 1;
- inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0);
+ inode = efivarfs_get_inode(sb, NULL, S_IFDIR | 0755, 0, true);
if (!inode)
return -ENOMEM;
inode->i_op = &efivarfs_dir_inode_operations;
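
[The super.c hunk above stops building the file name by masking each UCS-2 character to its low byte and instead sizes and converts it as UTF-8. A standalone sketch of why that matters for non-Latin1 names; the helper is a local stand-in for ucs2_utf8size(), BMP-only with surrogates ignored:]

    #include <stdint.h>
    #include <stdio.h>

    /* UTF-8 length of a NUL-terminated UCS-2 string (BMP only). */
    static size_t utf8size(const uint16_t *s)
    {
            size_t n = 0;

            for (; *s; s++)
                    n += *s < 0x80 ? 1 : (*s < 0x800 ? 2 : 3);
            return n;
    }

    int main(void)
    {
            const uint16_t name[] = { 0x4e2d, 0x6587, 0 }; /* two CJK chars */

            /* A proper conversion needs 6 bytes here; the old "c & 0xFF"
             * copy would have produced 2 meaningless bytes and sized the
             * buffer from ucs2_strlen(), i.e. the wrong length. */
            printf("utf8 bytes needed: %zu\n", utf8size(name));
            return 0;
    }
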
diff --git a/fs/ext2/file.c b/fs/ext2/file.c
index 2c88d68..c1400b1 100644
--- a/fs/ext2/file.c
+++ b/fs/ext2/file.c
@@ -80,23 +80,6 @@
return ret;
}
-static int ext2_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- struct inode *inode = file_inode(vma->vm_file);
- struct ext2_inode_info *ei = EXT2_I(inode);
- int ret;
-
- sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
- down_read(&ei->dax_sem);
-
- ret = __dax_mkwrite(vma, vmf, ext2_get_block, NULL);
-
- up_read(&ei->dax_sem);
- sb_end_pagefault(inode->i_sb);
- return ret;
-}
-
static int ext2_dax_pfn_mkwrite(struct vm_area_struct *vma,
struct vm_fault *vmf)
{
@@ -124,7 +107,7 @@
static const struct vm_operations_struct ext2_dax_vm_ops = {
.fault = ext2_dax_fault,
.pmd_fault = ext2_dax_pmd_fault,
- .page_mkwrite = ext2_dax_mkwrite,
+ .page_mkwrite = ext2_dax_fault,
.pfn_mkwrite = ext2_dax_pfn_mkwrite,
};
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 338eefd..6bd58e6 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -737,8 +737,10 @@
* so that it's not found by another thread before it's
* initialised
*/
- err = dax_clear_blocks(inode, le32_to_cpu(chain[depth-1].key),
- 1 << inode->i_blkbits);
+ err = dax_clear_sectors(inode->i_sb->s_bdev,
+ le32_to_cpu(chain[depth-1].key) <<
+ (inode->i_blkbits - 9),
+ 1 << inode->i_blkbits);
if (err) {
mutex_unlock(&ei->truncate_mutex);
goto cleanup;
@@ -874,6 +876,14 @@
static int
ext2_writepages(struct address_space *mapping, struct writeback_control *wbc)
{
+#ifdef CONFIG_FS_DAX
+ if (dax_mapping(mapping)) {
+ return dax_writeback_mapping_range(mapping,
+ mapping->host->i_sb->s_bdev,
+ wbc);
+ }
+#endif
+
return mpage_writepages(mapping, wbc, ext2_get_block);
}
@@ -1296,7 +1306,7 @@
inode->i_flags |= S_NOATIME;
if (flags & EXT2_DIRSYNC_FL)
inode->i_flags |= S_DIRSYNC;
- if (test_opt(inode->i_sb, DAX))
+ if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
inode->i_flags |= S_DAX;
}
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index ec0668a..fe1f50f 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -191,7 +191,6 @@
/* If checksum is bad mark all blocks used to prevent allocation
* essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
- ext4_error(sb, "Checksum bad for group %u", block_group);
grp = ext4_get_group_info(sb, block_group);
if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -442,14 +441,16 @@
}
ext4_lock_group(sb, block_group);
if (desc->bg_flags & cpu_to_le16(EXT4_BG_BLOCK_UNINIT)) {
-
err = ext4_init_block_bitmap(sb, bh, block_group, desc);
set_bitmap_uptodate(bh);
set_buffer_uptodate(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
- if (err)
+ if (err) {
+ ext4_error(sb, "Failed to init block bitmap for group "
+ "%u: %d", block_group, err);
goto out;
+ }
goto verify;
}
ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/crypto.c b/fs/ext4/crypto.c
index c802120..38f7562 100644
--- a/fs/ext4/crypto.c
+++ b/fs/ext4/crypto.c
@@ -467,3 +467,59 @@
return size;
return 0;
}
+
+/*
+ * Validate dentries for encrypted directories to make sure we aren't
+ * potentially caching stale data after a key has been added or
+ * removed.
+ */
+static int ext4_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ struct inode *dir = d_inode(dentry->d_parent);
+ struct ext4_crypt_info *ci = EXT4_I(dir)->i_crypt_info;
+ int dir_has_key, cached_with_key;
+
+ if (!ext4_encrypted_inode(dir))
+ return 0;
+
+ if (ci && ci->ci_keyring_key &&
+ (ci->ci_keyring_key->flags & ((1 << KEY_FLAG_INVALIDATED) |
+ (1 << KEY_FLAG_REVOKED) |
+ (1 << KEY_FLAG_DEAD))))
+ ci = NULL;
+
+	/* this should eventually be a flag in d_flags */
+ cached_with_key = dentry->d_fsdata != NULL;
+ dir_has_key = (ci != NULL);
+
+ /*
+ * If the dentry was cached without the key, and it is a
+ * negative dentry, it might be a valid name. We can't check
+ * if the key has since been made available due to locking
+ * reasons, so we fail the validation so ext4_lookup() can do
+ * this check.
+ *
+ * We also fail the validation if the dentry was created with
+ * the key present, but we no longer have the key, or vice versa.
+ */
+ if ((!cached_with_key && d_is_negative(dentry)) ||
+ (!cached_with_key && dir_has_key) ||
+ (cached_with_key && !dir_has_key)) {
+#if 0 /* Revalidation debug */
+ char buf[80];
+ char *cp = simple_dname(dentry, buf, sizeof(buf));
+
+ if (IS_ERR(cp))
+ cp = (char *) "???";
+ pr_err("revalidate: %s %p %d %d %d\n", cp, dentry->d_fsdata,
+ cached_with_key, d_is_negative(dentry),
+ dir_has_key);
+#endif
+ return 0;
+ }
+ return 1;
+}
+
+const struct dentry_operations ext4_encrypted_d_ops = {
+ .d_revalidate = ext4_d_revalidate,
+};
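
The three invalidation conditions in ext4_d_revalidate() collapse to a compact predicate: a cached dentry stays valid only while the directory's key availability is unchanged, and a negative dentry cached without the key is always re-looked-up. A condensed restatement (illustrative only; dentry_still_valid is not a kernel function):

#include <stdbool.h>

/* Condensed form of the validity test in ext4_d_revalidate() above. */
static bool dentry_still_valid(bool cached_with_key, bool dir_has_key,
			       bool is_negative)
{
	/* A negative dentry cached without the key may have become a
	 * valid name once the key arrived; force a fresh lookup. */
	if (!cached_with_key && is_negative)
		return false;
	/* Otherwise the dentry is stale iff the key came or went. */
	return cached_with_key == dir_has_key;
}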
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index 1d1bca7..33f5e2a 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -111,6 +111,12 @@
int dir_has_error = 0;
struct ext4_str fname_crypto_str = {.name = NULL, .len = 0};
+ if (ext4_encrypted_inode(inode)) {
+ err = ext4_get_encryption_info(inode);
+ if (err && err != -ENOKEY)
+ return err;
+ }
+
if (is_dx_dir(inode)) {
err = ext4_dx_readdir(file, ctx);
if (err != ERR_BAD_DX_DIR) {
@@ -157,8 +163,11 @@
index, 1);
file->f_ra.prev_pos = (loff_t)index << PAGE_CACHE_SHIFT;
bh = ext4_bread(NULL, inode, map.m_lblk, 0);
- if (IS_ERR(bh))
- return PTR_ERR(bh);
+ if (IS_ERR(bh)) {
+ err = PTR_ERR(bh);
+ bh = NULL;
+ goto errout;
+ }
}
if (!bh) {
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0662b28..157b458 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -2302,6 +2302,7 @@
int ext4_decrypt(struct page *page);
int ext4_encrypted_zeroout(struct inode *inode, ext4_lblk_t lblk,
ext4_fsblk_t pblk, ext4_lblk_t len);
+extern const struct dentry_operations ext4_encrypted_d_ops;
#ifdef CONFIG_EXT4_FS_ENCRYPTION
int ext4_init_crypto(void);
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 0ffabaf..3753ceb 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3928,7 +3928,7 @@
convert_initialized_extent(handle_t *handle, struct inode *inode,
struct ext4_map_blocks *map,
struct ext4_ext_path **ppath, int flags,
- unsigned int allocated, ext4_fsblk_t newblock)
+ unsigned int allocated)
{
struct ext4_ext_path *path = *ppath;
struct ext4_extent *ex;
@@ -4347,7 +4347,7 @@
(flags & EXT4_GET_BLOCKS_CONVERT_UNWRITTEN)) {
allocated = convert_initialized_extent(
handle, inode, map, &path,
- flags, allocated, newblock);
+ flags, allocated);
goto out2;
} else if (!ext4_ext_is_unwritten(ex))
goto out;
diff --git a/fs/ext4/file.c b/fs/ext4/file.c
index 1126436..4cd318f 100644
--- a/fs/ext4/file.c
+++ b/fs/ext4/file.c
@@ -262,23 +262,8 @@
return result;
}
-static int ext4_dax_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
-{
- int err;
- struct inode *inode = file_inode(vma->vm_file);
-
- sb_start_pagefault(inode->i_sb);
- file_update_time(vma->vm_file);
- down_read(&EXT4_I(inode)->i_mmap_sem);
- err = __dax_mkwrite(vma, vmf, ext4_dax_mmap_get_block, NULL);
- up_read(&EXT4_I(inode)->i_mmap_sem);
- sb_end_pagefault(inode->i_sb);
-
- return err;
-}
-
/*
- * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_mkwrite()
+ * Handle write fault for VM_MIXEDMAP mappings. Similarly to ext4_dax_fault()
* handler we check for races against truncate. Note that since we cycle through
* i_mmap_sem, we are sure that also any hole punching that began before we
* were called is finished by now and so if it included part of the file we
@@ -311,7 +296,7 @@
static const struct vm_operations_struct ext4_dax_vm_ops = {
.fault = ext4_dax_fault,
.pmd_fault = ext4_dax_pmd_fault,
- .page_mkwrite = ext4_dax_mkwrite,
+ .page_mkwrite = ext4_dax_fault,
.pfn_mkwrite = ext4_dax_pfn_mkwrite,
};
#else
@@ -350,6 +335,7 @@
struct super_block *sb = inode->i_sb;
struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
struct vfsmount *mnt = filp->f_path.mnt;
+ struct inode *dir = filp->f_path.dentry->d_parent->d_inode;
struct path path;
char buf[64], *cp;
int ret;
@@ -393,6 +379,14 @@
if (ext4_encryption_info(inode) == NULL)
return -ENOKEY;
}
+ if (ext4_encrypted_inode(dir) &&
+ !ext4_is_child_context_consistent_with_parent(dir, inode)) {
+ ext4_warning(inode->i_sb,
+ "Inconsistent encryption contexts: %lu/%lu\n",
+ (unsigned long) dir->i_ino,
+ (unsigned long) inode->i_ino);
+ return -EPERM;
+ }
/*
* Set up the jbd2_inode if we are opening the inode for
* writing and the journal is present
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 3fcfd50..acc0ad5 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -76,7 +76,6 @@
/* If checksum is bad, mark all blocks and inodes used to prevent
* allocation, essentially implementing a per-group read-only flag. */
if (!ext4_group_desc_csum_verify(sb, block_group, gdp)) {
- ext4_error(sb, "Checksum bad for group %u", block_group);
grp = ext4_get_group_info(sb, block_group);
if (!EXT4_MB_GRP_BBITMAP_CORRUPT(grp))
percpu_counter_sub(&sbi->s_freeclusters_counter,
@@ -191,8 +190,11 @@
set_buffer_verified(bh);
ext4_unlock_group(sb, block_group);
unlock_buffer(bh);
- if (err)
+ if (err) {
+ ext4_error(sb, "Failed to init inode bitmap for group "
+ "%u: %d", block_group, err);
goto out;
+ }
return bh;
}
ext4_unlock_group(sb, block_group);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 83bc8bf..aee960b 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -686,6 +686,34 @@
return retval;
}
+/*
+ * Update EXT4_MAP_FLAGS in bh->b_state. For buffer heads attached to pages
+ * we have to be careful as someone else may be manipulating b_state as well.
+ */
+static void ext4_update_bh_state(struct buffer_head *bh, unsigned long flags)
+{
+ unsigned long old_state;
+ unsigned long new_state;
+
+ flags &= EXT4_MAP_FLAGS;
+
+ /* Dummy buffer_head? Set non-atomically. */
+ if (!bh->b_page) {
+ bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | flags;
+ return;
+ }
+ /*
+ * Someone else may be modifying b_state. Be careful! This is ugly but
+ * once we get rid of using bh as a container for mapping information
+ * to pass to / from get_block functions, this can go away.
+ */
+ do {
+ old_state = READ_ONCE(bh->b_state);
+ new_state = (old_state & ~EXT4_MAP_FLAGS) | flags;
+ } while (unlikely(
+ cmpxchg(&bh->b_state, old_state, new_state) != old_state));
+}
+
/* Maximum number of blocks we map for direct IO at once. */
#define DIO_MAX_BLOCKS 4096
@@ -722,7 +750,7 @@
ext4_io_end_t *io_end = ext4_inode_aio(inode);
map_bh(bh, inode->i_sb, map.m_pblk);
- bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+ ext4_update_bh_state(bh, map.m_flags);
if (io_end && io_end->flag & EXT4_IO_END_UNWRITTEN)
set_buffer_defer_completion(bh);
bh->b_size = inode->i_sb->s_blocksize * map.m_len;
@@ -1685,7 +1713,7 @@
return ret;
map_bh(bh, inode->i_sb, map.m_pblk);
- bh->b_state = (bh->b_state & ~EXT4_MAP_FLAGS) | map.m_flags;
+ ext4_update_bh_state(bh, map.m_flags);
if (buffer_unwritten(bh)) {
/* A delayed write to unwritten bh should be marked
@@ -2450,6 +2478,10 @@
trace_ext4_writepages(inode, wbc);
+ if (dax_mapping(mapping))
+ return dax_writeback_mapping_range(mapping, inode->i_sb->s_bdev,
+ wbc);
+
/*
* No pages to write? This is mainly a kludge to avoid starting
* a transaction for special inodes like journal inode on last iput()
@@ -3253,29 +3285,29 @@
* case, we allocate an io_end structure to hook to the iocb.
*/
iocb->private = NULL;
- ext4_inode_aio_set(inode, NULL);
- if (!is_sync_kiocb(iocb)) {
- io_end = ext4_init_io_end(inode, GFP_NOFS);
- if (!io_end) {
- ret = -ENOMEM;
- goto retake_lock;
- }
- /*
- * Grab reference for DIO. Will be dropped in ext4_end_io_dio()
- */
- iocb->private = ext4_get_io_end(io_end);
- /*
- * we save the io structure for current async direct
- * IO, so that later ext4_map_blocks() could flag the
- * io structure whether there is a unwritten extents
- * needs to be converted when IO is completed.
- */
- ext4_inode_aio_set(inode, io_end);
- }
-
if (overwrite) {
get_block_func = ext4_get_block_overwrite;
} else {
+ ext4_inode_aio_set(inode, NULL);
+ if (!is_sync_kiocb(iocb)) {
+ io_end = ext4_init_io_end(inode, GFP_NOFS);
+ if (!io_end) {
+ ret = -ENOMEM;
+ goto retake_lock;
+ }
+ /*
+ * Grab reference for DIO. Will be dropped in
+ * ext4_end_io_dio()
+ */
+ iocb->private = ext4_get_io_end(io_end);
+ /*
+ * We save the io structure for the current async direct
+ * IO, so that later ext4_map_blocks() can flag the io
+ * structure if there are unwritten extents that need to
+ * be converted when the IO completes.
+ */
+ ext4_inode_aio_set(inode, io_end);
+ }
get_block_func = ext4_get_block_write;
dio_flags = DIO_LOCKING;
}
@@ -4127,7 +4159,7 @@
new_fl |= S_NOATIME;
if (flags & EXT4_DIRSYNC_FL)
new_fl |= S_DIRSYNC;
- if (test_opt(inode->i_sb, DAX))
+ if (test_opt(inode->i_sb, DAX) && S_ISREG(inode->i_mode))
new_fl |= S_DAX;
inode_set_flags(inode, new_fl,
S_SYNC|S_APPEND|S_IMMUTABLE|S_NOATIME|S_DIRSYNC|S_DAX);
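
ext4_update_bh_state() above is the classic lock-free read-modify-write loop: snapshot the word, compute the new value, and retry until the compare-and-exchange observes no concurrent change. A minimal user-space rendering of the same pattern with C11 atomics (MAP_FLAGS_MASK stands in for EXT4_MAP_FLAGS; this is a sketch, not the kernel's cmpxchg()):

#include <stdatomic.h>

#define MAP_FLAGS_MASK 0xffUL /* stand-in for EXT4_MAP_FLAGS */

/* Replace only the flag bits of *state without losing concurrent
 * updates to the other bits. */
static void update_flags(_Atomic unsigned long *state, unsigned long flags)
{
	unsigned long old_state = atomic_load(state);
	unsigned long new_state;

	flags &= MAP_FLAGS_MASK;
	do {
		new_state = (old_state & ~MAP_FLAGS_MASK) | flags;
		/* On failure, old_state is refreshed with the current value. */
	} while (!atomic_compare_exchange_weak(state, &old_state, new_state));
}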
diff --git a/fs/ext4/ioctl.c b/fs/ext4/ioctl.c
index 0f6c369..eae5917 100644
--- a/fs/ext4/ioctl.c
+++ b/fs/ext4/ioctl.c
@@ -208,7 +208,7 @@
{
struct ext4_inode_info *ei = EXT4_I(inode);
handle_t *handle = NULL;
- int err = EPERM, migrate = 0;
+ int err = -EPERM, migrate = 0;
struct ext4_iloc iloc;
unsigned int oldflags, mask, i;
unsigned int jflag;
@@ -583,6 +583,11 @@
"Online defrag not supported with bigalloc");
err = -EOPNOTSUPP;
goto mext_out;
+ } else if (IS_DAX(inode)) {
+ ext4_msg(sb, KERN_ERR,
+ "Online defrag not supported with DAX");
+ err = -EOPNOTSUPP;
+ goto mext_out;
}
err = mnt_want_write_file(filp);
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 61eaf74..4424b7b 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2285,7 +2285,7 @@
if (group == 0)
seq_puts(seq, "#group: free frags first ["
" 2^0 2^1 2^2 2^3 2^4 2^5 2^6 "
- " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]");
+ " 2^7 2^8 2^9 2^10 2^11 2^12 2^13 ]\n");
i = (sb->s_blocksize_bits + 2) * sizeof(sg.info.bb_counters[0]) +
sizeof(struct ext4_group_info);
diff --git a/fs/ext4/move_extent.c b/fs/ext4/move_extent.c
index fb6f117..e032a04 100644
--- a/fs/ext4/move_extent.c
+++ b/fs/ext4/move_extent.c
@@ -265,11 +265,12 @@
ext4_lblk_t orig_blk_offset, donor_blk_offset;
unsigned long blocksize = orig_inode->i_sb->s_blocksize;
unsigned int tmp_data_size, data_size, replaced_size;
- int err2, jblocks, retries = 0;
+ int i, err2, jblocks, retries = 0;
int replaced_count = 0;
int from = data_offset_in_page << orig_inode->i_blkbits;
int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
struct super_block *sb = orig_inode->i_sb;
+ struct buffer_head *bh = NULL;
/*
* It needs twice the amount of ordinary journal buffers because
@@ -380,8 +381,16 @@
}
/* Perform all necessary steps similar to write_begin()/write_end()
* but keeping in mind that i_size will not change */
- *err = __block_write_begin(pagep[0], from, replaced_size,
- ext4_get_block);
+ if (!page_has_buffers(pagep[0]))
+ create_empty_buffers(pagep[0], 1 << orig_inode->i_blkbits, 0);
+ bh = page_buffers(pagep[0]);
+ for (i = 0; i < data_offset_in_page; i++)
+ bh = bh->b_this_page;
+ for (i = 0; i < block_len_in_page; i++) {
+ *err = ext4_get_block(orig_inode, orig_blk_offset + i, bh, 0);
+ if (*err < 0)
+ break;
+ }
if (!*err)
*err = block_commit_write(pagep[0], from, from + replaced_size);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 06574dd..48e4b89 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1558,6 +1558,24 @@
struct ext4_dir_entry_2 *de;
struct buffer_head *bh;
+ if (ext4_encrypted_inode(dir)) {
+ int res = ext4_get_encryption_info(dir);
+
+ /*
+ * This should be a properly defined flag for
+ * dentry->d_flags when we uplift this to the VFS.
+ * d_fsdata is set to (void *) 1 if the dentry is
+ * created while the directory was encrypted and we
+ * don't have access to the key.
+ */
+ dentry->d_fsdata = NULL;
+ if (ext4_encryption_info(dir))
+ dentry->d_fsdata = (void *) 1;
+ d_set_d_op(dentry, &ext4_encrypted_d_ops);
+ if (res && res != -ENOKEY)
+ return ERR_PTR(res);
+ }
+
if (dentry->d_name.len > EXT4_NAME_LEN)
return ERR_PTR(-ENAMETOOLONG);
@@ -1585,11 +1603,15 @@
return ERR_PTR(-EFSCORRUPTED);
}
if (!IS_ERR(inode) && ext4_encrypted_inode(dir) &&
- (S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode) ||
- S_ISLNK(inode->i_mode)) &&
+ (S_ISDIR(inode->i_mode) || S_ISLNK(inode->i_mode)) &&
!ext4_is_child_context_consistent_with_parent(dir,
inode)) {
+ int nokey = ext4_encrypted_inode(inode) &&
+ !ext4_encryption_info(inode);
+
iput(inode);
+ if (nokey)
+ return ERR_PTR(-ENOKEY);
ext4_warning(inode->i_sb,
"Inconsistent encryption contexts: %lu/%lu\n",
(unsigned long) dir->i_ino,
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index ad62d7a..34038e3 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -198,7 +198,7 @@
if (flex_gd == NULL)
goto out3;
- if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_flex_group_data))
+ if (flexbg_size >= UINT_MAX / sizeof(struct ext4_new_group_data))
goto out2;
flex_gd->count = flexbg_size;
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 6915c95..1f76d89 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -317,6 +317,7 @@
struct inode_switch_wbs_context *isw =
container_of(work, struct inode_switch_wbs_context, work);
struct inode *inode = isw->inode;
+ struct super_block *sb = inode->i_sb;
struct address_space *mapping = inode->i_mapping;
struct bdi_writeback *old_wb = inode->i_wb;
struct bdi_writeback *new_wb = isw->new_wb;
@@ -423,6 +424,7 @@
wb_put(new_wb);
iput(inode);
+ deactivate_super(sb);
kfree(isw);
}
@@ -469,11 +471,14 @@
/* while holding I_WB_SWITCH, no one else can update the association */
spin_lock(&inode->i_lock);
+
if (inode->i_state & (I_WB_SWITCH | I_FREEING) ||
- inode_to_wb(inode) == isw->new_wb) {
- spin_unlock(&inode->i_lock);
- goto out_free;
- }
+ inode_to_wb(inode) == isw->new_wb)
+ goto out_unlock;
+
+ if (!atomic_inc_not_zero(&inode->i_sb->s_active))
+ goto out_unlock;
+
inode->i_state |= I_WB_SWITCH;
spin_unlock(&inode->i_lock);
@@ -489,6 +494,8 @@
call_rcu(&isw->rcu_head, inode_switch_wbs_rcu_fn);
return;
+out_unlock:
+ spin_unlock(&inode->i_lock);
out_free:
if (isw->new_wb)
wb_put(isw->new_wb);
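
The switch path now pins the superblock with atomic_inc_not_zero(&inode->i_sb->s_active), taking a reference only if the count has not already hit zero, and the worker drops it with deactivate_super() once the switch completes. The underlying "try-get" idiom in C11 atomics (illustrative sketch; try_get is a made-up name):

#include <stdatomic.h>
#include <stdbool.h>

/* Take a reference only if the object has not already begun dying
 * (refcount == 0), mirroring atomic_inc_not_zero(). */
static bool try_get(_Atomic int *refcount)
{
	int old = atomic_load(refcount);

	while (old != 0) {
		if (atomic_compare_exchange_weak(refcount, &old, old + 1))
			return true;
		/* old was refreshed; loop re-checks for zero. */
	}
	return false;
}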
diff --git a/fs/hpfs/namei.c b/fs/hpfs/namei.c
index 506765a..bb8d67e 100644
--- a/fs/hpfs/namei.c
+++ b/fs/hpfs/namei.c
@@ -376,12 +376,11 @@
struct inode *inode = d_inode(dentry);
dnode_secno dno;
int r;
- int rep = 0;
int err;
hpfs_lock(dir->i_sb);
hpfs_adjust_length(name, &len);
-again:
+
err = -ENOENT;
de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, &dno, &qbh);
if (!de)
@@ -401,33 +400,9 @@
hpfs_error(dir->i_sb, "there was error when removing dirent");
err = -EFSERROR;
break;
- case 2: /* no space for deleting, try to truncate file */
-
+ case 2: /* no space for deleting */
err = -ENOSPC;
- if (rep++)
- break;
-
- dentry_unhash(dentry);
- if (!d_unhashed(dentry)) {
- hpfs_unlock(dir->i_sb);
- return -ENOSPC;
- }
- if (generic_permission(inode, MAY_WRITE) ||
- !S_ISREG(inode->i_mode) ||
- get_write_access(inode)) {
- d_rehash(dentry);
- } else {
- struct iattr newattrs;
- /*pr_info("truncating file before delete.\n");*/
- newattrs.ia_size = 0;
- newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
- err = notify_change(dentry, &newattrs, NULL);
- put_write_access(inode);
- if (!err)
- goto again;
- }
- hpfs_unlock(dir->i_sb);
- return -ENOSPC;
+ break;
default:
drop_nlink(inode);
err = 0;
diff --git a/fs/inode.c b/fs/inode.c
index 9f62db3..69b8b52 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -154,6 +154,12 @@
inode->i_rdev = 0;
inode->dirtied_when = 0;
+#ifdef CONFIG_CGROUP_WRITEBACK
+ inode->i_wb_frn_winner = 0;
+ inode->i_wb_frn_avg_time = 0;
+ inode->i_wb_frn_history = 0;
+#endif
+
if (security_inode_alloc(inode))
goto out;
spin_lock_init(&inode->i_lock);
diff --git a/fs/namei.c b/fs/namei.c
index f624d13..9c590e0 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1712,6 +1712,11 @@
return 0;
if (!follow)
return 0;
+ /* make sure that d_is_symlink above matches inode */
+ if (nd->flags & LOOKUP_RCU) {
+ if (read_seqcount_retry(&link->dentry->d_seq, seq))
+ return -ECHILD;
+ }
return pick_link(nd, link, inode, seq);
}
@@ -1743,11 +1748,11 @@
if (err < 0)
return err;
- inode = d_backing_inode(path.dentry);
seq = 0; /* we are already out of RCU mode */
err = -ENOENT;
if (d_is_negative(path.dentry))
goto out_path_put;
+ inode = d_backing_inode(path.dentry);
}
if (flags & WALK_PUT)
@@ -3192,12 +3197,12 @@
return error;
BUG_ON(nd->flags & LOOKUP_RCU);
- inode = d_backing_inode(path.dentry);
seq = 0; /* out of RCU mode, so the value doesn't matter */
if (unlikely(d_is_negative(path.dentry))) {
path_to_nameidata(&path, nd);
return -ENOENT;
}
+ inode = d_backing_inode(path.dentry);
finish_lookup:
if (nd->depth)
put_link(nd);
@@ -3206,11 +3211,6 @@
if (unlikely(error))
return error;
- if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
- path_to_nameidata(&path, nd);
- return -ELOOP;
- }
-
if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
path_to_nameidata(&path, nd);
} else {
@@ -3229,6 +3229,10 @@
return error;
}
audit_inode(nd->name, nd->path.dentry, 0);
+ if (unlikely(d_is_symlink(nd->path.dentry)) && !(open_flag & O_PATH)) {
+ error = -ELOOP;
+ goto out;
+ }
error = -EISDIR;
if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
goto out;
@@ -3273,6 +3277,10 @@
goto exit_fput;
}
out:
+ if (unlikely(error > 0)) {
+ WARN_ON(1);
+ error = -EINVAL;
+ }
if (got_write)
mnt_drop_write(nd->path.mnt);
path_put(&save_parent);
diff --git a/fs/nfs/blocklayout/extent_tree.c b/fs/nfs/blocklayout/extent_tree.c
index c59a59c..35ab51c 100644
--- a/fs/nfs/blocklayout/extent_tree.c
+++ b/fs/nfs/blocklayout/extent_tree.c
@@ -476,6 +476,7 @@
for (i = 0; i < nr_pages; i++)
put_page(arg->layoutupdate_pages[i]);
+ vfree(arg->start_p);
kfree(arg->layoutupdate_pages);
} else {
put_page(arg->layoutupdate_page);
@@ -559,10 +560,15 @@
if (unlikely(arg->layoutupdate_pages != &arg->layoutupdate_page)) {
void *p = start_p, *end = p + arg->layoutupdate_len;
+ struct page *page = NULL;
int i = 0;
- for ( ; p < end; p += PAGE_SIZE)
- arg->layoutupdate_pages[i++] = vmalloc_to_page(p);
+ arg->start_p = start_p;
+ for ( ; p < end; p += PAGE_SIZE) {
+ page = vmalloc_to_page(p);
+ arg->layoutupdate_pages[i++] = page;
+ get_page(page);
+ }
}
dprintk("%s found %zu ranges\n", __func__, count);
diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c
index bd25dc7..dff8346 100644
--- a/fs/nfs/nfs42proc.c
+++ b/fs/nfs/nfs42proc.c
@@ -16,29 +16,8 @@
#define NFSDBG_FACILITY NFSDBG_PROC
-static int nfs42_set_rw_stateid(nfs4_stateid *dst, struct file *file,
- fmode_t fmode)
-{
- struct nfs_open_context *open;
- struct nfs_lock_context *lock;
- int ret;
-
- open = get_nfs_open_context(nfs_file_open_context(file));
- lock = nfs_get_lock_context(open);
- if (IS_ERR(lock)) {
- put_nfs_open_context(open);
- return PTR_ERR(lock);
- }
-
- ret = nfs4_set_rw_stateid(dst, open, lock, fmode);
-
- nfs_put_lock_context(lock);
- put_nfs_open_context(open);
- return ret;
-}
-
static int _nfs42_proc_fallocate(struct rpc_message *msg, struct file *filep,
- loff_t offset, loff_t len)
+ struct nfs_lock_context *lock, loff_t offset, loff_t len)
{
struct inode *inode = file_inode(filep);
struct nfs_server *server = NFS_SERVER(inode);
@@ -56,7 +35,8 @@
msg->rpc_argp = &args;
msg->rpc_resp = &res;
- status = nfs42_set_rw_stateid(&args.falloc_stateid, filep, FMODE_WRITE);
+ status = nfs4_set_rw_stateid(&args.falloc_stateid, lock->open_context,
+ lock, FMODE_WRITE);
if (status)
return status;
@@ -78,15 +58,26 @@
{
struct nfs_server *server = NFS_SERVER(file_inode(filep));
struct nfs4_exception exception = { };
+ struct nfs_lock_context *lock;
int err;
+ lock = nfs_get_lock_context(nfs_file_open_context(filep));
+ if (IS_ERR(lock))
+ return PTR_ERR(lock);
+
+ exception.inode = file_inode(filep);
+ exception.state = lock->open_context->state;
+
do {
- err = _nfs42_proc_fallocate(msg, filep, offset, len);
- if (err == -ENOTSUPP)
- return -EOPNOTSUPP;
+ err = _nfs42_proc_fallocate(msg, filep, lock, offset, len);
+ if (err == -ENOTSUPP) {
+ err = -EOPNOTSUPP;
+ break;
+ }
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
+ nfs_put_lock_context(lock);
return err;
}
@@ -135,7 +126,8 @@
return err;
}
-static loff_t _nfs42_proc_llseek(struct file *filep, loff_t offset, int whence)
+static loff_t _nfs42_proc_llseek(struct file *filep,
+ struct nfs_lock_context *lock, loff_t offset, int whence)
{
struct inode *inode = file_inode(filep);
struct nfs42_seek_args args = {
@@ -156,7 +148,8 @@
if (!nfs_server_capable(inode, NFS_CAP_SEEK))
return -ENOTSUPP;
- status = nfs42_set_rw_stateid(&args.sa_stateid, filep, FMODE_READ);
+ status = nfs4_set_rw_stateid(&args.sa_stateid, lock->open_context,
+ lock, FMODE_READ);
if (status)
return status;
@@ -175,17 +168,28 @@
{
struct nfs_server *server = NFS_SERVER(file_inode(filep));
struct nfs4_exception exception = { };
+ struct nfs_lock_context *lock;
loff_t err;
+ lock = nfs_get_lock_context(nfs_file_open_context(filep));
+ if (IS_ERR(lock))
+ return PTR_ERR(lock);
+
+ exception.inode = file_inode(filep);
+ exception.state = lock->open_context->state;
+
do {
- err = _nfs42_proc_llseek(filep, offset, whence);
+ err = _nfs42_proc_llseek(filep, lock, offset, whence);
if (err >= 0)
break;
- if (err == -ENOTSUPP)
- return -EOPNOTSUPP;
+ if (err == -ENOTSUPP) {
+ err = -EOPNOTSUPP;
+ break;
+ }
err = nfs4_handle_exception(server, err, &exception);
} while (exception.retry);
+ nfs_put_lock_context(lock);
return err;
}
@@ -298,8 +302,9 @@
}
static int _nfs42_proc_clone(struct rpc_message *msg, struct file *src_f,
- struct file *dst_f, loff_t src_offset,
- loff_t dst_offset, loff_t count)
+ struct file *dst_f, struct nfs_lock_context *src_lock,
+ struct nfs_lock_context *dst_lock, loff_t src_offset,
+ loff_t dst_offset, loff_t count)
{
struct inode *src_inode = file_inode(src_f);
struct inode *dst_inode = file_inode(dst_f);
@@ -320,11 +325,13 @@
msg->rpc_argp = &args;
msg->rpc_resp = &res;
- status = nfs42_set_rw_stateid(&args.src_stateid, src_f, FMODE_READ);
+ status = nfs4_set_rw_stateid(&args.src_stateid, src_lock->open_context,
+ src_lock, FMODE_READ);
if (status)
return status;
- status = nfs42_set_rw_stateid(&args.dst_stateid, dst_f, FMODE_WRITE);
+ status = nfs4_set_rw_stateid(&args.dst_stateid, dst_lock->open_context,
+ dst_lock, FMODE_WRITE);
if (status)
return status;
@@ -349,22 +356,48 @@
};
struct inode *inode = file_inode(src_f);
struct nfs_server *server = NFS_SERVER(file_inode(src_f));
- struct nfs4_exception exception = { };
- int err;
+ struct nfs_lock_context *src_lock;
+ struct nfs_lock_context *dst_lock;
+ struct nfs4_exception src_exception = { };
+ struct nfs4_exception dst_exception = { };
+ int err, err2;
if (!nfs_server_capable(inode, NFS_CAP_CLONE))
return -EOPNOTSUPP;
+ src_lock = nfs_get_lock_context(nfs_file_open_context(src_f));
+ if (IS_ERR(src_lock))
+ return PTR_ERR(src_lock);
+
+ src_exception.inode = file_inode(src_f);
+ src_exception.state = src_lock->open_context->state;
+
+ dst_lock = nfs_get_lock_context(nfs_file_open_context(dst_f));
+ if (IS_ERR(dst_lock)) {
+ err = PTR_ERR(dst_lock);
+ goto out_put_src_lock;
+ }
+
+ dst_exception.inode = file_inode(dst_f);
+ dst_exception.state = dst_lock->open_context->state;
+
do {
- err = _nfs42_proc_clone(&msg, src_f, dst_f, src_offset,
- dst_offset, count);
+ err = _nfs42_proc_clone(&msg, src_f, dst_f, src_lock, dst_lock,
+ src_offset, dst_offset, count);
if (err == -ENOTSUPP || err == -EOPNOTSUPP) {
NFS_SERVER(inode)->caps &= ~NFS_CAP_CLONE;
- return -EOPNOTSUPP;
+ err = -EOPNOTSUPP;
+ break;
}
- err = nfs4_handle_exception(server, err, &exception);
- } while (exception.retry);
+ err2 = nfs4_handle_exception(server, err, &src_exception);
+ err = nfs4_handle_exception(server, err, &dst_exception);
+ if (!err)
+ err = err2;
+ } while (src_exception.retry || dst_exception.retry);
+
+ nfs_put_lock_context(dst_lock);
+out_put_src_lock:
+ nfs_put_lock_context(src_lock);
return err;
-
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index 4bfc33a..1488159 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -2466,9 +2466,9 @@
dentry = d_add_unique(dentry, igrab(state->inode));
if (dentry == NULL) {
dentry = opendata->dentry;
- } else if (dentry != ctx->dentry) {
+ } else {
dput(ctx->dentry);
- ctx->dentry = dget(dentry);
+ ctx->dentry = dentry;
}
nfs_set_verifier(dentry,
nfs_save_change_attribute(d_inode(opendata->dir)));
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 482b6e9..2fa483e 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -252,6 +252,27 @@
}
}
+/*
+ * Mark a pnfs_layout_hdr and all associated layout segments as invalid
+ *
+ * In order to continue using the pnfs_layout_hdr, a full recovery
+ * is required.
+ * Note that caller must hold inode->i_lock.
+ */
+static int
+pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
+ struct list_head *lseg_list)
+{
+ struct pnfs_layout_range range = {
+ .iomode = IOMODE_ANY,
+ .offset = 0,
+ .length = NFS4_MAX_UINT64,
+ };
+
+ set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+ return pnfs_mark_matching_lsegs_invalid(lo, lseg_list, &range);
+}
+
static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
@@ -554,9 +575,8 @@
spin_lock(&nfsi->vfs_inode.i_lock);
lo = nfsi->layout;
if (lo) {
- lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
- pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
pnfs_get_layout_hdr(lo);
+ pnfs_mark_layout_stateid_invalid(lo, &tmp_list);
pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
spin_unlock(&nfsi->vfs_inode.i_lock);
@@ -617,11 +637,6 @@
{
struct pnfs_layout_hdr *lo;
struct inode *inode;
- struct pnfs_layout_range range = {
- .iomode = IOMODE_ANY,
- .offset = 0,
- .length = NFS4_MAX_UINT64,
- };
LIST_HEAD(lseg_list);
int ret = 0;
@@ -636,11 +651,11 @@
spin_lock(&inode->i_lock);
list_del_init(&lo->plh_bulk_destroy);
- lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
- if (is_bulk_recall)
- set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
- if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
+ if (pnfs_mark_layout_stateid_invalid(lo, &lseg_list)) {
+ if (is_bulk_recall)
+ set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
ret = -EAGAIN;
+ }
spin_unlock(&inode->i_lock);
pnfs_free_lseg_list(&lseg_list);
/* Free all lsegs that are attached to commit buckets */
@@ -1738,8 +1753,19 @@
if (lo->plh_return_iomode != 0)
iomode = IOMODE_ANY;
lo->plh_return_iomode = iomode;
+ set_bit(NFS_LAYOUT_RETURN_REQUESTED, &lo->plh_flags);
}
+/**
+ * pnfs_mark_matching_lsegs_return - Free or return matching layout segments
+ * @lo: pointer to layout header
+ * @tmp_list: list header to be used with pnfs_free_lseg_list()
+ * @return_range: describe layout segment ranges to be returned
+ *
+ * This function is mainly intended for use by layoutrecall. It attempts
+ * to free each matching layout segment immediately, or else marks it for
+ * return as soon as its reference count drops to zero.
+ */
int
pnfs_mark_matching_lsegs_return(struct pnfs_layout_hdr *lo,
struct list_head *tmp_list,
@@ -1762,12 +1788,11 @@
lseg, lseg->pls_range.iomode,
lseg->pls_range.offset,
lseg->pls_range.length);
+ if (mark_lseg_invalid(lseg, tmp_list))
+ continue;
+ remaining++;
set_bit(NFS_LSEG_LAYOUTRETURN, &lseg->pls_flags);
pnfs_set_plh_return_iomode(lo, return_range->iomode);
- if (!mark_lseg_invalid(lseg, tmp_list))
- remaining++;
- set_bit(NFS_LAYOUT_RETURN_REQUESTED,
- &lo->plh_flags);
}
return remaining;
}
diff --git a/fs/notify/mark.c b/fs/notify/mark.c
index cfcbf11..7115c5d 100644
--- a/fs/notify/mark.c
+++ b/fs/notify/mark.c
@@ -91,7 +91,14 @@
#include <linux/fsnotify_backend.h>
#include "fsnotify.h"
+#define FSNOTIFY_REAPER_DELAY (1) /* 1 jiffy */
+
struct srcu_struct fsnotify_mark_srcu;
+static DEFINE_SPINLOCK(destroy_lock);
+static LIST_HEAD(destroy_list);
+
+static void fsnotify_mark_destroy(struct work_struct *work);
+static DECLARE_DELAYED_WORK(reaper_work, fsnotify_mark_destroy);
void fsnotify_get_mark(struct fsnotify_mark *mark)
{
@@ -165,19 +172,10 @@
atomic_dec(&group->num_marks);
}
-static void
-fsnotify_mark_free_rcu(struct rcu_head *rcu)
-{
- struct fsnotify_mark *mark;
-
- mark = container_of(rcu, struct fsnotify_mark, g_rcu);
- fsnotify_put_mark(mark);
-}
-
/*
- * Free fsnotify mark. The freeing is actually happening from a call_srcu
- * callback. Caller must have a reference to the mark or be protected by
- * fsnotify_mark_srcu.
+ * Free fsnotify mark. The freeing actually happens from a workqueue job
+ * which first waits for the SRCU grace period to end. Caller must have a
+ * reference to the mark or be protected by fsnotify_mark_srcu.
*/
void fsnotify_free_mark(struct fsnotify_mark *mark)
{
@@ -192,7 +190,11 @@
mark->flags &= ~FSNOTIFY_MARK_FLAG_ALIVE;
spin_unlock(&mark->lock);
- call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu);
+ spin_lock(&destroy_lock);
+ list_add(&mark->g_list, &destroy_list);
+ spin_unlock(&destroy_lock);
+ queue_delayed_work(system_unbound_wq, &reaper_work,
+ FSNOTIFY_REAPER_DELAY);
/*
* Some groups like to know that marks are being freed. This is a
@@ -388,7 +390,12 @@
spin_unlock(&mark->lock);
- call_srcu(&fsnotify_mark_srcu, &mark->g_rcu, fsnotify_mark_free_rcu);
+ spin_lock(&destroy_lock);
+ list_add(&mark->g_list, &destroy_list);
+ spin_unlock(&destroy_lock);
+ queue_delayed_work(system_unbound_wq, &reaper_work,
+ FSNOTIFY_REAPER_DELAY);
+
return ret;
}
@@ -491,3 +498,21 @@
atomic_set(&mark->refcnt, 1);
mark->free_mark = free_mark;
}
+
+static void fsnotify_mark_destroy(struct work_struct *work)
+{
+ struct fsnotify_mark *mark, *next;
+ struct list_head private_destroy_list;
+
+ spin_lock(&destroy_lock);
+ /* exchange the list head */
+ list_replace_init(&destroy_list, &private_destroy_list);
+ spin_unlock(&destroy_lock);
+
+ synchronize_srcu(&fsnotify_mark_srcu);
+
+ list_for_each_entry_safe(mark, next, &private_destroy_list, g_list) {
+ list_del_init(&mark->g_list);
+ fsnotify_put_mark(mark);
+ }
+}
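
The reaper batches marks on destroy_list and pays for a single synchronize_srcu() per batch instead of one call_srcu() per mark; list_replace_init() snapshots the shared list under the lock so the grace-period wait and the final put happen lock-free on a private list. The snapshot-and-drain pattern in generic form (pthread mutex standing in for the spinlock; illustrative only):

#include <pthread.h>
#include <stddef.h>

struct node { struct node *next; };

static pthread_mutex_t destroy_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *destroy_list; /* shared, lock-protected */

static void reap_all(void (*release)(struct node *))
{
	struct node *batch, *next;

	/* Snapshot and empty the shared list under the lock... */
	pthread_mutex_lock(&destroy_lock);
	batch = destroy_list;
	destroy_list = NULL;
	pthread_mutex_unlock(&destroy_lock);

	/* ...wait once for all readers here (synchronize_srcu() in the
	 * kernel version), then walk the private batch without the lock. */
	for (; batch; batch = next) {
		next = batch->next;
		release(batch);
	}
}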
diff --git a/fs/ocfs2/aops.c b/fs/ocfs2/aops.c
index 794fd15..cda0361 100644
--- a/fs/ocfs2/aops.c
+++ b/fs/ocfs2/aops.c
@@ -956,6 +956,7 @@
tmp_ret = ocfs2_del_inode_from_orphan(osb, inode, di_bh,
update_isize, end);
if (tmp_ret < 0) {
+ ocfs2_inode_unlock(inode, 1);
ret = tmp_ret;
mlog_errno(ret);
brelse(di_bh);
diff --git a/fs/pnode.c b/fs/pnode.c
index 6367e1e..c524fdd 100644
--- a/fs/pnode.c
+++ b/fs/pnode.c
@@ -202,6 +202,11 @@
static struct mountpoint *mp;
static struct hlist_head *list;
+static inline bool peers(struct mount *m1, struct mount *m2)
+{
+ return m1->mnt_group_id == m2->mnt_group_id && m1->mnt_group_id;
+}
+
static int propagate_one(struct mount *m)
{
struct mount *child;
@@ -212,7 +217,7 @@
/* skip if mountpoint isn't covered by it */
if (!is_subdir(mp->m_dentry, m->mnt.mnt_root))
return 0;
- if (m->mnt_group_id == last_dest->mnt_group_id) {
+ if (peers(m, last_dest)) {
type = CL_MAKE_SHARED;
} else {
struct mount *n, *p;
@@ -223,7 +228,7 @@
last_source = last_source->mnt_master;
last_dest = last_source->mnt_parent;
}
- if (n->mnt_group_id != last_dest->mnt_group_id) {
+ if (!peers(n, last_dest)) {
last_source = last_source->mnt_master;
last_dest = last_source->mnt_parent;
}
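
The old comparison treated any two mounts with equal group ids as peers, but mnt_group_id is 0 for every mount that is not shared, so two unrelated private mounts matched. peers() requires the ids to match and be nonzero; the difference is easy to demonstrate (illustrative):

#include <assert.h>
#include <stdbool.h>

/* Old check: two non-shared mounts (group id 0) look like peers. */
static bool peers_old(int id1, int id2) { return id1 == id2; }

/* Fixed check: ids must match and be nonzero, i.e. actually shared. */
static bool peers_new(int id1, int id2) { return id1 == id2 && id1 != 0; }

int main(void)
{
	assert(peers_old(0, 0));	/* false positive */
	assert(!peers_new(0, 0));	/* correctly rejected */
	assert(peers_new(7, 7));	/* a real peer group still matches */
	return 0;
}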
diff --git a/fs/read_write.c b/fs/read_write.c
index 324ec27..dadf24e 100644
--- a/fs/read_write.c
+++ b/fs/read_write.c
@@ -17,6 +17,7 @@
#include <linux/splice.h>
#include <linux/compat.h>
#include <linux/mount.h>
+#include <linux/fs.h>
#include "internal.h"
#include <asm/uaccess.h>
@@ -183,7 +184,7 @@
switch (whence) {
case SEEK_SET: case SEEK_CUR:
return generic_file_llseek_size(file, offset, whence,
- ~0ULL, 0);
+ OFFSET_MAX, 0);
default:
return -EINVAL;
}
@@ -1532,10 +1533,12 @@
if (!(file_in->f_mode & FMODE_READ) ||
!(file_out->f_mode & FMODE_WRITE) ||
- (file_out->f_flags & O_APPEND) ||
- !file_in->f_op->clone_file_range)
+ (file_out->f_flags & O_APPEND))
return -EBADF;
+ if (!file_in->f_op->clone_file_range)
+ return -EOPNOTSUPP;
+
ret = clone_verify_area(file_in, pos_in, len, false);
if (ret)
return ret;
diff --git a/fs/xattr.c b/fs/xattr.c
index 07d0e47..4861322 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -940,7 +940,7 @@
bool trusted = capable(CAP_SYS_ADMIN);
struct simple_xattr *xattr;
ssize_t remaining_size = size;
- int err;
+ int err = 0;
#ifdef CONFIG_FS_POSIX_ACL
if (inode->i_acl) {
@@ -965,11 +965,11 @@
err = xattr_list_one(&buffer, &remaining_size, xattr->name);
if (err)
- return err;
+ break;
}
spin_unlock(&xattrs->lock);
- return size - remaining_size;
+ return err ? err : size - remaining_size;
}
/*
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 379c089..a9ebabfe 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -55,7 +55,7 @@
} while ((bh = bh->b_this_page) != head);
}
-STATIC struct block_device *
+struct block_device *
xfs_find_bdev_for_inode(
struct inode *inode)
{
@@ -1208,6 +1208,10 @@
struct writeback_control *wbc)
{
xfs_iflags_clear(XFS_I(mapping->host), XFS_ITRUNCATED);
+ if (dax_mapping(mapping))
+ return dax_writeback_mapping_range(mapping,
+ xfs_find_bdev_for_inode(mapping->host), wbc);
+
return generic_writepages(mapping, wbc);
}
diff --git a/fs/xfs/xfs_aops.h b/fs/xfs/xfs_aops.h
index f6ffc9a..a4343c6 100644
--- a/fs/xfs/xfs_aops.h
+++ b/fs/xfs/xfs_aops.h
@@ -62,5 +62,6 @@
struct buffer_head *map_bh, int create);
extern void xfs_count_page_state(struct page *, int *, int *);
+extern struct block_device *xfs_find_bdev_for_inode(struct inode *);
#endif /* __XFS_AOPS_H__ */
diff --git a/fs/xfs/xfs_bmap_util.c b/fs/xfs/xfs_bmap_util.c
index 45ec9e4..6c87601 100644
--- a/fs/xfs/xfs_bmap_util.c
+++ b/fs/xfs/xfs_bmap_util.c
@@ -75,7 +75,8 @@
ssize_t size = XFS_FSB_TO_B(mp, count_fsb);
if (IS_DAX(VFS_I(ip)))
- return dax_clear_blocks(VFS_I(ip), block, size);
+ return dax_clear_sectors(xfs_find_bdev_for_inode(VFS_I(ip)),
+ sector, size);
/*
* let the block layer decide on the fastest method of
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index da37beb..594f7e6 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -4491,7 +4491,7 @@
* know precisely what failed.
*/
if (pass == XLOG_RECOVER_CRCPASS) {
- if (rhead->h_crc && crc != le32_to_cpu(rhead->h_crc))
+ if (rhead->h_crc && crc != rhead->h_crc)
return -EFSBADCRC;
return 0;
}
@@ -4502,7 +4502,7 @@
* zero CRC check prevents warnings from being emitted when upgrading
* the kernel from one that does not add CRCs by default.
*/
- if (crc != le32_to_cpu(rhead->h_crc)) {
+ if (crc != rhead->h_crc) {
if (rhead->h_crc || xfs_sb_version_hascrc(&log->l_mp->m_sb)) {
xfs_alert(log->l_mp,
"log record CRC mismatch: found 0x%x, expected 0x%x.",
diff --git a/include/asm-generic/cputime_nsecs.h b/include/asm-generic/cputime_nsecs.h
index 0419485..0f1c6f3 100644
--- a/include/asm-generic/cputime_nsecs.h
+++ b/include/asm-generic/cputime_nsecs.h
@@ -75,7 +75,7 @@
*/
static inline cputime_t timespec_to_cputime(const struct timespec *val)
{
- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC + val->tv_nsec;
return (__force cputime_t) ret;
}
static inline void cputime_to_timespec(const cputime_t ct, struct timespec *val)
@@ -91,7 +91,8 @@
*/
static inline cputime_t timeval_to_cputime(const struct timeval *val)
{
- u64 ret = val->tv_sec * NSEC_PER_SEC + val->tv_usec * NSEC_PER_USEC;
+ u64 ret = (u64)val->tv_sec * NSEC_PER_SEC +
+ val->tv_usec * NSEC_PER_USEC;
return (__force cputime_t) ret;
}
static inline void cputime_to_timeval(const cputime_t ct, struct timeval *val)
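
The (u64) casts matter on 32-bit configurations, where tv_sec is a 32-bit long: the multiplication by NSEC_PER_SEC is then performed in 32 bits and wraps for values of only a few seconds before being widened to u64. A small demonstration of widening before versus after the multiply (unsigned arithmetic used so the wraparound is well defined):

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000UL

int main(void)
{
	uint32_t tv_sec = 5; /* stands in for a 32-bit time value */

	/* Broken: the product wraps in 32 bits, then gets widened. */
	uint64_t bad = (uint32_t)(tv_sec * (uint32_t)NSEC_PER_SEC);

	/* Fixed: widen first, multiply in 64 bits. */
	uint64_t good = (uint64_t)tv_sec * NSEC_PER_SEC;

	/* Prints bad=705032704 good=5000000000. */
	printf("bad=%llu good=%llu\n",
	       (unsigned long long)bad, (unsigned long long)good);
	return 0;
}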
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 0b3c0d3..c370b26 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -239,6 +239,14 @@
pmd_t *pmdp);
#endif
+#ifndef __HAVE_ARCH_PMDP_HUGE_SPLIT_PREPARE
+static inline void pmdp_huge_split_prepare(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+
+}
+#endif
+
#ifndef __HAVE_ARCH_PTE_SAME
static inline int pte_same(pte_t pte_a, pte_t pte_b)
{
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index c65a212..c5b4b81 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -1166,6 +1166,7 @@
struct drm_mode_object base;
char *name;
+ int connector_id;
int connector_type;
int connector_type_id;
bool interlace_allowed;
@@ -2047,6 +2048,7 @@
struct list_head fb_list;
int num_connector;
+ struct ida connector_ida;
struct list_head connector_list;
int num_encoder;
struct list_head encoder_list;
@@ -2200,7 +2202,11 @@
void drm_connector_unregister(struct drm_connector *connector);
extern void drm_connector_cleanup(struct drm_connector *connector);
-extern unsigned int drm_connector_index(struct drm_connector *connector);
+static inline unsigned drm_connector_index(struct drm_connector *connector)
+{
+ return connector->connector_id;
+}
+
/* helper to unplug all connectors from sysfs for device */
extern void drm_connector_unplug_all(struct drm_device *dev);
diff --git a/include/dt-bindings/clock/tegra210-car.h b/include/dt-bindings/clock/tegra210-car.h
index 6f45aea..0a05b0d 100644
--- a/include/dt-bindings/clock/tegra210-car.h
+++ b/include/dt-bindings/clock/tegra210-car.h
@@ -126,7 +126,7 @@
/* 104 */
/* 105 */
#define TEGRA210_CLK_D_AUDIO 106
-/* 107 ( affects abp -> ape) */
+#define TEGRA210_CLK_APB2APE 107
/* 108 */
/* 109 */
/* 110 */
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index 1800227..b651aed 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -55,6 +55,9 @@
/* VGIC mapping */
struct irq_phys_map *map;
+
+ /* Active IRQ state caching */
+ bool active_cleared_last;
};
int kvm_timer_hyp_init(void);
@@ -74,4 +77,6 @@
void kvm_timer_schedule(struct kvm_vcpu *vcpu);
void kvm_timer_unschedule(struct kvm_vcpu *vcpu);
+void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu);
+
#endif
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
new file mode 100644
index 0000000..fe389ac
--- /dev/null
+++ b/include/kvm/arm_pmu.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Shannon Zhao <shannon.zhao@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __ASM_ARM_KVM_PMU_H
+#define __ASM_ARM_KVM_PMU_H
+
+#ifdef CONFIG_KVM_ARM_PMU
+
+#include <linux/perf_event.h>
+#include <asm/perf_event.h>
+
+#define ARMV8_PMU_CYCLE_IDX (ARMV8_PMU_MAX_COUNTERS - 1)
+
+struct kvm_pmc {
+ u8 idx; /* index into the pmu->pmc array */
+ struct perf_event *perf_event;
+ u64 bitmask;
+};
+
+struct kvm_pmu {
+ int irq_num;
+ struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
+ bool ready;
+ bool irq_level;
+};
+
+#define kvm_arm_pmu_v3_ready(v) ((v)->arch.pmu.ready)
+#define kvm_arm_pmu_irq_initialized(v) ((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
+u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
+void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
+u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
+void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
+void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
+void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
+void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
+void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
+ u64 select_idx);
+bool kvm_arm_support_pmu_v3(void);
+int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr);
+#else
+struct kvm_pmu {
+};
+
+#define kvm_arm_pmu_v3_ready(v) (false)
+#define kvm_arm_pmu_irq_initialized(v) (false)
+static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
+ u64 select_idx)
+{
+ return 0;
+}
+static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
+ u64 select_idx, u64 val) {}
+static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
+static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
+static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
+ u64 data, u64 select_idx) {}
+static inline bool kvm_arm_support_pmu_v3(void) { return false; }
+static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ return -ENXIO;
+}
+static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ return -ENXIO;
+}
+static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
+ struct kvm_device_attr *attr)
+{
+ return -ENXIO;
+}
+#endif
+
+#endif
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index 13a3d53..281caf8 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -279,12 +279,6 @@
u32 vgic_lr[VGIC_V2_MAX_LRS];
};
-/*
- * LRs are stored in reverse order in memory. make sure we index them
- * correctly.
- */
-#define VGIC_V3_LR_INDEX(lr) (VGIC_V3_MAX_LRS - 1 - lr)
-
struct vgic_v3_cpu_if {
#ifdef CONFIG_KVM_ARM_VGIC_V3
u32 vgic_hcr;
@@ -321,6 +315,8 @@
/* Protected by the distributor's irq_phys_map_lock */
struct list_head irq_phys_map_list;
+
+ u64 live_lrs;
};
#define LR_EMPTY 0xff
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 29189ae..4571ef1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -682,9 +682,12 @@
/*
* q->prep_rq_fn return values
*/
-#define BLKPREP_OK 0 /* serve it */
-#define BLKPREP_KILL 1 /* fatal error, kill */
-#define BLKPREP_DEFER 2 /* leave on queue */
+enum {
+ BLKPREP_OK, /* serve it */
+ BLKPREP_KILL, /* fatal error, kill, return -EIO */
+ BLKPREP_DEFER, /* leave on queue */
+ BLKPREP_INVALID, /* invalid command, kill, return -EREMOTEIO */
+};
extern unsigned long blk_max_low_pfn, blk_max_pfn;
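
Turning the BLKPREP_* defines into an enum adds BLKPREP_INVALID, which kills the request like BLKPREP_KILL but reports -EREMOTEIO instead of -EIO, per the comments above. A hypothetical mapping helper makes those annotations concrete (sketch only; blkprep_to_errno is not an existing kernel function):

#include <errno.h>

enum { BLKPREP_OK, BLKPREP_KILL, BLKPREP_DEFER, BLKPREP_INVALID };

/* Hypothetical: translate a prep result into the errno the request
 * would be completed with, per the blkdev.h comments above. */
static int blkprep_to_errno(int prep)
{
	switch (prep) {
	case BLKPREP_KILL:
		return -EIO;
	case BLKPREP_INVALID:
		return -EREMOTEIO;
	default:
		return 0; /* BLKPREP_OK is served, BLKPREP_DEFER requeued */
	}
}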
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 7f540f7..789471d 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -127,6 +127,12 @@
*/
u64 serial_nr;
+ /*
+ * Incremented by online self and children. Used to guarantee that
+ * parents are not offlined before their children.
+ */
+ atomic_t online_cnt;
+
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index 00b042c..48f5aab 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -144,7 +144,7 @@
*/
#define if(cond, ...) __trace_if( (cond , ## __VA_ARGS__) )
#define __trace_if(cond) \
- if (__builtin_constant_p((cond)) ? !!(cond) : \
+ if (__builtin_constant_p(!!(cond)) ? !!(cond) : \
({ \
int ______r; \
static struct ftrace_branch_data \
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 85a868c..fea160e 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -137,6 +137,8 @@
task_unlock(current);
}
+extern void cpuset_post_attach_flush(void);
+
#else /* !CONFIG_CPUSETS */
static inline bool cpusets_enabled(void) { return false; }
@@ -243,6 +245,10 @@
return false;
}
+static inline void cpuset_post_attach_flush(void)
+{
+}
+
#endif /* !CONFIG_CPUSETS */
#endif /* _LINUX_CPUSET_H */
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 818e450..636dd59 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,7 +7,7 @@
ssize_t dax_do_io(struct kiocb *, struct inode *, struct iov_iter *, loff_t,
get_block_t, dio_iodone_t, int flags);
-int dax_clear_blocks(struct inode *, sector_t block, long size);
+int dax_clear_sectors(struct block_device *bdev, sector_t _sector, long _size);
int dax_zero_page_range(struct inode *, loff_t from, unsigned len, get_block_t);
int dax_truncate_page(struct inode *, loff_t from, get_block_t);
int dax_fault(struct vm_area_struct *, struct vm_fault *, get_block_t,
@@ -52,6 +52,8 @@
{
return mapping->host && IS_DAX(mapping->host);
}
-int dax_writeback_mapping_range(struct address_space *mapping, loff_t start,
- loff_t end);
+
+struct writeback_control;
+int dax_writeback_mapping_range(struct address_space *mapping,
+ struct block_device *bdev, struct writeback_control *wbc);
#endif
diff --git a/include/linux/devpts_fs.h b/include/linux/devpts_fs.h
index 251a209..e0ee0b3 100644
--- a/include/linux/devpts_fs.h
+++ b/include/linux/devpts_fs.h
@@ -19,6 +19,8 @@
int devpts_new_index(struct inode *ptmx_inode);
void devpts_kill_index(struct inode *ptmx_inode, int idx);
+void devpts_add_ref(struct inode *ptmx_inode);
+void devpts_del_ref(struct inode *ptmx_inode);
/* mknod in devpts */
struct inode *devpts_pty_new(struct inode *ptmx_inode, dev_t device, int index,
void *priv);
@@ -32,6 +34,8 @@
/* Dummy stubs in the no-pty case */
static inline int devpts_new_index(struct inode *ptmx_inode) { return -EINVAL; }
static inline void devpts_kill_index(struct inode *ptmx_inode, int idx) { }
+static inline void devpts_add_ref(struct inode *ptmx_inode) { }
+static inline void devpts_del_ref(struct inode *ptmx_inode) { }
static inline struct inode *devpts_pty_new(struct inode *ptmx_inode,
dev_t device, int index, void *priv)
{
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 569b5a8..47be3ad 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1199,7 +1199,10 @@
struct efivar_entry *efivar_entry_find(efi_char16_t *name, efi_guid_t guid,
struct list_head *head, bool remove);
-bool efivar_validate(efi_char16_t *var_name, u8 *data, unsigned long len);
+bool efivar_validate(efi_guid_t vendor, efi_char16_t *var_name, u8 *data,
+ unsigned long data_size);
+bool efivar_variable_is_removable(efi_guid_t vendor, const char *name,
+ size_t len);
extern struct work_struct efivar_work;
void efivar_run_worker(void);
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index 6b7e89f..533c440 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -220,10 +220,7 @@
/* List of marks by group->i_fsnotify_marks. Also reused for queueing
 * the mark into destroy_list when it's waiting for the end of the SRCU
 * period before it can be freed. [group->mark_mutex] */
- union {
- struct list_head g_list;
- struct rcu_head g_rcu;
- };
+ struct list_head g_list;
/* Protects inode / mnt pointers, flags, masks */
spinlock_t lock;
/* List of marks for inode / vfsmount [obj_lock] */
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 81de712..c2b340e 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -603,6 +603,7 @@
extern int skip_trace(unsigned long ip);
extern void ftrace_module_init(struct module *mod);
+extern void ftrace_module_enable(struct module *mod);
extern void ftrace_release_mod(struct module *mod);
extern void ftrace_disable_daemon(void);
@@ -612,8 +613,9 @@
static inline int ftrace_force_update(void) { return 0; }
static inline void ftrace_disable_daemon(void) { }
static inline void ftrace_enable_daemon(void) { }
-static inline void ftrace_release_mod(struct module *mod) {}
-static inline void ftrace_module_init(struct module *mod) {}
+static inline void ftrace_module_init(struct module *mod) { }
+static inline void ftrace_module_enable(struct module *mod) { }
+static inline void ftrace_release_mod(struct module *mod) { }
static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
{
return -EINVAL;
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index 821273c..2d9b6500 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -235,6 +235,9 @@
/* low 64 bit */
#define dma_frcd_page_addr(d) (d & (((u64)-1) << PAGE_SHIFT))
+/* PRS_REG */
+#define DMA_PRS_PPR ((u32)1)
+
#define IOMMU_WAIT_OP(iommu, offset, op, cond, sts) \
do { \
cycles_t start_time = get_cycles(); \
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 851821b..bec2abb 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -526,6 +526,7 @@
enum ata_lpm_hints {
ATA_LPM_EMPTY = (1 << 0), /* port empty/probing */
ATA_LPM_HIPM = (1 << 1), /* may use HIPM */
+ ATA_LPM_WAKE_ONLY = (1 << 2), /* only wake up link */
};
/* forward declarations */
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index bed40df..141ffdd 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -26,9 +26,8 @@
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
- ND_CMD_MAX_ELEM = 4,
+ ND_CMD_MAX_ELEM = 5,
ND_CMD_MAX_ENVELOPE = 16,
- ND_CMD_ARS_STATUS_MAX = SZ_4K,
ND_MAX_MAPPINGS = 32,
/* region flag indicating to direct-map persistent memory by default */
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index d675011..2190419 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -135,6 +135,10 @@
/* Memory types */
NVM_ID_FMTYPE_SLC = 0,
NVM_ID_FMTYPE_MLC = 1,
+
+ /* Device capabilities */
+ NVM_ID_DCAP_BBLKMGMT = 0x1,
+ NVM_UD_DCAP_ECC = 0x2,
};
struct nvm_id_lp_mlc {
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index c57e424..4dca42f 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -66,7 +66,7 @@
/*
* class-hash:
*/
- struct list_head hash_entry;
+ struct hlist_node hash_entry;
/*
* global list of all lock-classes:
@@ -199,7 +199,7 @@
u8 irq_context;
u8 depth;
u16 base;
- struct list_head entry;
+ struct hlist_node entry;
u64 chain_key;
};
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 430a929..a0e8cc8 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -44,6 +44,8 @@
#include <linux/timecounter.h>
+#define DEFAULT_UAR_PAGE_SHIFT 12
+
#define MAX_MSIX_P_PORT 17
#define MAX_MSIX 64
#define MIN_MSIX_P_PORT 5
@@ -856,6 +858,7 @@
u64 regid_promisc_array[MLX4_MAX_PORTS + 1];
u64 regid_allmulti_array[MLX4_MAX_PORTS + 1];
struct mlx4_vf_dev *dev_vfs;
+ u8 uar_page_shift;
};
struct mlx4_clock_params {
@@ -1528,4 +1531,14 @@
int mlx4_get_internal_clock_params(struct mlx4_dev *dev,
struct mlx4_clock_params *params);
+static inline int mlx4_to_hw_uar_index(struct mlx4_dev *dev, int index)
+{
+ return (index << (PAGE_SHIFT - dev->uar_page_shift));
+}
+
+static inline int mlx4_get_num_reserved_uar(struct mlx4_dev *dev)
+{
+ /* The first 128 UARs are used for EQ doorbells */
+ return (128 >> (PAGE_SHIFT - dev->uar_page_shift));
+}
#endif /* MLX4_DEVICE_H */
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 231ab6b..51f1e54 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -207,15 +207,15 @@
u8 outer_dmac[0x1];
u8 outer_smac[0x1];
u8 outer_ether_type[0x1];
- u8 reserved_0[0x1];
+ u8 reserved_at_3[0x1];
u8 outer_first_prio[0x1];
u8 outer_first_cfi[0x1];
u8 outer_first_vid[0x1];
- u8 reserved_1[0x1];
+ u8 reserved_at_7[0x1];
u8 outer_second_prio[0x1];
u8 outer_second_cfi[0x1];
u8 outer_second_vid[0x1];
- u8 reserved_2[0x1];
+ u8 reserved_at_b[0x1];
u8 outer_sip[0x1];
u8 outer_dip[0x1];
u8 outer_frag[0x1];
@@ -230,21 +230,21 @@
u8 outer_gre_protocol[0x1];
u8 outer_gre_key[0x1];
u8 outer_vxlan_vni[0x1];
- u8 reserved_3[0x5];
+ u8 reserved_at_1a[0x5];
u8 source_eswitch_port[0x1];
u8 inner_dmac[0x1];
u8 inner_smac[0x1];
u8 inner_ether_type[0x1];
- u8 reserved_4[0x1];
+ u8 reserved_at_23[0x1];
u8 inner_first_prio[0x1];
u8 inner_first_cfi[0x1];
u8 inner_first_vid[0x1];
- u8 reserved_5[0x1];
+ u8 reserved_at_27[0x1];
u8 inner_second_prio[0x1];
u8 inner_second_cfi[0x1];
u8 inner_second_vid[0x1];
- u8 reserved_6[0x1];
+ u8 reserved_at_2b[0x1];
u8 inner_sip[0x1];
u8 inner_dip[0x1];
u8 inner_frag[0x1];
@@ -256,37 +256,37 @@
u8 inner_tcp_sport[0x1];
u8 inner_tcp_dport[0x1];
u8 inner_tcp_flags[0x1];
- u8 reserved_7[0x9];
+ u8 reserved_at_37[0x9];
- u8 reserved_8[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_flow_table_prop_layout_bits {
u8 ft_support[0x1];
- u8 reserved_0[0x2];
+ u8 reserved_at_1[0x2];
u8 flow_modify_en[0x1];
u8 modify_root[0x1];
u8 identified_miss_table_mode[0x1];
u8 flow_table_modify[0x1];
- u8 reserved_1[0x19];
+ u8 reserved_at_7[0x19];
- u8 reserved_2[0x2];
+ u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
- u8 reserved_3[0x10];
+ u8 reserved_at_28[0x10];
u8 max_ft_level[0x8];
- u8 reserved_4[0x20];
+ u8 reserved_at_40[0x20];
- u8 reserved_5[0x18];
+ u8 reserved_at_60[0x18];
u8 log_max_ft_num[0x8];
- u8 reserved_6[0x18];
+ u8 reserved_at_80[0x18];
u8 log_max_destination[0x8];
- u8 reserved_7[0x18];
+ u8 reserved_at_a0[0x18];
u8 log_max_flow[0x8];
- u8 reserved_8[0x40];
+ u8 reserved_at_c0[0x40];
struct mlx5_ifc_flow_table_fields_supported_bits ft_field_support;
@@ -298,13 +298,13 @@
u8 receive[0x1];
u8 write[0x1];
u8 read[0x1];
- u8 reserved_0[0x1];
+ u8 reserved_at_4[0x1];
u8 srq_receive[0x1];
- u8 reserved_1[0x1a];
+ u8 reserved_at_6[0x1a];
};
struct mlx5_ifc_ipv4_layout_bits {
- u8 reserved_0[0x60];
+ u8 reserved_at_0[0x60];
u8 ipv4[0x20];
};
@@ -316,7 +316,7 @@
union mlx5_ifc_ipv6_layout_ipv4_layout_auto_bits {
struct mlx5_ifc_ipv6_layout_bits ipv6_layout;
struct mlx5_ifc_ipv4_layout_bits ipv4_layout;
- u8 reserved_0[0x80];
+ u8 reserved_at_0[0x80];
};
struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
@@ -336,15 +336,15 @@
u8 ip_dscp[0x6];
u8 ip_ecn[0x2];
u8 vlan_tag[0x1];
- u8 reserved_0[0x1];
+ u8 reserved_at_91[0x1];
u8 frag[0x1];
- u8 reserved_1[0x4];
+ u8 reserved_at_93[0x4];
u8 tcp_flags[0x9];
u8 tcp_sport[0x10];
u8 tcp_dport[0x10];
- u8 reserved_2[0x20];
+ u8 reserved_at_c0[0x20];
u8 udp_sport[0x10];
u8 udp_dport[0x10];
@@ -355,9 +355,9 @@
};
struct mlx5_ifc_fte_match_set_misc_bits {
- u8 reserved_0[0x20];
+ u8 reserved_at_0[0x20];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 source_port[0x10];
u8 outer_second_prio[0x3];
@@ -369,31 +369,31 @@
u8 outer_second_vlan_tag[0x1];
u8 inner_second_vlan_tag[0x1];
- u8 reserved_2[0xe];
+ u8 reserved_at_62[0xe];
u8 gre_protocol[0x10];
u8 gre_key_h[0x18];
u8 gre_key_l[0x8];
u8 vxlan_vni[0x18];
- u8 reserved_3[0x8];
+ u8 reserved_at_b8[0x8];
- u8 reserved_4[0x20];
+ u8 reserved_at_c0[0x20];
- u8 reserved_5[0xc];
+ u8 reserved_at_e0[0xc];
u8 outer_ipv6_flow_label[0x14];
- u8 reserved_6[0xc];
+ u8 reserved_at_100[0xc];
u8 inner_ipv6_flow_label[0x14];
- u8 reserved_7[0xe0];
+ u8 reserved_at_120[0xe0];
};
struct mlx5_ifc_cmd_pas_bits {
u8 pa_h[0x20];
u8 pa_l[0x14];
- u8 reserved_0[0xc];
+ u8 reserved_at_34[0xc];
};
struct mlx5_ifc_uint64_bits {
@@ -418,31 +418,31 @@
struct mlx5_ifc_ads_bits {
u8 fl[0x1];
u8 free_ar[0x1];
- u8 reserved_0[0xe];
+ u8 reserved_at_2[0xe];
u8 pkey_index[0x10];
- u8 reserved_1[0x8];
+ u8 reserved_at_20[0x8];
u8 grh[0x1];
u8 mlid[0x7];
u8 rlid[0x10];
u8 ack_timeout[0x5];
- u8 reserved_2[0x3];
+ u8 reserved_at_45[0x3];
u8 src_addr_index[0x8];
- u8 reserved_3[0x4];
+ u8 reserved_at_50[0x4];
u8 stat_rate[0x4];
u8 hop_limit[0x8];
- u8 reserved_4[0x4];
+ u8 reserved_at_60[0x4];
u8 tclass[0x8];
u8 flow_label[0x14];
u8 rgid_rip[16][0x8];
- u8 reserved_5[0x4];
+ u8 reserved_at_100[0x4];
u8 f_dscp[0x1];
u8 f_ecn[0x1];
- u8 reserved_6[0x1];
+ u8 reserved_at_106[0x1];
u8 f_eth_prio[0x1];
u8 ecn[0x2];
u8 dscp[0x6];
@@ -458,25 +458,25 @@
};
struct mlx5_ifc_flow_table_nic_cap_bits {
- u8 reserved_0[0x200];
+ u8 reserved_at_0[0x200];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive;
- u8 reserved_1[0x200];
+ u8 reserved_at_400[0x200];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_receive_sniffer;
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit;
- u8 reserved_2[0x200];
+ u8 reserved_at_a00[0x200];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_transmit_sniffer;
- u8 reserved_3[0x7200];
+ u8 reserved_at_e00[0x7200];
};
struct mlx5_ifc_flow_table_eswitch_cap_bits {
- u8 reserved_0[0x200];
+ u8 reserved_at_0[0x200];
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_nic_esw_fdb;
@@ -484,7 +484,7 @@
struct mlx5_ifc_flow_table_prop_layout_bits flow_table_properties_esw_acl_egress;
- u8 reserved_1[0x7800];
+ u8 reserved_at_800[0x7800];
};
struct mlx5_ifc_e_switch_cap_bits {
@@ -493,9 +493,9 @@
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert_if_not_exist[0x1];
u8 vport_cvlan_insert_overwrite[0x1];
- u8 reserved_0[0x1b];
+ u8 reserved_at_5[0x1b];
- u8 reserved_1[0x7e0];
+ u8 reserved_at_20[0x7e0];
};
struct mlx5_ifc_per_protocol_networking_offload_caps_bits {
@@ -504,51 +504,51 @@
u8 lro_cap[0x1];
u8 lro_psh_flag[0x1];
u8 lro_time_stamp[0x1];
- u8 reserved_0[0x3];
+ u8 reserved_at_5[0x3];
u8 self_lb_en_modifiable[0x1];
- u8 reserved_1[0x2];
+ u8 reserved_at_9[0x2];
u8 max_lso_cap[0x5];
- u8 reserved_2[0x4];
+ u8 reserved_at_10[0x4];
u8 rss_ind_tbl_cap[0x4];
- u8 reserved_3[0x3];
+ u8 reserved_at_18[0x3];
u8 tunnel_lso_const_out_ip_id[0x1];
- u8 reserved_4[0x2];
+ u8 reserved_at_1c[0x2];
u8 tunnel_statless_gre[0x1];
u8 tunnel_stateless_vxlan[0x1];
- u8 reserved_5[0x20];
+ u8 reserved_at_20[0x20];
- u8 reserved_6[0x10];
+ u8 reserved_at_40[0x10];
u8 lro_min_mss_size[0x10];
- u8 reserved_7[0x120];
+ u8 reserved_at_60[0x120];
u8 lro_timer_supported_periods[4][0x20];
- u8 reserved_8[0x600];
+ u8 reserved_at_200[0x600];
};
struct mlx5_ifc_roce_cap_bits {
u8 roce_apm[0x1];
- u8 reserved_0[0x1f];
+ u8 reserved_at_1[0x1f];
- u8 reserved_1[0x60];
+ u8 reserved_at_20[0x60];
- u8 reserved_2[0xc];
+ u8 reserved_at_80[0xc];
u8 l3_type[0x4];
- u8 reserved_3[0x8];
+ u8 reserved_at_90[0x8];
u8 roce_version[0x8];
- u8 reserved_4[0x10];
+ u8 reserved_at_a0[0x10];
u8 r_roce_dest_udp_port[0x10];
u8 r_roce_max_src_udp_port[0x10];
u8 r_roce_min_src_udp_port[0x10];
- u8 reserved_5[0x10];
+ u8 reserved_at_e0[0x10];
u8 roce_address_table_size[0x10];
- u8 reserved_6[0x700];
+ u8 reserved_at_100[0x700];
};
enum {
@@ -576,35 +576,35 @@
};
struct mlx5_ifc_atomic_caps_bits {
- u8 reserved_0[0x40];
+ u8 reserved_at_0[0x40];
u8 atomic_req_8B_endianess_mode[0x2];
- u8 reserved_1[0x4];
+ u8 reserved_at_42[0x4];
u8 supported_atomic_req_8B_endianess_mode_1[0x1];
- u8 reserved_2[0x19];
+ u8 reserved_at_47[0x19];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
- u8 reserved_4[0x10];
+ u8 reserved_at_80[0x10];
u8 atomic_operations[0x10];
- u8 reserved_5[0x10];
+ u8 reserved_at_a0[0x10];
u8 atomic_size_qp[0x10];
- u8 reserved_6[0x10];
+ u8 reserved_at_c0[0x10];
u8 atomic_size_dc[0x10];
- u8 reserved_7[0x720];
+ u8 reserved_at_e0[0x720];
};
struct mlx5_ifc_odp_cap_bits {
- u8 reserved_0[0x40];
+ u8 reserved_at_0[0x40];
u8 sig[0x1];
- u8 reserved_1[0x1f];
+ u8 reserved_at_41[0x1f];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
struct mlx5_ifc_odp_per_transport_service_cap_bits rc_odp_caps;
@@ -612,7 +612,7 @@
struct mlx5_ifc_odp_per_transport_service_cap_bits ud_odp_caps;
- u8 reserved_3[0x720];
+ u8 reserved_at_e0[0x720];
};
enum {
@@ -660,55 +660,55 @@
};
struct mlx5_ifc_cmd_hca_cap_bits {
- u8 reserved_0[0x80];
+ u8 reserved_at_0[0x80];
u8 log_max_srq_sz[0x8];
u8 log_max_qp_sz[0x8];
- u8 reserved_1[0xb];
+ u8 reserved_at_90[0xb];
u8 log_max_qp[0x5];
- u8 reserved_2[0xb];
+ u8 reserved_at_a0[0xb];
u8 log_max_srq[0x5];
- u8 reserved_3[0x10];
+ u8 reserved_at_b0[0x10];
- u8 reserved_4[0x8];
+ u8 reserved_at_c0[0x8];
u8 log_max_cq_sz[0x8];
- u8 reserved_5[0xb];
+ u8 reserved_at_d0[0xb];
u8 log_max_cq[0x5];
u8 log_max_eq_sz[0x8];
- u8 reserved_6[0x2];
+ u8 reserved_at_e8[0x2];
u8 log_max_mkey[0x6];
- u8 reserved_7[0xc];
+ u8 reserved_at_f0[0xc];
u8 log_max_eq[0x4];
u8 max_indirection[0x8];
- u8 reserved_8[0x1];
+ u8 reserved_at_108[0x1];
u8 log_max_mrw_sz[0x7];
- u8 reserved_9[0x2];
+ u8 reserved_at_110[0x2];
u8 log_max_bsf_list_size[0x6];
- u8 reserved_10[0x2];
+ u8 reserved_at_118[0x2];
u8 log_max_klm_list_size[0x6];
- u8 reserved_11[0xa];
+ u8 reserved_at_120[0xa];
u8 log_max_ra_req_dc[0x6];
- u8 reserved_12[0xa];
+ u8 reserved_at_130[0xa];
u8 log_max_ra_res_dc[0x6];
- u8 reserved_13[0xa];
+ u8 reserved_at_140[0xa];
u8 log_max_ra_req_qp[0x6];
- u8 reserved_14[0xa];
+ u8 reserved_at_150[0xa];
u8 log_max_ra_res_qp[0x6];
u8 pad_cap[0x1];
u8 cc_query_allowed[0x1];
u8 cc_modify_allowed[0x1];
- u8 reserved_15[0xd];
+ u8 reserved_at_163[0xd];
u8 gid_table_size[0x10];
u8 out_of_seq_cnt[0x1];
u8 vport_counters[0x1];
- u8 reserved_16[0x4];
+ u8 reserved_at_182[0x4];
u8 max_qp_cnt[0xa];
u8 pkey_table_size[0x10];
@@ -716,158 +716,158 @@
u8 vhca_group_manager[0x1];
u8 ib_virt[0x1];
u8 eth_virt[0x1];
- u8 reserved_17[0x1];
+ u8 reserved_at_1a4[0x1];
u8 ets[0x1];
u8 nic_flow_table[0x1];
u8 eswitch_flow_table[0x1];
u8 early_vf_enable;
- u8 reserved_18[0x2];
+ u8 reserved_at_1a8[0x2];
u8 local_ca_ack_delay[0x5];
- u8 reserved_19[0x6];
+ u8 reserved_at_1af[0x6];
u8 port_type[0x2];
u8 num_ports[0x8];
- u8 reserved_20[0x3];
+ u8 reserved_at_1bf[0x3];
u8 log_max_msg[0x5];
- u8 reserved_21[0x18];
+ u8 reserved_at_1c7[0x18];
u8 stat_rate_support[0x10];
- u8 reserved_22[0xc];
+ u8 reserved_at_1ef[0xc];
u8 cqe_version[0x4];
u8 compact_address_vector[0x1];
- u8 reserved_23[0xe];
+ u8 reserved_at_200[0xe];
u8 drain_sigerr[0x1];
u8 cmdif_checksum[0x2];
u8 sigerr_cqe[0x1];
- u8 reserved_24[0x1];
+ u8 reserved_at_212[0x1];
u8 wq_signature[0x1];
u8 sctr_data_cqe[0x1];
- u8 reserved_25[0x1];
+ u8 reserved_at_215[0x1];
u8 sho[0x1];
u8 tph[0x1];
u8 rf[0x1];
u8 dct[0x1];
- u8 reserved_26[0x1];
+ u8 reserved_at_21a[0x1];
u8 eth_net_offloads[0x1];
u8 roce[0x1];
u8 atomic[0x1];
- u8 reserved_27[0x1];
+ u8 reserved_at_21e[0x1];
u8 cq_oi[0x1];
u8 cq_resize[0x1];
u8 cq_moderation[0x1];
- u8 reserved_28[0x3];
+ u8 reserved_at_222[0x3];
u8 cq_eq_remap[0x1];
u8 pg[0x1];
u8 block_lb_mc[0x1];
- u8 reserved_29[0x1];
+ u8 reserved_at_228[0x1];
u8 scqe_break_moderation[0x1];
- u8 reserved_30[0x1];
+ u8 reserved_at_22a[0x1];
u8 cd[0x1];
- u8 reserved_31[0x1];
+ u8 reserved_at_22c[0x1];
u8 apm[0x1];
- u8 reserved_32[0x7];
+ u8 reserved_at_22e[0x7];
u8 qkv[0x1];
u8 pkv[0x1];
- u8 reserved_33[0x4];
+ u8 reserved_at_237[0x4];
u8 xrc[0x1];
u8 ud[0x1];
u8 uc[0x1];
u8 rc[0x1];
- u8 reserved_34[0xa];
+ u8 reserved_at_23f[0xa];
u8 uar_sz[0x6];
- u8 reserved_35[0x8];
+ u8 reserved_at_24f[0x8];
u8 log_pg_sz[0x8];
u8 bf[0x1];
- u8 reserved_36[0x1];
+ u8 reserved_at_260[0x1];
u8 pad_tx_eth_packet[0x1];
- u8 reserved_37[0x8];
+ u8 reserved_at_262[0x8];
u8 log_bf_reg_size[0x5];
- u8 reserved_38[0x10];
+ u8 reserved_at_26f[0x10];
- u8 reserved_39[0x10];
+ u8 reserved_at_27f[0x10];
u8 max_wqe_sz_sq[0x10];
- u8 reserved_40[0x10];
+ u8 reserved_at_29f[0x10];
u8 max_wqe_sz_rq[0x10];
- u8 reserved_41[0x10];
+ u8 reserved_at_2bf[0x10];
u8 max_wqe_sz_sq_dc[0x10];
- u8 reserved_42[0x7];
+ u8 reserved_at_2df[0x7];
u8 max_qp_mcg[0x19];
- u8 reserved_43[0x18];
+ u8 reserved_at_2ff[0x18];
u8 log_max_mcg[0x8];
- u8 reserved_44[0x3];
+ u8 reserved_at_31f[0x3];
u8 log_max_transport_domain[0x5];
- u8 reserved_45[0x3];
+ u8 reserved_at_327[0x3];
u8 log_max_pd[0x5];
- u8 reserved_46[0xb];
+ u8 reserved_at_32f[0xb];
u8 log_max_xrcd[0x5];
- u8 reserved_47[0x20];
+ u8 reserved_at_33f[0x20];
- u8 reserved_48[0x3];
+ u8 reserved_at_35f[0x3];
u8 log_max_rq[0x5];
- u8 reserved_49[0x3];
+ u8 reserved_at_367[0x3];
u8 log_max_sq[0x5];
- u8 reserved_50[0x3];
+ u8 reserved_at_36f[0x3];
u8 log_max_tir[0x5];
- u8 reserved_51[0x3];
+ u8 reserved_at_377[0x3];
u8 log_max_tis[0x5];
u8 basic_cyclic_rcv_wqe[0x1];
- u8 reserved_52[0x2];
+ u8 reserved_at_380[0x2];
u8 log_max_rmp[0x5];
- u8 reserved_53[0x3];
+ u8 reserved_at_387[0x3];
u8 log_max_rqt[0x5];
- u8 reserved_54[0x3];
+ u8 reserved_at_38f[0x3];
u8 log_max_rqt_size[0x5];
- u8 reserved_55[0x3];
+ u8 reserved_at_397[0x3];
u8 log_max_tis_per_sq[0x5];
- u8 reserved_56[0x3];
+ u8 reserved_at_39f[0x3];
u8 log_max_stride_sz_rq[0x5];
- u8 reserved_57[0x3];
+ u8 reserved_at_3a7[0x3];
u8 log_min_stride_sz_rq[0x5];
- u8 reserved_58[0x3];
+ u8 reserved_at_3af[0x3];
u8 log_max_stride_sz_sq[0x5];
- u8 reserved_59[0x3];
+ u8 reserved_at_3b7[0x3];
u8 log_min_stride_sz_sq[0x5];
- u8 reserved_60[0x1b];
+ u8 reserved_at_3bf[0x1b];
u8 log_max_wq_sz[0x5];
u8 nic_vport_change_event[0x1];
- u8 reserved_61[0xa];
+ u8 reserved_at_3e0[0xa];
u8 log_max_vlan_list[0x5];
- u8 reserved_62[0x3];
+ u8 reserved_at_3ef[0x3];
u8 log_max_current_mc_list[0x5];
- u8 reserved_63[0x3];
+ u8 reserved_at_3f7[0x3];
u8 log_max_current_uc_list[0x5];
- u8 reserved_64[0x80];
+ u8 reserved_at_3ff[0x80];
- u8 reserved_65[0x3];
+ u8 reserved_at_47f[0x3];
u8 log_max_l2_table[0x5];
- u8 reserved_66[0x8];
+ u8 reserved_at_487[0x8];
u8 log_uar_page_sz[0x10];
- u8 reserved_67[0x20];
+ u8 reserved_at_49f[0x20];
u8 device_frequency_mhz[0x20];
u8 device_frequency_khz[0x20];
- u8 reserved_68[0x5f];
+ u8 reserved_at_4ff[0x5f];
u8 cqe_zip[0x1];
u8 cqe_zip_timeout[0x10];
u8 cqe_zip_max_num[0x10];
- u8 reserved_69[0x220];
+ u8 reserved_at_57f[0x220];
};
enum mlx5_flow_destination_type {
@@ -880,7 +880,7 @@
u8 destination_type[0x8];
u8 destination_id[0x18];
- u8 reserved_0[0x20];
+ u8 reserved_at_20[0x20];
};
struct mlx5_ifc_fte_match_param_bits {
@@ -890,7 +890,7 @@
struct mlx5_ifc_fte_match_set_lyr_2_4_bits inner_headers;
- u8 reserved_0[0xa00];
+ u8 reserved_at_600[0xa00];
};
enum {
@@ -922,18 +922,18 @@
u8 wq_signature[0x1];
u8 end_padding_mode[0x2];
u8 cd_slave[0x1];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 hds_skip_first_sge[0x1];
u8 log2_hds_buf_size[0x3];
- u8 reserved_1[0x7];
+ u8 reserved_at_24[0x7];
u8 page_offset[0x5];
u8 lwm[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 pd[0x18];
- u8 reserved_3[0x8];
+ u8 reserved_at_60[0x8];
u8 uar_page[0x18];
u8 dbr_addr[0x40];
@@ -942,60 +942,60 @@
u8 sw_counter[0x20];
- u8 reserved_4[0xc];
+ u8 reserved_at_100[0xc];
u8 log_wq_stride[0x4];
- u8 reserved_5[0x3];
+ u8 reserved_at_110[0x3];
u8 log_wq_pg_sz[0x5];
- u8 reserved_6[0x3];
+ u8 reserved_at_118[0x3];
u8 log_wq_sz[0x5];
- u8 reserved_7[0x4e0];
+ u8 reserved_at_120[0x4e0];
struct mlx5_ifc_cmd_pas_bits pas[0];
};
struct mlx5_ifc_rq_num_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 rq_num[0x18];
};
struct mlx5_ifc_mac_address_layout_bits {
- u8 reserved_0[0x10];
+ u8 reserved_at_0[0x10];
u8 mac_addr_47_32[0x10];
u8 mac_addr_31_0[0x20];
};
struct mlx5_ifc_vlan_layout_bits {
- u8 reserved_0[0x14];
+ u8 reserved_at_0[0x14];
u8 vlan[0x0c];
- u8 reserved_1[0x20];
+ u8 reserved_at_20[0x20];
};
struct mlx5_ifc_cong_control_r_roce_ecn_np_bits {
- u8 reserved_0[0xa0];
+ u8 reserved_at_0[0xa0];
u8 min_time_between_cnps[0x20];
- u8 reserved_1[0x12];
+ u8 reserved_at_c0[0x12];
u8 cnp_dscp[0x6];
- u8 reserved_2[0x5];
+ u8 reserved_at_d8[0x5];
u8 cnp_802p_prio[0x3];
- u8 reserved_3[0x720];
+ u8 reserved_at_e0[0x720];
};
struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits {
- u8 reserved_0[0x60];
+ u8 reserved_at_0[0x60];
- u8 reserved_1[0x4];
+ u8 reserved_at_60[0x4];
u8 clamp_tgt_rate[0x1];
- u8 reserved_2[0x3];
+ u8 reserved_at_65[0x3];
u8 clamp_tgt_rate_after_time_inc[0x1];
- u8 reserved_3[0x17];
+ u8 reserved_at_69[0x17];
- u8 reserved_4[0x20];
+ u8 reserved_at_80[0x20];
u8 rpg_time_reset[0x20];
@@ -1015,7 +1015,7 @@
u8 rpg_min_rate[0x20];
- u8 reserved_5[0xe0];
+ u8 reserved_at_1c0[0xe0];
u8 rate_to_set_on_first_cnp[0x20];
@@ -1025,15 +1025,15 @@
u8 rate_reduce_monitor_period[0x20];
- u8 reserved_6[0x20];
+ u8 reserved_at_320[0x20];
u8 initial_alpha_value[0x20];
- u8 reserved_7[0x4a0];
+ u8 reserved_at_360[0x4a0];
};
struct mlx5_ifc_cong_control_802_1qau_rp_bits {
- u8 reserved_0[0x80];
+ u8 reserved_at_0[0x80];
u8 rppp_max_rps[0x20];
@@ -1055,7 +1055,7 @@
u8 rpg_min_rate[0x20];
- u8 reserved_1[0x640];
+ u8 reserved_at_1c0[0x640];
};
enum {
@@ -1205,7 +1205,7 @@
u8 successful_recovery_events[0x20];
- u8 reserved_0[0x180];
+ u8 reserved_at_640[0x180];
};
struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
@@ -1213,7 +1213,7 @@
u8 transmit_queue_low[0x20];
- u8 reserved_0[0x780];
+ u8 reserved_at_40[0x780];
};
struct mlx5_ifc_eth_per_prio_grp_data_layout_bits {
@@ -1221,7 +1221,7 @@
u8 rx_octets_low[0x20];
- u8 reserved_0[0xc0];
+ u8 reserved_at_40[0xc0];
u8 rx_frames_high[0x20];
@@ -1231,7 +1231,7 @@
u8 tx_octets_low[0x20];
- u8 reserved_1[0xc0];
+ u8 reserved_at_180[0xc0];
u8 tx_frames_high[0x20];
@@ -1257,7 +1257,7 @@
u8 rx_pause_transition_low[0x20];
- u8 reserved_2[0x400];
+ u8 reserved_at_3c0[0x400];
};
struct mlx5_ifc_eth_extended_cntrs_grp_data_layout_bits {
@@ -1265,7 +1265,7 @@
u8 port_transmit_wait_low[0x20];
- u8 reserved_0[0x780];
+ u8 reserved_at_40[0x780];
};
struct mlx5_ifc_eth_3635_cntrs_grp_data_layout_bits {
@@ -1333,7 +1333,7 @@
u8 dot3out_pause_frames_low[0x20];
- u8 reserved_0[0x3c0];
+ u8 reserved_at_400[0x3c0];
};
struct mlx5_ifc_eth_2819_cntrs_grp_data_layout_bits {
@@ -1421,7 +1421,7 @@
u8 ether_stats_pkts8192to10239octets_low[0x20];
- u8 reserved_0[0x280];
+ u8 reserved_at_540[0x280];
};
struct mlx5_ifc_eth_2863_cntrs_grp_data_layout_bits {
@@ -1477,7 +1477,7 @@
u8 if_out_broadcast_pkts_low[0x20];
- u8 reserved_0[0x480];
+ u8 reserved_at_340[0x480];
};
struct mlx5_ifc_eth_802_3_cntrs_grp_data_layout_bits {
@@ -1557,54 +1557,54 @@
u8 a_pause_mac_ctrl_frames_transmitted_low[0x20];
- u8 reserved_0[0x300];
+ u8 reserved_at_4c0[0x300];
};
struct mlx5_ifc_cmd_inter_comp_event_bits {
u8 command_completion_vector[0x20];
- u8 reserved_0[0xc0];
+ u8 reserved_at_20[0xc0];
};
struct mlx5_ifc_stall_vl_event_bits {
- u8 reserved_0[0x18];
+ u8 reserved_at_0[0x18];
u8 port_num[0x1];
- u8 reserved_1[0x3];
+ u8 reserved_at_19[0x3];
u8 vl[0x4];
- u8 reserved_2[0xa0];
+ u8 reserved_at_20[0xa0];
};
struct mlx5_ifc_db_bf_congestion_event_bits {
u8 event_subtype[0x8];
- u8 reserved_0[0x8];
+ u8 reserved_at_8[0x8];
u8 congestion_level[0x8];
- u8 reserved_1[0x8];
+ u8 reserved_at_18[0x8];
- u8 reserved_2[0xa0];
+ u8 reserved_at_20[0xa0];
};
struct mlx5_ifc_gpio_event_bits {
- u8 reserved_0[0x60];
+ u8 reserved_at_0[0x60];
u8 gpio_event_hi[0x20];
u8 gpio_event_lo[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_a0[0x40];
};
struct mlx5_ifc_port_state_change_event_bits {
- u8 reserved_0[0x40];
+ u8 reserved_at_0[0x40];
u8 port_num[0x4];
- u8 reserved_1[0x1c];
+ u8 reserved_at_44[0x1c];
- u8 reserved_2[0x80];
+ u8 reserved_at_60[0x80];
};
struct mlx5_ifc_dropped_packet_logged_bits {
- u8 reserved_0[0xe0];
+ u8 reserved_at_0[0xe0];
};
enum {
@@ -1613,15 +1613,15 @@
};
struct mlx5_ifc_cq_error_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 cqn[0x18];
- u8 reserved_1[0x20];
+ u8 reserved_at_20[0x20];
- u8 reserved_2[0x18];
+ u8 reserved_at_40[0x18];
u8 syndrome[0x8];
- u8 reserved_3[0x80];
+ u8 reserved_at_60[0x80];
};
struct mlx5_ifc_rdma_page_fault_event_bits {
@@ -1629,14 +1629,14 @@
u8 r_key[0x20];
- u8 reserved_0[0x10];
+ u8 reserved_at_40[0x10];
u8 packet_len[0x10];
u8 rdma_op_len[0x20];
u8 rdma_va[0x40];
- u8 reserved_1[0x5];
+ u8 reserved_at_c0[0x5];
u8 rdma[0x1];
u8 write[0x1];
u8 requestor[0x1];
@@ -1646,15 +1646,15 @@
struct mlx5_ifc_wqe_associated_page_fault_event_bits {
u8 bytes_committed[0x20];
- u8 reserved_0[0x10];
+ u8 reserved_at_20[0x10];
u8 wqe_index[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_40[0x10];
u8 len[0x10];
- u8 reserved_2[0x60];
+ u8 reserved_at_60[0x60];
- u8 reserved_3[0x5];
+ u8 reserved_at_c0[0x5];
u8 rdma[0x1];
u8 write_read[0x1];
u8 requestor[0x1];
@@ -1662,26 +1662,26 @@
};
struct mlx5_ifc_qp_events_bits {
- u8 reserved_0[0xa0];
+ u8 reserved_at_0[0xa0];
u8 type[0x8];
- u8 reserved_1[0x18];
+ u8 reserved_at_a8[0x18];
- u8 reserved_2[0x8];
+ u8 reserved_at_c0[0x8];
u8 qpn_rqn_sqn[0x18];
};
struct mlx5_ifc_dct_events_bits {
- u8 reserved_0[0xc0];
+ u8 reserved_at_0[0xc0];
- u8 reserved_1[0x8];
+ u8 reserved_at_c0[0x8];
u8 dct_number[0x18];
};
struct mlx5_ifc_comp_event_bits {
- u8 reserved_0[0xc0];
+ u8 reserved_at_0[0xc0];
- u8 reserved_1[0x8];
+ u8 reserved_at_c0[0x8];
u8 cq_number[0x18];
};
@@ -1754,41 +1754,41 @@
struct mlx5_ifc_qpc_bits {
u8 state[0x4];
- u8 reserved_0[0x4];
+ u8 reserved_at_4[0x4];
u8 st[0x8];
- u8 reserved_1[0x3];
+ u8 reserved_at_10[0x3];
u8 pm_state[0x2];
- u8 reserved_2[0x7];
+ u8 reserved_at_15[0x7];
u8 end_padding_mode[0x2];
- u8 reserved_3[0x2];
+ u8 reserved_at_1e[0x2];
u8 wq_signature[0x1];
u8 block_lb_mc[0x1];
u8 atomic_like_write_en[0x1];
u8 latency_sensitive[0x1];
- u8 reserved_4[0x1];
+ u8 reserved_at_24[0x1];
u8 drain_sigerr[0x1];
- u8 reserved_5[0x2];
+ u8 reserved_at_26[0x2];
u8 pd[0x18];
u8 mtu[0x3];
u8 log_msg_max[0x5];
- u8 reserved_6[0x1];
+ u8 reserved_at_48[0x1];
u8 log_rq_size[0x4];
u8 log_rq_stride[0x3];
u8 no_sq[0x1];
u8 log_sq_size[0x4];
- u8 reserved_7[0x6];
+ u8 reserved_at_55[0x6];
u8 rlky[0x1];
- u8 reserved_8[0x4];
+ u8 reserved_at_5c[0x4];
u8 counter_set_id[0x8];
u8 uar_page[0x18];
- u8 reserved_9[0x8];
+ u8 reserved_at_80[0x8];
u8 user_index[0x18];
- u8 reserved_10[0x3];
+ u8 reserved_at_a0[0x3];
u8 log_page_size[0x5];
u8 remote_qpn[0x18];
@@ -1797,66 +1797,66 @@
struct mlx5_ifc_ads_bits secondary_address_path;
u8 log_ack_req_freq[0x4];
- u8 reserved_11[0x4];
+ u8 reserved_at_384[0x4];
u8 log_sra_max[0x3];
- u8 reserved_12[0x2];
+ u8 reserved_at_38b[0x2];
u8 retry_count[0x3];
u8 rnr_retry[0x3];
- u8 reserved_13[0x1];
+ u8 reserved_at_393[0x1];
u8 fre[0x1];
u8 cur_rnr_retry[0x3];
u8 cur_retry_count[0x3];
- u8 reserved_14[0x5];
+ u8 reserved_at_39b[0x5];
- u8 reserved_15[0x20];
+ u8 reserved_at_3a0[0x20];
- u8 reserved_16[0x8];
+ u8 reserved_at_3c0[0x8];
u8 next_send_psn[0x18];
- u8 reserved_17[0x8];
+ u8 reserved_at_3e0[0x8];
u8 cqn_snd[0x18];
- u8 reserved_18[0x40];
+ u8 reserved_at_400[0x40];
- u8 reserved_19[0x8];
+ u8 reserved_at_440[0x8];
u8 last_acked_psn[0x18];
- u8 reserved_20[0x8];
+ u8 reserved_at_460[0x8];
u8 ssn[0x18];
- u8 reserved_21[0x8];
+ u8 reserved_at_480[0x8];
u8 log_rra_max[0x3];
- u8 reserved_22[0x1];
+ u8 reserved_at_48b[0x1];
u8 atomic_mode[0x4];
u8 rre[0x1];
u8 rwe[0x1];
u8 rae[0x1];
- u8 reserved_23[0x1];
+ u8 reserved_at_493[0x1];
u8 page_offset[0x6];
- u8 reserved_24[0x3];
+ u8 reserved_at_49a[0x3];
u8 cd_slave_receive[0x1];
u8 cd_slave_send[0x1];
u8 cd_master[0x1];
- u8 reserved_25[0x3];
+ u8 reserved_at_4a0[0x3];
u8 min_rnr_nak[0x5];
u8 next_rcv_psn[0x18];
- u8 reserved_26[0x8];
+ u8 reserved_at_4c0[0x8];
u8 xrcd[0x18];
- u8 reserved_27[0x8];
+ u8 reserved_at_4e0[0x8];
u8 cqn_rcv[0x18];
u8 dbr_addr[0x40];
u8 q_key[0x20];
- u8 reserved_28[0x5];
+ u8 reserved_at_560[0x5];
u8 rq_type[0x3];
u8 srqn_rmpn[0x18];
- u8 reserved_29[0x8];
+ u8 reserved_at_580[0x8];
u8 rmsn[0x18];
u8 hw_sq_wqebb_counter[0x10];
@@ -1866,33 +1866,33 @@
u8 sw_rq_counter[0x20];
- u8 reserved_30[0x20];
+ u8 reserved_at_600[0x20];
- u8 reserved_31[0xf];
+ u8 reserved_at_620[0xf];
u8 cgs[0x1];
u8 cs_req[0x8];
u8 cs_res[0x8];
u8 dc_access_key[0x40];
- u8 reserved_32[0xc0];
+ u8 reserved_at_680[0xc0];
};
struct mlx5_ifc_roce_addr_layout_bits {
u8 source_l3_address[16][0x8];
- u8 reserved_0[0x3];
+ u8 reserved_at_80[0x3];
u8 vlan_valid[0x1];
u8 vlan_id[0xc];
u8 source_mac_47_32[0x10];
u8 source_mac_31_0[0x20];
- u8 reserved_1[0x14];
+ u8 reserved_at_c0[0x14];
u8 roce_l3_type[0x4];
u8 roce_version[0x8];
- u8 reserved_2[0x20];
+ u8 reserved_at_e0[0x20];
};
union mlx5_ifc_hca_cap_union_bits {
@@ -1904,7 +1904,7 @@
struct mlx5_ifc_flow_table_nic_cap_bits flow_table_nic_cap;
struct mlx5_ifc_flow_table_eswitch_cap_bits flow_table_eswitch_cap;
struct mlx5_ifc_e_switch_cap_bits e_switch_cap;
- u8 reserved_0[0x8000];
+ u8 reserved_at_0[0x8000];
};
enum {
@@ -1914,24 +1914,24 @@
};
struct mlx5_ifc_flow_context_bits {
- u8 reserved_0[0x20];
+ u8 reserved_at_0[0x20];
u8 group_id[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 flow_tag[0x18];
- u8 reserved_2[0x10];
+ u8 reserved_at_60[0x10];
u8 action[0x10];
- u8 reserved_3[0x8];
+ u8 reserved_at_80[0x8];
u8 destination_list_size[0x18];
- u8 reserved_4[0x160];
+ u8 reserved_at_a0[0x160];
struct mlx5_ifc_fte_match_param_bits match_value;
- u8 reserved_5[0x600];
+ u8 reserved_at_1200[0x600];
struct mlx5_ifc_dest_format_struct_bits destination[0];
};
@@ -1944,43 +1944,43 @@
struct mlx5_ifc_xrc_srqc_bits {
u8 state[0x4];
u8 log_xrc_srq_size[0x4];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 wq_signature[0x1];
u8 cont_srq[0x1];
- u8 reserved_1[0x1];
+ u8 reserved_at_22[0x1];
u8 rlky[0x1];
u8 basic_cyclic_rcv_wqe[0x1];
u8 log_rq_stride[0x3];
u8 xrcd[0x18];
u8 page_offset[0x6];
- u8 reserved_2[0x2];
+ u8 reserved_at_46[0x2];
u8 cqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 user_index_equal_xrc_srqn[0x1];
- u8 reserved_4[0x1];
+ u8 reserved_at_81[0x1];
u8 log_page_size[0x6];
u8 user_index[0x18];
- u8 reserved_5[0x20];
+ u8 reserved_at_a0[0x20];
- u8 reserved_6[0x8];
+ u8 reserved_at_c0[0x8];
u8 pd[0x18];
u8 lwm[0x10];
u8 wqe_cnt[0x10];
- u8 reserved_7[0x40];
+ u8 reserved_at_100[0x40];
u8 db_record_addr_h[0x20];
u8 db_record_addr_l[0x1e];
- u8 reserved_8[0x2];
+ u8 reserved_at_17e[0x2];
- u8 reserved_9[0x80];
+ u8 reserved_at_180[0x80];
};
struct mlx5_ifc_traffic_counter_bits {
@@ -1990,16 +1990,16 @@
};
struct mlx5_ifc_tisc_bits {
- u8 reserved_0[0xc];
+ u8 reserved_at_0[0xc];
u8 prio[0x4];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_2[0x100];
+ u8 reserved_at_20[0x100];
- u8 reserved_3[0x8];
+ u8 reserved_at_120[0x8];
u8 transport_domain[0x18];
- u8 reserved_4[0x3c0];
+ u8 reserved_at_140[0x3c0];
};
enum {
@@ -2024,31 +2024,31 @@
};
struct mlx5_ifc_tirc_bits {
- u8 reserved_0[0x20];
+ u8 reserved_at_0[0x20];
u8 disp_type[0x4];
- u8 reserved_1[0x1c];
+ u8 reserved_at_24[0x1c];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
- u8 reserved_3[0x4];
+ u8 reserved_at_80[0x4];
u8 lro_timeout_period_usecs[0x10];
u8 lro_enable_mask[0x4];
u8 lro_max_ip_payload_size[0x8];
- u8 reserved_4[0x40];
+ u8 reserved_at_a0[0x40];
- u8 reserved_5[0x8];
+ u8 reserved_at_e0[0x8];
u8 inline_rqn[0x18];
u8 rx_hash_symmetric[0x1];
- u8 reserved_6[0x1];
+ u8 reserved_at_101[0x1];
u8 tunneled_offload_en[0x1];
- u8 reserved_7[0x5];
+ u8 reserved_at_103[0x5];
u8 indirect_table[0x18];
u8 rx_hash_fn[0x4];
- u8 reserved_8[0x2];
+ u8 reserved_at_124[0x2];
u8 self_lb_block[0x2];
u8 transport_domain[0x18];
@@ -2058,7 +2058,7 @@
struct mlx5_ifc_rx_hash_field_select_bits rx_hash_field_selector_inner;
- u8 reserved_9[0x4c0];
+ u8 reserved_at_2c0[0x4c0];
};
enum {
@@ -2069,39 +2069,39 @@
struct mlx5_ifc_srqc_bits {
u8 state[0x4];
u8 log_srq_size[0x4];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 wq_signature[0x1];
u8 cont_srq[0x1];
- u8 reserved_1[0x1];
+ u8 reserved_at_22[0x1];
u8 rlky[0x1];
- u8 reserved_2[0x1];
+ u8 reserved_at_24[0x1];
u8 log_rq_stride[0x3];
u8 xrcd[0x18];
u8 page_offset[0x6];
- u8 reserved_3[0x2];
+ u8 reserved_at_46[0x2];
u8 cqn[0x18];
- u8 reserved_4[0x20];
+ u8 reserved_at_60[0x20];
- u8 reserved_5[0x2];
+ u8 reserved_at_80[0x2];
u8 log_page_size[0x6];
- u8 reserved_6[0x18];
+ u8 reserved_at_88[0x18];
- u8 reserved_7[0x20];
+ u8 reserved_at_a0[0x20];
- u8 reserved_8[0x8];
+ u8 reserved_at_c0[0x8];
u8 pd[0x18];
u8 lwm[0x10];
u8 wqe_cnt[0x10];
- u8 reserved_9[0x40];
+ u8 reserved_at_100[0x40];
u8 dbr_addr[0x40];
- u8 reserved_10[0x80];
+ u8 reserved_at_180[0x80];
};
enum {
@@ -2115,39 +2115,39 @@
u8 cd_master[0x1];
u8 fre[0x1];
u8 flush_in_error_en[0x1];
- u8 reserved_0[0x4];
+ u8 reserved_at_4[0x4];
u8 state[0x4];
- u8 reserved_1[0x14];
+ u8 reserved_at_c[0x14];
- u8 reserved_2[0x8];
+ u8 reserved_at_20[0x8];
u8 user_index[0x18];
- u8 reserved_3[0x8];
+ u8 reserved_at_40[0x8];
u8 cqn[0x18];
- u8 reserved_4[0xa0];
+ u8 reserved_at_60[0xa0];
u8 tis_lst_sz[0x10];
- u8 reserved_5[0x10];
+ u8 reserved_at_110[0x10];
- u8 reserved_6[0x40];
+ u8 reserved_at_120[0x40];
- u8 reserved_7[0x8];
+ u8 reserved_at_160[0x8];
u8 tis_num_0[0x18];
struct mlx5_ifc_wq_bits wq;
};
struct mlx5_ifc_rqtc_bits {
- u8 reserved_0[0xa0];
+ u8 reserved_at_0[0xa0];
- u8 reserved_1[0x10];
+ u8 reserved_at_a0[0x10];
u8 rqt_max_size[0x10];
- u8 reserved_2[0x10];
+ u8 reserved_at_c0[0x10];
u8 rqt_actual_size[0x10];
- u8 reserved_3[0x6a0];
+ u8 reserved_at_e0[0x6a0];
struct mlx5_ifc_rq_num_bits rq_num[0];
};
@@ -2165,27 +2165,27 @@
struct mlx5_ifc_rqc_bits {
u8 rlky[0x1];
- u8 reserved_0[0x2];
+ u8 reserved_at_1[0x2];
u8 vsd[0x1];
u8 mem_rq_type[0x4];
u8 state[0x4];
- u8 reserved_1[0x1];
+ u8 reserved_at_c[0x1];
u8 flush_in_error_en[0x1];
- u8 reserved_2[0x12];
+ u8 reserved_at_e[0x12];
- u8 reserved_3[0x8];
+ u8 reserved_at_20[0x8];
u8 user_index[0x18];
- u8 reserved_4[0x8];
+ u8 reserved_at_40[0x8];
u8 cqn[0x18];
u8 counter_set_id[0x8];
- u8 reserved_5[0x18];
+ u8 reserved_at_68[0x18];
- u8 reserved_6[0x8];
+ u8 reserved_at_80[0x8];
u8 rmpn[0x18];
- u8 reserved_7[0xe0];
+ u8 reserved_at_a0[0xe0];
struct mlx5_ifc_wq_bits wq;
};
@@ -2196,31 +2196,31 @@
};
struct mlx5_ifc_rmpc_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 state[0x4];
- u8 reserved_1[0x14];
+ u8 reserved_at_c[0x14];
u8 basic_cyclic_rcv_wqe[0x1];
- u8 reserved_2[0x1f];
+ u8 reserved_at_21[0x1f];
- u8 reserved_3[0x140];
+ u8 reserved_at_40[0x140];
struct mlx5_ifc_wq_bits wq;
};
struct mlx5_ifc_nic_vport_context_bits {
- u8 reserved_0[0x1f];
+ u8 reserved_at_0[0x1f];
u8 roce_en[0x1];
u8 arm_change_event[0x1];
- u8 reserved_1[0x1a];
+ u8 reserved_at_21[0x1a];
u8 event_on_mtu[0x1];
u8 event_on_promisc_change[0x1];
u8 event_on_vlan_change[0x1];
u8 event_on_mc_address_change[0x1];
u8 event_on_uc_address_change[0x1];
- u8 reserved_2[0xf0];
+ u8 reserved_at_40[0xf0];
u8 mtu[0x10];
@@ -2228,21 +2228,21 @@
u8 port_guid[0x40];
u8 node_guid[0x40];
- u8 reserved_3[0x140];
+ u8 reserved_at_200[0x140];
u8 qkey_violation_counter[0x10];
- u8 reserved_4[0x430];
+ u8 reserved_at_350[0x430];
u8 promisc_uc[0x1];
u8 promisc_mc[0x1];
u8 promisc_all[0x1];
- u8 reserved_5[0x2];
+ u8 reserved_at_783[0x2];
u8 allowed_list_type[0x3];
- u8 reserved_6[0xc];
+ u8 reserved_at_788[0xc];
u8 allowed_list_size[0xc];
struct mlx5_ifc_mac_address_layout_bits permanent_address;
- u8 reserved_7[0x20];
+ u8 reserved_at_7e0[0x20];
u8 current_uc_mac_address[0][0x40];
};
@@ -2254,9 +2254,9 @@
};
struct mlx5_ifc_mkc_bits {
- u8 reserved_0[0x1];
+ u8 reserved_at_0[0x1];
u8 free[0x1];
- u8 reserved_1[0xd];
+ u8 reserved_at_2[0xd];
u8 small_fence_on_rdma_read_response[0x1];
u8 umr_en[0x1];
u8 a[0x1];
@@ -2265,19 +2265,19 @@
u8 lw[0x1];
u8 lr[0x1];
u8 access_mode[0x2];
- u8 reserved_2[0x8];
+ u8 reserved_at_18[0x8];
u8 qpn[0x18];
u8 mkey_7_0[0x8];
- u8 reserved_3[0x20];
+ u8 reserved_at_40[0x20];
u8 length64[0x1];
u8 bsf_en[0x1];
u8 sync_umr[0x1];
- u8 reserved_4[0x2];
+ u8 reserved_at_63[0x2];
u8 expected_sigerr_count[0x1];
- u8 reserved_5[0x1];
+ u8 reserved_at_66[0x1];
u8 en_rinval[0x1];
u8 pd[0x18];
@@ -2287,18 +2287,18 @@
u8 bsf_octword_size[0x20];
- u8 reserved_6[0x80];
+ u8 reserved_at_120[0x80];
u8 translations_octword_size[0x20];
- u8 reserved_7[0x1b];
+ u8 reserved_at_1c0[0x1b];
u8 log_page_size[0x5];
- u8 reserved_8[0x20];
+ u8 reserved_at_1e0[0x20];
};
struct mlx5_ifc_pkey_bits {
- u8 reserved_0[0x10];
+ u8 reserved_at_0[0x10];
u8 pkey[0x10];
};
@@ -2309,19 +2309,19 @@
struct mlx5_ifc_hca_vport_context_bits {
u8 field_select[0x20];
- u8 reserved_0[0xe0];
+ u8 reserved_at_20[0xe0];
u8 sm_virt_aware[0x1];
u8 has_smi[0x1];
u8 has_raw[0x1];
u8 grh_required[0x1];
- u8 reserved_1[0xc];
+ u8 reserved_at_104[0xc];
u8 port_physical_state[0x4];
u8 vport_state_policy[0x4];
u8 port_state[0x4];
u8 vport_state[0x4];
- u8 reserved_2[0x20];
+ u8 reserved_at_120[0x20];
u8 system_image_guid[0x40];
@@ -2337,33 +2337,33 @@
u8 cap_mask2_field_select[0x20];
- u8 reserved_3[0x80];
+ u8 reserved_at_280[0x80];
u8 lid[0x10];
- u8 reserved_4[0x4];
+ u8 reserved_at_310[0x4];
u8 init_type_reply[0x4];
u8 lmc[0x3];
u8 subnet_timeout[0x5];
u8 sm_lid[0x10];
u8 sm_sl[0x4];
- u8 reserved_5[0xc];
+ u8 reserved_at_334[0xc];
u8 qkey_violation_counter[0x10];
u8 pkey_violation_counter[0x10];
- u8 reserved_6[0xca0];
+ u8 reserved_at_360[0xca0];
};
struct mlx5_ifc_esw_vport_context_bits {
- u8 reserved_0[0x3];
+ u8 reserved_at_0[0x3];
u8 vport_svlan_strip[0x1];
u8 vport_cvlan_strip[0x1];
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_insert[0x2];
- u8 reserved_1[0x18];
+ u8 reserved_at_8[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_20[0x20];
u8 svlan_cfi[0x1];
u8 svlan_pcp[0x3];
@@ -2372,7 +2372,7 @@
u8 cvlan_pcp[0x3];
u8 cvlan_id[0xc];
- u8 reserved_3[0x7a0];
+ u8 reserved_at_60[0x7a0];
};
enum {
@@ -2387,41 +2387,41 @@
struct mlx5_ifc_eqc_bits {
u8 status[0x4];
- u8 reserved_0[0x9];
+ u8 reserved_at_4[0x9];
u8 ec[0x1];
u8 oi[0x1];
- u8 reserved_1[0x5];
+ u8 reserved_at_f[0x5];
u8 st[0x4];
- u8 reserved_2[0x8];
+ u8 reserved_at_18[0x8];
- u8 reserved_3[0x20];
+ u8 reserved_at_20[0x20];
- u8 reserved_4[0x14];
+ u8 reserved_at_40[0x14];
u8 page_offset[0x6];
- u8 reserved_5[0x6];
+ u8 reserved_at_5a[0x6];
- u8 reserved_6[0x3];
+ u8 reserved_at_60[0x3];
u8 log_eq_size[0x5];
u8 uar_page[0x18];
- u8 reserved_7[0x20];
+ u8 reserved_at_80[0x20];
- u8 reserved_8[0x18];
+ u8 reserved_at_a0[0x18];
u8 intr[0x8];
- u8 reserved_9[0x3];
+ u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
- u8 reserved_10[0x18];
+ u8 reserved_at_c8[0x18];
- u8 reserved_11[0x60];
+ u8 reserved_at_e0[0x60];
- u8 reserved_12[0x8];
+ u8 reserved_at_140[0x8];
u8 consumer_counter[0x18];
- u8 reserved_13[0x8];
+ u8 reserved_at_160[0x8];
u8 producer_counter[0x18];
- u8 reserved_14[0x80];
+ u8 reserved_at_180[0x80];
};
enum {
@@ -2445,14 +2445,14 @@
};
struct mlx5_ifc_dctc_bits {
- u8 reserved_0[0x4];
+ u8 reserved_at_0[0x4];
u8 state[0x4];
- u8 reserved_1[0x18];
+ u8 reserved_at_8[0x18];
- u8 reserved_2[0x8];
+ u8 reserved_at_20[0x8];
u8 user_index[0x18];
- u8 reserved_3[0x8];
+ u8 reserved_at_40[0x8];
u8 cqn[0x18];
u8 counter_set_id[0x8];
@@ -2464,45 +2464,45 @@
u8 latency_sensitive[0x1];
u8 rlky[0x1];
u8 free_ar[0x1];
- u8 reserved_4[0xd];
+ u8 reserved_at_73[0xd];
- u8 reserved_5[0x8];
+ u8 reserved_at_80[0x8];
u8 cs_res[0x8];
- u8 reserved_6[0x3];
+ u8 reserved_at_90[0x3];
u8 min_rnr_nak[0x5];
- u8 reserved_7[0x8];
+ u8 reserved_at_98[0x8];
- u8 reserved_8[0x8];
+ u8 reserved_at_a0[0x8];
u8 srqn[0x18];
- u8 reserved_9[0x8];
+ u8 reserved_at_c0[0x8];
u8 pd[0x18];
u8 tclass[0x8];
- u8 reserved_10[0x4];
+ u8 reserved_at_e8[0x4];
u8 flow_label[0x14];
u8 dc_access_key[0x40];
- u8 reserved_11[0x5];
+ u8 reserved_at_140[0x5];
u8 mtu[0x3];
u8 port[0x8];
u8 pkey_index[0x10];
- u8 reserved_12[0x8];
+ u8 reserved_at_160[0x8];
u8 my_addr_index[0x8];
- u8 reserved_13[0x8];
+ u8 reserved_at_170[0x8];
u8 hop_limit[0x8];
u8 dc_access_key_violation_count[0x20];
- u8 reserved_14[0x14];
+ u8 reserved_at_1a0[0x14];
u8 dei_cfi[0x1];
u8 eth_prio[0x3];
u8 ecn[0x2];
u8 dscp[0x6];
- u8 reserved_15[0x40];
+ u8 reserved_at_1c0[0x40];
};
enum {
@@ -2524,54 +2524,54 @@
struct mlx5_ifc_cqc_bits {
u8 status[0x4];
- u8 reserved_0[0x4];
+ u8 reserved_at_4[0x4];
u8 cqe_sz[0x3];
u8 cc[0x1];
- u8 reserved_1[0x1];
+ u8 reserved_at_c[0x1];
u8 scqe_break_moderation_en[0x1];
u8 oi[0x1];
- u8 reserved_2[0x2];
+ u8 reserved_at_f[0x2];
u8 cqe_zip_en[0x1];
u8 mini_cqe_res_format[0x2];
u8 st[0x4];
- u8 reserved_3[0x8];
+ u8 reserved_at_18[0x8];
- u8 reserved_4[0x20];
+ u8 reserved_at_20[0x20];
- u8 reserved_5[0x14];
+ u8 reserved_at_40[0x14];
u8 page_offset[0x6];
- u8 reserved_6[0x6];
+ u8 reserved_at_5a[0x6];
- u8 reserved_7[0x3];
+ u8 reserved_at_60[0x3];
u8 log_cq_size[0x5];
u8 uar_page[0x18];
- u8 reserved_8[0x4];
+ u8 reserved_at_80[0x4];
u8 cq_period[0xc];
u8 cq_max_count[0x10];
- u8 reserved_9[0x18];
+ u8 reserved_at_a0[0x18];
u8 c_eqn[0x8];
- u8 reserved_10[0x3];
+ u8 reserved_at_c0[0x3];
u8 log_page_size[0x5];
- u8 reserved_11[0x18];
+ u8 reserved_at_c8[0x18];
- u8 reserved_12[0x20];
+ u8 reserved_at_e0[0x20];
- u8 reserved_13[0x8];
+ u8 reserved_at_100[0x8];
u8 last_notified_index[0x18];
- u8 reserved_14[0x8];
+ u8 reserved_at_120[0x8];
u8 last_solicit_index[0x18];
- u8 reserved_15[0x8];
+ u8 reserved_at_140[0x8];
u8 consumer_counter[0x18];
- u8 reserved_16[0x8];
+ u8 reserved_at_160[0x8];
u8 producer_counter[0x18];
- u8 reserved_17[0x40];
+ u8 reserved_at_180[0x40];
u8 dbr_addr[0x40];
};
@@ -2580,16 +2580,16 @@
struct mlx5_ifc_cong_control_802_1qau_rp_bits cong_control_802_1qau_rp;
struct mlx5_ifc_cong_control_r_roce_ecn_rp_bits cong_control_r_roce_ecn_rp;
struct mlx5_ifc_cong_control_r_roce_ecn_np_bits cong_control_r_roce_ecn_np;
- u8 reserved_0[0x800];
+ u8 reserved_at_0[0x800];
};
struct mlx5_ifc_query_adapter_param_block_bits {
- u8 reserved_0[0xc0];
+ u8 reserved_at_0[0xc0];
- u8 reserved_1[0x8];
+ u8 reserved_at_c0[0x8];
u8 ieee_vendor_id[0x18];
- u8 reserved_2[0x10];
+ u8 reserved_at_e0[0x10];
u8 vsd_vendor_id[0x10];
u8 vsd[208][0x8];
@@ -2600,14 +2600,14 @@
union mlx5_ifc_modify_field_select_resize_field_select_auto_bits {
struct mlx5_ifc_modify_field_select_bits modify_field_select;
struct mlx5_ifc_resize_field_select_bits resize_field_select;
- u8 reserved_0[0x20];
+ u8 reserved_at_0[0x20];
};
union mlx5_ifc_field_select_802_1_r_roce_auto_bits {
struct mlx5_ifc_field_select_802_1qau_rp_bits field_select_802_1qau_rp;
struct mlx5_ifc_field_select_r_roce_rp_bits field_select_r_roce_rp;
struct mlx5_ifc_field_select_r_roce_np_bits field_select_r_roce_np;
- u8 reserved_0[0x20];
+ u8 reserved_at_0[0x20];
};
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits {
@@ -2619,7 +2619,7 @@
struct mlx5_ifc_eth_per_prio_grp_data_layout_bits eth_per_prio_grp_data_layout;
struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits eth_per_traffic_grp_data_layout;
struct mlx5_ifc_phys_layer_cntrs_bits phys_layer_cntrs;
- u8 reserved_0[0x7c0];
+ u8 reserved_at_0[0x7c0];
};
union mlx5_ifc_event_auto_bits {
@@ -2635,23 +2635,23 @@
struct mlx5_ifc_db_bf_congestion_event_bits db_bf_congestion_event;
struct mlx5_ifc_stall_vl_event_bits stall_vl_event;
struct mlx5_ifc_cmd_inter_comp_event_bits cmd_inter_comp_event;
- u8 reserved_0[0xe0];
+ u8 reserved_at_0[0xe0];
};
struct mlx5_ifc_health_buffer_bits {
- u8 reserved_0[0x100];
+ u8 reserved_at_0[0x100];
u8 assert_existptr[0x20];
u8 assert_callra[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_140[0x40];
u8 fw_version[0x20];
u8 hw_id[0x20];
- u8 reserved_2[0x20];
+ u8 reserved_at_1c0[0x20];
u8 irisc_index[0x8];
u8 synd[0x8];
@@ -2660,20 +2660,20 @@
struct mlx5_ifc_register_loopback_control_bits {
u8 no_lb[0x1];
- u8 reserved_0[0x7];
+ u8 reserved_at_1[0x7];
u8 port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_2[0x60];
+ u8 reserved_at_20[0x60];
};
struct mlx5_ifc_teardown_hca_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
enum {
@@ -2683,108 +2683,108 @@
struct mlx5_ifc_teardown_hca_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x10];
+ u8 reserved_at_40[0x10];
u8 profile[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_sqerr2rts_qp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_sqerr2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 opt_param_mask[0x20];
- u8 reserved_4[0x20];
+ u8 reserved_at_a0[0x20];
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_5[0x80];
+ u8 reserved_at_800[0x80];
};
struct mlx5_ifc_sqd2rts_qp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_sqd2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 opt_param_mask[0x20];
- u8 reserved_4[0x20];
+ u8 reserved_at_a0[0x20];
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_5[0x80];
+ u8 reserved_at_800[0x80];
};
struct mlx5_ifc_set_roce_address_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_set_roce_address_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_2[0x10];
+ u8 reserved_at_50[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
struct mlx5_ifc_roce_addr_layout_bits roce_address;
};
struct mlx5_ifc_set_mad_demux_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
enum {
@@ -2794,89 +2794,89 @@
struct mlx5_ifc_set_mad_demux_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x20];
+ u8 reserved_at_40[0x20];
- u8 reserved_3[0x6];
+ u8 reserved_at_60[0x6];
u8 demux_mode[0x2];
- u8 reserved_4[0x18];
+ u8 reserved_at_68[0x18];
};
struct mlx5_ifc_set_l2_table_entry_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_set_l2_table_entry_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x60];
+ u8 reserved_at_40[0x60];
- u8 reserved_3[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_index[0x18];
- u8 reserved_4[0x20];
+ u8 reserved_at_c0[0x20];
- u8 reserved_5[0x13];
+ u8 reserved_at_e0[0x13];
u8 vlan_valid[0x1];
u8 vlan[0xc];
struct mlx5_ifc_mac_address_layout_bits mac_address;
- u8 reserved_6[0xc0];
+ u8 reserved_at_140[0xc0];
};
struct mlx5_ifc_set_issi_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_set_issi_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x10];
+ u8 reserved_at_40[0x10];
u8 current_issi[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_set_hca_cap_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_set_hca_cap_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
union mlx5_ifc_hca_cap_union_bits capability;
};
@@ -2890,156 +2890,156 @@
struct mlx5_ifc_set_fte_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_set_fte_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
u8 table_type[0x8];
- u8 reserved_3[0x18];
+ u8 reserved_at_88[0x18];
- u8 reserved_4[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_5[0x18];
+ u8 reserved_at_c0[0x18];
u8 modify_enable_mask[0x8];
- u8 reserved_6[0x20];
+ u8 reserved_at_e0[0x20];
u8 flow_index[0x20];
- u8 reserved_7[0xe0];
+ u8 reserved_at_120[0xe0];
struct mlx5_ifc_flow_context_bits flow_context;
};
struct mlx5_ifc_rts2rts_qp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_rts2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 opt_param_mask[0x20];
- u8 reserved_4[0x20];
+ u8 reserved_at_a0[0x20];
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_5[0x80];
+ u8 reserved_at_800[0x80];
};
struct mlx5_ifc_rtr2rts_qp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_rtr2rts_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 opt_param_mask[0x20];
- u8 reserved_4[0x20];
+ u8 reserved_at_a0[0x20];
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_5[0x80];
+ u8 reserved_at_800[0x80];
};
struct mlx5_ifc_rst2init_qp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_rst2init_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 opt_param_mask[0x20];
- u8 reserved_4[0x20];
+ u8 reserved_at_a0[0x20];
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_5[0x80];
+ u8 reserved_at_800[0x80];
};
struct mlx5_ifc_query_xrc_srq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
- u8 reserved_2[0x600];
+ u8 reserved_at_280[0x600];
u8 pas[0][0x40];
};
struct mlx5_ifc_query_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 xrc_srqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
enum {
@@ -3049,13 +3049,13 @@
struct mlx5_ifc_query_vport_state_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x20];
+ u8 reserved_at_40[0x20];
- u8 reserved_2[0x18];
+ u8 reserved_at_60[0x18];
u8 admin_state[0x4];
u8 state[0x4];
};
@@ -3067,25 +3067,25 @@
struct mlx5_ifc_query_vport_state_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_2[0xf];
+ u8 reserved_at_41[0xf];
u8 vport_number[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_vport_counter_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_traffic_counter_bits received_errors;
@@ -3111,7 +3111,7 @@
struct mlx5_ifc_traffic_counter_bits transmitted_eth_multicast;
- u8 reserved_2[0xa00];
+ u8 reserved_at_680[0xa00];
};
enum {
@@ -3120,328 +3120,328 @@
struct mlx5_ifc_query_vport_counter_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_2[0xf];
+ u8 reserved_at_41[0xf];
u8 vport_number[0x10];
- u8 reserved_3[0x60];
+ u8 reserved_at_60[0x60];
u8 clear[0x1];
- u8 reserved_4[0x1f];
+ u8 reserved_at_c1[0x1f];
- u8 reserved_5[0x20];
+ u8 reserved_at_e0[0x20];
};
struct mlx5_ifc_query_tis_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_tisc_bits tis_context;
};
struct mlx5_ifc_query_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 tisn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_tir_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0xc0];
+ u8 reserved_at_40[0xc0];
struct mlx5_ifc_tirc_bits tir_context;
};
struct mlx5_ifc_query_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 tirn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_srq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_srqc_bits srq_context_entry;
- u8 reserved_2[0x600];
+ u8 reserved_at_280[0x600];
u8 pas[0][0x40];
};
struct mlx5_ifc_query_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 srqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_sq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0xc0];
+ u8 reserved_at_40[0xc0];
struct mlx5_ifc_sqc_bits sq_context;
};
struct mlx5_ifc_query_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 sqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_special_contexts_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x20];
+ u8 reserved_at_40[0x20];
u8 resd_lkey[0x20];
};
struct mlx5_ifc_query_special_contexts_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_query_rqt_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0xc0];
+ u8 reserved_at_40[0xc0];
struct mlx5_ifc_rqtc_bits rqt_context;
};
struct mlx5_ifc_query_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 rqtn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_rq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0xc0];
+ u8 reserved_at_40[0xc0];
struct mlx5_ifc_rqc_bits rq_context;
};
struct mlx5_ifc_query_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 rqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_roce_address_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_roce_addr_layout_bits roce_address;
};
struct mlx5_ifc_query_roce_address_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 roce_address_index[0x10];
- u8 reserved_2[0x10];
+ u8 reserved_at_50[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_rmp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0xc0];
+ u8 reserved_at_40[0xc0];
struct mlx5_ifc_rmpc_bits rmp_context;
};
struct mlx5_ifc_query_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 rmpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_qp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
u8 opt_param_mask[0x20];
- u8 reserved_2[0x20];
+ u8 reserved_at_a0[0x20];
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_3[0x80];
+ u8 reserved_at_800[0x80];
u8 pas[0][0x40];
};
struct mlx5_ifc_query_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_q_counter_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
u8 rx_write_requests[0x20];
- u8 reserved_2[0x20];
+ u8 reserved_at_a0[0x20];
u8 rx_read_requests[0x20];
- u8 reserved_3[0x20];
+ u8 reserved_at_e0[0x20];
u8 rx_atomic_requests[0x20];
- u8 reserved_4[0x20];
+ u8 reserved_at_120[0x20];
u8 rx_dct_connect[0x20];
- u8 reserved_5[0x20];
+ u8 reserved_at_160[0x20];
u8 out_of_buffer[0x20];
- u8 reserved_6[0x20];
+ u8 reserved_at_1a0[0x20];
u8 out_of_sequence[0x20];
- u8 reserved_7[0x620];
+ u8 reserved_at_1e0[0x620];
};
struct mlx5_ifc_query_q_counter_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x80];
+ u8 reserved_at_40[0x80];
u8 clear[0x1];
- u8 reserved_3[0x1f];
+ u8 reserved_at_c1[0x1f];
- u8 reserved_4[0x18];
+ u8 reserved_at_e0[0x18];
u8 counter_set_id[0x8];
};
struct mlx5_ifc_query_pages_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x10];
+ u8 reserved_at_40[0x10];
u8 function_id[0x10];
u8 num_pages[0x20];
@@ -3455,55 +3455,55 @@
struct mlx5_ifc_query_pages_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x10];
+ u8 reserved_at_40[0x10];
u8 function_id[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_nic_vport_context_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
};
struct mlx5_ifc_query_nic_vport_context_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_2[0xf];
+ u8 reserved_at_41[0xf];
u8 vport_number[0x10];
- u8 reserved_3[0x5];
+ u8 reserved_at_60[0x5];
u8 allowed_list_type[0x3];
- u8 reserved_4[0x18];
+ u8 reserved_at_68[0x18];
};
struct mlx5_ifc_query_mkey_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
- u8 reserved_2[0x600];
+ u8 reserved_at_280[0x600];
u8 bsf0_klm0_pas_mtt0_1[16][0x8];
@@ -3512,265 +3512,265 @@
struct mlx5_ifc_query_mkey_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 mkey_index[0x18];
u8 pg_access[0x1];
- u8 reserved_3[0x1f];
+ u8 reserved_at_61[0x1f];
};
struct mlx5_ifc_query_mad_demux_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
u8 mad_dumux_parameters_block[0x20];
};
struct mlx5_ifc_query_mad_demux_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_query_l2_table_entry_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0xa0];
+ u8 reserved_at_40[0xa0];
- u8 reserved_2[0x13];
+ u8 reserved_at_e0[0x13];
u8 vlan_valid[0x1];
u8 vlan[0xc];
struct mlx5_ifc_mac_address_layout_bits mac_address;
- u8 reserved_3[0xc0];
+ u8 reserved_at_140[0xc0];
};
struct mlx5_ifc_query_l2_table_entry_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x60];
+ u8 reserved_at_40[0x60];
- u8 reserved_3[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_index[0x18];
- u8 reserved_4[0x140];
+ u8 reserved_at_c0[0x140];
};
struct mlx5_ifc_query_issi_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x10];
+ u8 reserved_at_40[0x10];
u8 current_issi[0x10];
- u8 reserved_2[0xa0];
+ u8 reserved_at_60[0xa0];
- u8 supported_issi_reserved[76][0x8];
+ u8 reserved_at_100[76][0x8];
u8 supported_issi_dw0[0x20];
};
struct mlx5_ifc_query_issi_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_query_hca_vport_pkey_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_pkey_bits pkey[0];
};
struct mlx5_ifc_query_hca_vport_pkey_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_2[0xb];
+ u8 reserved_at_41[0xb];
u8 port_num[0x4];
u8 vport_number[0x10];
- u8 reserved_3[0x10];
+ u8 reserved_at_60[0x10];
u8 pkey_index[0x10];
};
struct mlx5_ifc_query_hca_vport_gid_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x20];
+ u8 reserved_at_40[0x20];
u8 gids_num[0x10];
- u8 reserved_2[0x10];
+ u8 reserved_at_70[0x10];
struct mlx5_ifc_array128_auto_bits gid[0];
};
struct mlx5_ifc_query_hca_vport_gid_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_2[0xb];
+ u8 reserved_at_41[0xb];
u8 port_num[0x4];
u8 vport_number[0x10];
- u8 reserved_3[0x10];
+ u8 reserved_at_60[0x10];
u8 gid_index[0x10];
};
struct mlx5_ifc_query_hca_vport_context_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
};
struct mlx5_ifc_query_hca_vport_context_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_2[0xb];
+ u8 reserved_at_41[0xb];
u8 port_num[0x4];
u8 vport_number[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_hca_cap_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
union mlx5_ifc_hca_cap_union_bits capability;
};
struct mlx5_ifc_query_hca_cap_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_query_flow_table_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x80];
+ u8 reserved_at_40[0x80];
- u8 reserved_2[0x8];
+ u8 reserved_at_c0[0x8];
u8 level[0x8];
- u8 reserved_3[0x8];
+ u8 reserved_at_d0[0x8];
u8 log_size[0x8];
- u8 reserved_4[0x120];
+ u8 reserved_at_e0[0x120];
};
struct mlx5_ifc_query_flow_table_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
u8 table_type[0x8];
- u8 reserved_3[0x18];
+ u8 reserved_at_88[0x18];
- u8 reserved_4[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_5[0x140];
+ u8 reserved_at_c0[0x140];
};
struct mlx5_ifc_query_fte_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x1c0];
+ u8 reserved_at_40[0x1c0];
struct mlx5_ifc_flow_context_bits flow_context;
};
struct mlx5_ifc_query_fte_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
u8 table_type[0x8];
- u8 reserved_3[0x18];
+ u8 reserved_at_88[0x18];
- u8 reserved_4[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_5[0x40];
+ u8 reserved_at_c0[0x40];
u8 flow_index[0x20];
- u8 reserved_6[0xe0];
+ u8 reserved_at_120[0xe0];
};
enum {
@@ -3781,84 +3781,84 @@
struct mlx5_ifc_query_flow_group_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0xa0];
+ u8 reserved_at_40[0xa0];
u8 start_flow_index[0x20];
- u8 reserved_2[0x20];
+ u8 reserved_at_100[0x20];
u8 end_flow_index[0x20];
- u8 reserved_3[0xa0];
+ u8 reserved_at_140[0xa0];
- u8 reserved_4[0x18];
+ u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
struct mlx5_ifc_fte_match_param_bits match_criteria;
- u8 reserved_5[0xe00];
+ u8 reserved_at_1200[0xe00];
};
struct mlx5_ifc_query_flow_group_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
u8 table_type[0x8];
- u8 reserved_3[0x18];
+ u8 reserved_at_88[0x18];
- u8 reserved_4[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 group_id[0x20];
- u8 reserved_5[0x120];
+ u8 reserved_at_e0[0x120];
};
struct mlx5_ifc_query_esw_vport_context_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_esw_vport_context_bits esw_vport_context;
};
struct mlx5_ifc_query_esw_vport_context_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_2[0xf];
+ u8 reserved_at_41[0xf];
u8 vport_number[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_modify_esw_vport_context_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_esw_vport_context_fields_select_bits {
- u8 reserved[0x1c];
+ u8 reserved_at_0[0x1c];
u8 vport_cvlan_insert[0x1];
u8 vport_svlan_insert[0x1];
u8 vport_cvlan_strip[0x1];
@@ -3867,13 +3867,13 @@
struct mlx5_ifc_modify_esw_vport_context_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_2[0xf];
+ u8 reserved_at_41[0xf];
u8 vport_number[0x10];
struct mlx5_ifc_esw_vport_context_fields_select_bits field_select;
@@ -3883,124 +3883,124 @@
struct mlx5_ifc_query_eq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_eqc_bits eq_context_entry;
- u8 reserved_2[0x40];
+ u8 reserved_at_280[0x40];
u8 event_bitmask[0x40];
- u8 reserved_3[0x580];
+ u8 reserved_at_300[0x580];
u8 pas[0][0x40];
};
struct mlx5_ifc_query_eq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x18];
+ u8 reserved_at_40[0x18];
u8 eq_number[0x8];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_dctc_bits dct_context_entry;
- u8 reserved_2[0x180];
+ u8 reserved_at_280[0x180];
};
struct mlx5_ifc_query_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 dctn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_cq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_2[0x600];
+ u8 reserved_at_280[0x600];
u8 pas[0][0x40];
};
struct mlx5_ifc_query_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 cqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
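
Because each u8 element in these *_bits structures represents a single bit, offsetof() yields a field's bit offset directly, so the new names can be asserted at compile time. A hypothetical self-check for the query_cq_in struct above (kernel context assumed; this check is not part of the patch):

/* Hypothetical layout check -- not part of the patch. One array
 * element == one bit here, so offsetof() in bytes equals the bit
 * offset that each reserved_at_<offset> name encodes.
 */
#include <linux/bug.h>
#include <linux/stddef.h>

static inline void mlx5_ifc_query_cq_in_selfcheck(void)
{
	BUILD_BUG_ON(offsetof(struct mlx5_ifc_query_cq_in_bits,
			      reserved_at_40) != 0x40);
	BUILD_BUG_ON(offsetof(struct mlx5_ifc_query_cq_in_bits,
			      reserved_at_60) != 0x60);
}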
struct mlx5_ifc_query_cong_status_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x20];
+ u8 reserved_at_40[0x20];
u8 enable[0x1];
u8 tag_enable[0x1];
- u8 reserved_2[0x1e];
+ u8 reserved_at_62[0x1e];
};
struct mlx5_ifc_query_cong_status_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x18];
+ u8 reserved_at_40[0x18];
u8 priority[0x4];
u8 cong_protocol[0x4];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_cong_statistics_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
u8 cur_flows[0x20];
@@ -4014,7 +4014,7 @@
u8 cnp_handled_low[0x20];
- u8 reserved_2[0x100];
+ u8 reserved_at_140[0x100];
u8 time_stamp_high[0x20];
@@ -4030,453 +4030,453 @@
u8 cnps_sent_low[0x20];
- u8 reserved_3[0x560];
+ u8 reserved_at_320[0x560];
};
struct mlx5_ifc_query_cong_statistics_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 clear[0x1];
- u8 reserved_2[0x1f];
+ u8 reserved_at_41[0x1f];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_cong_params_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
};
struct mlx5_ifc_query_cong_params_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x1c];
+ u8 reserved_at_40[0x1c];
u8 cong_protocol[0x4];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_query_adapter_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_query_adapter_param_block_bits query_adapter_struct;
};
struct mlx5_ifc_query_adapter_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_qp_2rst_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_qp_2rst_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_qp_2err_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_qp_2err_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_page_fault_resume_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_page_fault_resume_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 error[0x1];
- u8 reserved_2[0x4];
+ u8 reserved_at_41[0x4];
u8 rdma[0x1];
u8 read_write[0x1];
u8 req_res[0x1];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_nop_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_nop_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_vport_state_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_vport_state_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_2[0xf];
+ u8 reserved_at_41[0xf];
u8 vport_number[0x10];
- u8 reserved_3[0x18];
+ u8 reserved_at_60[0x18];
u8 admin_state[0x4];
- u8 reserved_4[0x4];
+ u8 reserved_at_7c[0x4];
};
struct mlx5_ifc_modify_tis_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_tis_bitmask_bits {
- u8 reserved_0[0x20];
+ u8 reserved_at_0[0x20];
- u8 reserved_1[0x1f];
+ u8 reserved_at_20[0x1f];
u8 prio[0x1];
};
struct mlx5_ifc_modify_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 tisn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
struct mlx5_ifc_modify_tis_bitmask_bits bitmask;
- u8 reserved_4[0x40];
+ u8 reserved_at_c0[0x40];
struct mlx5_ifc_tisc_bits ctx;
};
struct mlx5_ifc_modify_tir_bitmask_bits {
- u8 reserved_0[0x20];
+ u8 reserved_at_0[0x20];
- u8 reserved_1[0x1b];
+ u8 reserved_at_20[0x1b];
u8 self_lb_en[0x1];
- u8 reserved_2[0x3];
+ u8 reserved_at_3c[0x3];
u8 lro[0x1];
};
struct mlx5_ifc_modify_tir_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 tirn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
struct mlx5_ifc_modify_tir_bitmask_bits bitmask;
- u8 reserved_4[0x40];
+ u8 reserved_at_c0[0x40];
struct mlx5_ifc_tirc_bits ctx;
};
struct mlx5_ifc_modify_sq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 sq_state[0x4];
- u8 reserved_2[0x4];
+ u8 reserved_at_44[0x4];
u8 sqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 modify_bitmask[0x40];
- u8 reserved_4[0x40];
+ u8 reserved_at_c0[0x40];
struct mlx5_ifc_sqc_bits ctx;
};
struct mlx5_ifc_modify_rqt_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_rqt_bitmask_bits {
- u8 reserved[0x20];
+ u8 reserved_at_0[0x20];
- u8 reserved1[0x1f];
+ u8 reserved_at_20[0x1f];
u8 rqn_list[0x1];
};
struct mlx5_ifc_modify_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 rqtn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
struct mlx5_ifc_rqt_bitmask_bits bitmask;
- u8 reserved_4[0x40];
+ u8 reserved_at_c0[0x40];
struct mlx5_ifc_rqtc_bits ctx;
};
struct mlx5_ifc_modify_rq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 rq_state[0x4];
- u8 reserved_2[0x4];
+ u8 reserved_at_44[0x4];
u8 rqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 modify_bitmask[0x40];
- u8 reserved_4[0x40];
+ u8 reserved_at_c0[0x40];
struct mlx5_ifc_rqc_bits ctx;
};
struct mlx5_ifc_modify_rmp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_rmp_bitmask_bits {
- u8 reserved[0x20];
+ u8 reserved_at_0[0x20];
- u8 reserved1[0x1f];
+ u8 reserved_at_20[0x1f];
u8 lwm[0x1];
};
struct mlx5_ifc_modify_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 rmp_state[0x4];
- u8 reserved_2[0x4];
+ u8 reserved_at_44[0x4];
u8 rmpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
struct mlx5_ifc_rmp_bitmask_bits bitmask;
- u8 reserved_4[0x40];
+ u8 reserved_at_c0[0x40];
struct mlx5_ifc_rmpc_bits ctx;
};
struct mlx5_ifc_modify_nic_vport_context_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_nic_vport_field_select_bits {
- u8 reserved_0[0x19];
+ u8 reserved_at_0[0x19];
u8 mtu[0x1];
u8 change_event[0x1];
u8 promisc[0x1];
u8 permanent_address[0x1];
u8 addresses_list[0x1];
u8 roce_en[0x1];
- u8 reserved_1[0x1];
+ u8 reserved_at_1f[0x1];
};
struct mlx5_ifc_modify_nic_vport_context_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_2[0xf];
+ u8 reserved_at_41[0xf];
u8 vport_number[0x10];
struct mlx5_ifc_modify_nic_vport_field_select_bits field_select;
- u8 reserved_3[0x780];
+ u8 reserved_at_80[0x780];
struct mlx5_ifc_nic_vport_context_bits nic_vport_context;
};
struct mlx5_ifc_modify_hca_vport_context_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_hca_vport_context_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 other_vport[0x1];
- u8 reserved_2[0xb];
+ u8 reserved_at_41[0xb];
u8 port_num[0x4];
u8 vport_number[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
struct mlx5_ifc_hca_vport_context_bits hca_vport_context;
};
struct mlx5_ifc_modify_cq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
enum {
@@ -4486,83 +4486,83 @@
struct mlx5_ifc_modify_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 cqn[0x18];
union mlx5_ifc_modify_field_select_resize_field_select_auto_bits modify_field_select_resize_field_select;
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_3[0x600];
+ u8 reserved_at_280[0x600];
u8 pas[0][0x40];
};
struct mlx5_ifc_modify_cong_status_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_cong_status_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x18];
+ u8 reserved_at_40[0x18];
u8 priority[0x4];
u8 cong_protocol[0x4];
u8 enable[0x1];
u8 tag_enable[0x1];
- u8 reserved_3[0x1e];
+ u8 reserved_at_62[0x1e];
};
struct mlx5_ifc_modify_cong_params_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_cong_params_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x1c];
+ u8 reserved_at_40[0x1c];
u8 cong_protocol[0x4];
union mlx5_ifc_field_select_802_1_r_roce_auto_bits field_select;
- u8 reserved_3[0x80];
+ u8 reserved_at_80[0x80];
union mlx5_ifc_cong_control_roce_ecn_auto_bits congestion_parameters;
};
struct mlx5_ifc_manage_pages_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
u8 output_num_entries[0x20];
- u8 reserved_1[0x20];
+ u8 reserved_at_60[0x20];
u8 pas[0][0x40];
};
@@ -4575,12 +4575,12 @@
struct mlx5_ifc_manage_pages_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x10];
+ u8 reserved_at_40[0x10];
u8 function_id[0x10];
u8 input_num_entries[0x20];
@@ -4590,117 +4590,117 @@
struct mlx5_ifc_mad_ifc_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
u8 response_mad_packet[256][0x8];
};
struct mlx5_ifc_mad_ifc_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 remote_lid[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_50[0x8];
u8 port[0x8];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 mad[256][0x8];
};
struct mlx5_ifc_init_hca_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_init_hca_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_init2rtr_qp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_init2rtr_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 opt_param_mask[0x20];
- u8 reserved_4[0x20];
+ u8 reserved_at_a0[0x20];
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_5[0x80];
+ u8 reserved_at_800[0x80];
};
struct mlx5_ifc_init2init_qp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_init2init_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 opt_param_mask[0x20];
- u8 reserved_4[0x20];
+ u8 reserved_at_a0[0x20];
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_5[0x80];
+ u8 reserved_at_800[0x80];
};
struct mlx5_ifc_get_dropped_packet_log_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
u8 packet_headers_log[128][0x8];
@@ -4709,1029 +4709,1029 @@
struct mlx5_ifc_get_dropped_packet_log_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_gen_eqe_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x18];
+ u8 reserved_at_40[0x18];
u8 eq_number[0x8];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 eqe[64][0x8];
};
struct mlx5_ifc_gen_eq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_enable_hca_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x20];
+ u8 reserved_at_40[0x20];
};
struct mlx5_ifc_enable_hca_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x10];
+ u8 reserved_at_40[0x10];
u8 function_id[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_drain_dct_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_drain_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 dctn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_disable_hca_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x20];
+ u8 reserved_at_40[0x20];
};
struct mlx5_ifc_disable_hca_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x10];
+ u8 reserved_at_40[0x10];
u8 function_id[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_detach_from_mcg_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_detach_from_mcg_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 multicast_gid[16][0x8];
};
struct mlx5_ifc_destroy_xrc_srq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 xrc_srqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_tis_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 tisn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_tir_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 tirn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_srq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 srqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_sq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 sqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_rqt_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 rqtn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_rq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 rqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_rmp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 rmpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_qp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_psv_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_psv_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 psvn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_mkey_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_mkey_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 mkey_index[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_flow_table_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_flow_table_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
u8 table_type[0x8];
- u8 reserved_3[0x18];
+ u8 reserved_at_88[0x18];
- u8 reserved_4[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_5[0x140];
+ u8 reserved_at_c0[0x140];
};
struct mlx5_ifc_destroy_flow_group_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_flow_group_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
u8 table_type[0x8];
- u8 reserved_3[0x18];
+ u8 reserved_at_88[0x18];
- u8 reserved_4[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_id[0x18];
u8 group_id[0x20];
- u8 reserved_5[0x120];
+ u8 reserved_at_e0[0x120];
};
struct mlx5_ifc_destroy_eq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_eq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x18];
+ u8 reserved_at_40[0x18];
u8 eq_number[0x8];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_dct_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 dctn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_destroy_cq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_destroy_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 cqn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
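
None of the reserved_at_* fields are referenced by driver code; named fields are read and written through the MLX5_SET()/MLX5_GET() accessors, which derive bit positions from the same struct layout, so these renames are layout-neutral. A short usage sketch for the DESTROY_CQ command whose layout appears just above (standard mlx5 driver idiom, not code added by this patch):

/* Usage sketch -- not part of the patch. MLX5_ST_SZ_DW() sizes the
 * command mailbox from the *_bits struct; MLX5_SET() writes the named
 * fields, and the reserved_at_* regions stay zeroed.
 */
#include <linux/mlx5/driver.h>

static int destroy_cq_sketch(struct mlx5_core_dev *dev, u32 cqn)
{
	u32 in[MLX5_ST_SZ_DW(destroy_cq_in)] = {0};
	u32 out[MLX5_ST_SZ_DW(destroy_cq_out)] = {0};

	MLX5_SET(destroy_cq_in, in, opcode, MLX5_CMD_OP_DESTROY_CQ);
	MLX5_SET(destroy_cq_in, in, cqn, cqn);
	return mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
}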
struct mlx5_ifc_delete_vxlan_udp_dport_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_delete_vxlan_udp_dport_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x20];
+ u8 reserved_at_40[0x20];
- u8 reserved_3[0x10];
+ u8 reserved_at_60[0x10];
u8 vxlan_udp_port[0x10];
};
struct mlx5_ifc_delete_l2_table_entry_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_delete_l2_table_entry_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x60];
+ u8 reserved_at_40[0x60];
- u8 reserved_3[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_index[0x18];
- u8 reserved_4[0x140];
+ u8 reserved_at_c0[0x140];
};
struct mlx5_ifc_delete_fte_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_delete_fte_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
u8 table_type[0x8];
- u8 reserved_3[0x18];
+ u8 reserved_at_88[0x18];
- u8 reserved_4[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_5[0x40];
+ u8 reserved_at_c0[0x40];
u8 flow_index[0x20];
- u8 reserved_6[0xe0];
+ u8 reserved_at_120[0xe0];
};
struct mlx5_ifc_dealloc_xrcd_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_dealloc_xrcd_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 xrcd[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_dealloc_uar_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_dealloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 uar[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_dealloc_transport_domain_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_dealloc_transport_domain_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 transport_domain[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_dealloc_q_counter_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_dealloc_q_counter_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x18];
+ u8 reserved_at_40[0x18];
u8 counter_set_id[0x8];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_dealloc_pd_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_dealloc_pd_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 pd[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_xrc_srq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 xrc_srqn[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_xrc_srqc_bits xrc_srq_context_entry;
- u8 reserved_3[0x600];
+ u8 reserved_at_280[0x600];
u8 pas[0][0x40];
};
struct mlx5_ifc_create_tis_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 tisn[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_tis_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0xc0];
+ u8 reserved_at_40[0xc0];
struct mlx5_ifc_tisc_bits ctx;
};
struct mlx5_ifc_create_tir_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 tirn[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_tir_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0xc0];
+ u8 reserved_at_40[0xc0];
struct mlx5_ifc_tirc_bits ctx;
};
struct mlx5_ifc_create_srq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 srqn[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_srqc_bits srq_context_entry;
- u8 reserved_3[0x600];
+ u8 reserved_at_280[0x600];
u8 pas[0][0x40];
};
struct mlx5_ifc_create_sq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 sqn[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_sq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0xc0];
+ u8 reserved_at_40[0xc0];
struct mlx5_ifc_sqc_bits ctx;
};
struct mlx5_ifc_create_rqt_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 rqtn[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_rqt_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0xc0];
+ u8 reserved_at_40[0xc0];
struct mlx5_ifc_rqtc_bits rqt_context;
};
struct mlx5_ifc_create_rq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 rqn[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0xc0];
+ u8 reserved_at_40[0xc0];
struct mlx5_ifc_rqc_bits ctx;
};
struct mlx5_ifc_create_rmp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 rmpn[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_rmp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0xc0];
+ u8 reserved_at_40[0xc0];
struct mlx5_ifc_rmpc_bits ctx;
};
struct mlx5_ifc_create_qp_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_qp_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
u8 opt_param_mask[0x20];
- u8 reserved_3[0x20];
+ u8 reserved_at_a0[0x20];
struct mlx5_ifc_qpc_bits qpc;
- u8 reserved_4[0x80];
+ u8 reserved_at_800[0x80];
u8 pas[0][0x40];
};
struct mlx5_ifc_create_psv_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
- u8 reserved_2[0x8];
+ u8 reserved_at_80[0x8];
u8 psv0_index[0x18];
- u8 reserved_3[0x8];
+ u8 reserved_at_a0[0x8];
u8 psv1_index[0x18];
- u8 reserved_4[0x8];
+ u8 reserved_at_c0[0x8];
u8 psv2_index[0x18];
- u8 reserved_5[0x8];
+ u8 reserved_at_e0[0x8];
u8 psv3_index[0x18];
};
struct mlx5_ifc_create_psv_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 num_psv[0x4];
- u8 reserved_2[0x4];
+ u8 reserved_at_44[0x4];
u8 pd[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_mkey_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 mkey_index[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_mkey_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x20];
+ u8 reserved_at_40[0x20];
u8 pg_access[0x1];
- u8 reserved_3[0x1f];
+ u8 reserved_at_61[0x1f];
struct mlx5_ifc_mkc_bits memory_key_mkey_entry;
- u8 reserved_4[0x80];
+ u8 reserved_at_280[0x80];
u8 translations_octword_actual_size[0x20];
- u8 reserved_5[0x560];
+ u8 reserved_at_320[0x560];
u8 klm_pas_mtt[0][0x20];
};
struct mlx5_ifc_create_flow_table_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 table_id[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_flow_table_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
u8 table_type[0x8];
- u8 reserved_3[0x18];
+ u8 reserved_at_88[0x18];
- u8 reserved_4[0x20];
+ u8 reserved_at_a0[0x20];
- u8 reserved_5[0x4];
+ u8 reserved_at_c0[0x4];
u8 table_miss_mode[0x4];
u8 level[0x8];
- u8 reserved_6[0x8];
+ u8 reserved_at_d0[0x8];
u8 log_size[0x8];
- u8 reserved_7[0x8];
+ u8 reserved_at_e0[0x8];
u8 table_miss_id[0x18];
- u8 reserved_8[0x100];
+ u8 reserved_at_100[0x100];
};
struct mlx5_ifc_create_flow_group_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 group_id[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
enum {
@@ -5742,134 +5742,134 @@
struct mlx5_ifc_create_flow_group_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
u8 table_type[0x8];
- u8 reserved_3[0x18];
+ u8 reserved_at_88[0x18];
- u8 reserved_4[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_5[0x20];
+ u8 reserved_at_c0[0x20];
u8 start_flow_index[0x20];
- u8 reserved_6[0x20];
+ u8 reserved_at_100[0x20];
u8 end_flow_index[0x20];
- u8 reserved_7[0xa0];
+ u8 reserved_at_140[0xa0];
- u8 reserved_8[0x18];
+ u8 reserved_at_1e0[0x18];
u8 match_criteria_enable[0x8];
struct mlx5_ifc_fte_match_param_bits match_criteria;
- u8 reserved_9[0xe00];
+ u8 reserved_at_1200[0xe00];
};
struct mlx5_ifc_create_eq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x18];
+ u8 reserved_at_40[0x18];
u8 eq_number[0x8];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_eq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_eqc_bits eq_context_entry;
- u8 reserved_3[0x40];
+ u8 reserved_at_280[0x40];
u8 event_bitmask[0x40];
- u8 reserved_4[0x580];
+ u8 reserved_at_300[0x580];
u8 pas[0][0x40];
};
struct mlx5_ifc_create_dct_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 dctn[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_dctc_bits dct_context_entry;
- u8 reserved_3[0x180];
+ u8 reserved_at_280[0x180];
};
struct mlx5_ifc_create_cq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 cqn[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_create_cq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
struct mlx5_ifc_cqc_bits cq_context;
- u8 reserved_3[0x600];
+ u8 reserved_at_280[0x600];
u8 pas[0][0x40];
};
struct mlx5_ifc_config_int_moderation_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x4];
+ u8 reserved_at_40[0x4];
u8 min_delay[0xc];
u8 int_vector[0x10];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
enum {
@@ -5879,49 +5879,49 @@
struct mlx5_ifc_config_int_moderation_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x4];
+ u8 reserved_at_40[0x4];
u8 min_delay[0xc];
u8 int_vector[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_attach_to_mcg_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_attach_to_mcg_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 qpn[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
u8 multicast_gid[16][0x8];
};
struct mlx5_ifc_arm_xrc_srq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
enum {
@@ -5930,25 +5930,25 @@
struct mlx5_ifc_arm_xrc_srq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 xrc_srqn[0x18];
- u8 reserved_3[0x10];
+ u8 reserved_at_60[0x10];
u8 lwm[0x10];
};
struct mlx5_ifc_arm_rq_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
enum {
@@ -5957,179 +5957,179 @@
struct mlx5_ifc_arm_rq_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 srq_number[0x18];
- u8 reserved_3[0x10];
+ u8 reserved_at_60[0x10];
u8 lwm[0x10];
};
struct mlx5_ifc_arm_dct_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_arm_dct_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_40[0x8];
u8 dct_number[0x18];
- u8 reserved_3[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_alloc_xrcd_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 xrcd[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_alloc_xrcd_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_alloc_uar_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 uar[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_alloc_uar_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_alloc_transport_domain_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 transport_domain[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_alloc_transport_domain_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_alloc_q_counter_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x18];
+ u8 reserved_at_40[0x18];
u8 counter_set_id[0x8];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_alloc_q_counter_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_alloc_pd_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x8];
+ u8 reserved_at_40[0x8];
u8 pd[0x18];
- u8 reserved_2[0x20];
+ u8 reserved_at_60[0x20];
};
struct mlx5_ifc_alloc_pd_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_add_vxlan_udp_dport_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_add_vxlan_udp_dport_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x20];
+ u8 reserved_at_40[0x20];
- u8 reserved_3[0x10];
+ u8 reserved_at_60[0x10];
u8 vxlan_udp_port[0x10];
};
struct mlx5_ifc_access_register_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
u8 register_data[0][0x20];
};
@@ -6141,12 +6141,12 @@
struct mlx5_ifc_access_register_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x10];
+ u8 reserved_at_40[0x10];
u8 register_id[0x10];
u8 argument[0x20];
@@ -6159,24 +6159,24 @@
u8 version[0x4];
u8 local_port[0x8];
u8 pnat[0x2];
- u8 reserved_0[0x2];
+ u8 reserved_at_12[0x2];
u8 lane[0x4];
- u8 reserved_1[0x8];
+ u8 reserved_at_18[0x8];
- u8 reserved_2[0x20];
+ u8 reserved_at_20[0x20];
- u8 reserved_3[0x7];
+ u8 reserved_at_40[0x7];
u8 polarity[0x1];
u8 ob_tap0[0x8];
u8 ob_tap1[0x8];
u8 ob_tap2[0x8];
- u8 reserved_4[0xc];
+ u8 reserved_at_60[0xc];
u8 ob_preemp_mode[0x4];
u8 ob_reg[0x8];
u8 ob_bias[0x8];
- u8 reserved_5[0x20];
+ u8 reserved_at_80[0x20];
};
struct mlx5_ifc_slrg_reg_bits {
@@ -6184,36 +6184,36 @@
u8 version[0x4];
u8 local_port[0x8];
u8 pnat[0x2];
- u8 reserved_0[0x2];
+ u8 reserved_at_12[0x2];
u8 lane[0x4];
- u8 reserved_1[0x8];
+ u8 reserved_at_18[0x8];
u8 time_to_link_up[0x10];
- u8 reserved_2[0xc];
+ u8 reserved_at_30[0xc];
u8 grade_lane_speed[0x4];
u8 grade_version[0x8];
u8 grade[0x18];
- u8 reserved_3[0x4];
+ u8 reserved_at_60[0x4];
u8 height_grade_type[0x4];
u8 height_grade[0x18];
u8 height_dz[0x10];
u8 height_dv[0x10];
- u8 reserved_4[0x10];
+ u8 reserved_at_a0[0x10];
u8 height_sigma[0x10];
- u8 reserved_5[0x20];
+ u8 reserved_at_c0[0x20];
- u8 reserved_6[0x4];
+ u8 reserved_at_e0[0x4];
u8 phase_grade_type[0x4];
u8 phase_grade[0x18];
- u8 reserved_7[0x8];
+ u8 reserved_at_100[0x8];
u8 phase_eo_pos[0x8];
- u8 reserved_8[0x8];
+ u8 reserved_at_110[0x8];
u8 phase_eo_neg[0x8];
u8 ffe_set_tested[0x10];
@@ -6221,70 +6221,70 @@
};
struct mlx5_ifc_pvlc_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_2[0x1c];
+ u8 reserved_at_20[0x1c];
u8 vl_hw_cap[0x4];
- u8 reserved_3[0x1c];
+ u8 reserved_at_40[0x1c];
u8 vl_admin[0x4];
- u8 reserved_4[0x1c];
+ u8 reserved_at_60[0x1c];
u8 vl_operational[0x4];
};
struct mlx5_ifc_pude_reg_bits {
u8 swid[0x8];
u8 local_port[0x8];
- u8 reserved_0[0x4];
+ u8 reserved_at_10[0x4];
u8 admin_status[0x4];
- u8 reserved_1[0x4];
+ u8 reserved_at_18[0x4];
u8 oper_status[0x4];
- u8 reserved_2[0x60];
+ u8 reserved_at_20[0x60];
};
struct mlx5_ifc_ptys_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0xd];
+ u8 reserved_at_10[0xd];
u8 proto_mask[0x3];
- u8 reserved_2[0x40];
+ u8 reserved_at_20[0x40];
u8 eth_proto_capability[0x20];
u8 ib_link_width_capability[0x10];
u8 ib_proto_capability[0x10];
- u8 reserved_3[0x20];
+ u8 reserved_at_a0[0x20];
u8 eth_proto_admin[0x20];
u8 ib_link_width_admin[0x10];
u8 ib_proto_admin[0x10];
- u8 reserved_4[0x20];
+ u8 reserved_at_100[0x20];
u8 eth_proto_oper[0x20];
u8 ib_link_width_oper[0x10];
u8 ib_proto_oper[0x10];
- u8 reserved_5[0x20];
+ u8 reserved_at_160[0x20];
u8 eth_proto_lp_advertise[0x20];
- u8 reserved_6[0x60];
+ u8 reserved_at_1a0[0x60];
};
struct mlx5_ifc_ptas_reg_bits {
- u8 reserved_0[0x20];
+ u8 reserved_at_0[0x20];
u8 algorithm_options[0x10];
- u8 reserved_1[0x4];
+ u8 reserved_at_30[0x4];
u8 repetitions_mode[0x4];
u8 num_of_repetitions[0x8];
@@ -6310,13 +6310,13 @@
u8 ndeo_error_threshold[0x10];
u8 mixer_offset_step_size[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_110[0x8];
u8 mix90_phase_for_voltage_bath[0x8];
u8 mixer_offset_start[0x10];
u8 mixer_offset_end[0x10];
- u8 reserved_3[0x15];
+ u8 reserved_at_140[0x15];
u8 ber_test_time[0xb];
};
@@ -6324,154 +6324,154 @@
u8 swid[0x8];
u8 local_port[0x8];
u8 sub_port[0x8];
- u8 reserved_0[0x8];
+ u8 reserved_at_18[0x8];
- u8 reserved_1[0x20];
+ u8 reserved_at_20[0x20];
};
struct mlx5_ifc_pqdr_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0x5];
+ u8 reserved_at_10[0x5];
u8 prio[0x3];
- u8 reserved_2[0x6];
+ u8 reserved_at_18[0x6];
u8 mode[0x2];
- u8 reserved_3[0x20];
+ u8 reserved_at_20[0x20];
- u8 reserved_4[0x10];
+ u8 reserved_at_40[0x10];
u8 min_threshold[0x10];
- u8 reserved_5[0x10];
+ u8 reserved_at_60[0x10];
u8 max_threshold[0x10];
- u8 reserved_6[0x10];
+ u8 reserved_at_80[0x10];
u8 mark_probability_denominator[0x10];
- u8 reserved_7[0x60];
+ u8 reserved_at_a0[0x60];
};
struct mlx5_ifc_ppsc_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_2[0x60];
+ u8 reserved_at_20[0x60];
- u8 reserved_3[0x1c];
+ u8 reserved_at_80[0x1c];
u8 wrps_admin[0x4];
- u8 reserved_4[0x1c];
+ u8 reserved_at_a0[0x1c];
u8 wrps_status[0x4];
- u8 reserved_5[0x8];
+ u8 reserved_at_c0[0x8];
u8 up_threshold[0x8];
- u8 reserved_6[0x8];
+ u8 reserved_at_d0[0x8];
u8 down_threshold[0x8];
- u8 reserved_7[0x20];
+ u8 reserved_at_e0[0x20];
- u8 reserved_8[0x1c];
+ u8 reserved_at_100[0x1c];
u8 srps_admin[0x4];
- u8 reserved_9[0x1c];
+ u8 reserved_at_120[0x1c];
u8 srps_status[0x4];
- u8 reserved_10[0x40];
+ u8 reserved_at_140[0x40];
};
struct mlx5_ifc_pplr_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_2[0x8];
+ u8 reserved_at_20[0x8];
u8 lb_cap[0x8];
- u8 reserved_3[0x8];
+ u8 reserved_at_30[0x8];
u8 lb_en[0x8];
};
struct mlx5_ifc_pplm_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_2[0x20];
+ u8 reserved_at_20[0x20];
u8 port_profile_mode[0x8];
u8 static_port_profile[0x8];
u8 active_port_profile[0x8];
- u8 reserved_3[0x8];
+ u8 reserved_at_58[0x8];
u8 retransmission_active[0x8];
u8 fec_mode_active[0x18];
- u8 reserved_4[0x20];
+ u8 reserved_at_80[0x20];
};
struct mlx5_ifc_ppcnt_reg_bits {
u8 swid[0x8];
u8 local_port[0x8];
u8 pnat[0x2];
- u8 reserved_0[0x8];
+ u8 reserved_at_12[0x8];
u8 grp[0x6];
u8 clr[0x1];
- u8 reserved_1[0x1c];
+ u8 reserved_at_21[0x1c];
u8 prio_tc[0x3];
union mlx5_ifc_eth_cntrs_grp_data_layout_auto_bits counter_set;
};
struct mlx5_ifc_ppad_reg_bits {
- u8 reserved_0[0x3];
+ u8 reserved_at_0[0x3];
u8 single_mac[0x1];
- u8 reserved_1[0x4];
+ u8 reserved_at_4[0x4];
u8 local_port[0x8];
u8 mac_47_32[0x10];
u8 mac_31_0[0x20];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_pmtu_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
u8 max_mtu[0x10];
- u8 reserved_2[0x10];
+ u8 reserved_at_30[0x10];
u8 admin_mtu[0x10];
- u8 reserved_3[0x10];
+ u8 reserved_at_50[0x10];
u8 oper_mtu[0x10];
- u8 reserved_4[0x10];
+ u8 reserved_at_70[0x10];
};
struct mlx5_ifc_pmpr_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 module[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_2[0x18];
+ u8 reserved_at_20[0x18];
u8 attenuation_5g[0x8];
- u8 reserved_3[0x18];
+ u8 reserved_at_40[0x18];
u8 attenuation_7g[0x8];
- u8 reserved_4[0x18];
+ u8 reserved_at_60[0x18];
u8 attenuation_12g[0x8];
};
struct mlx5_ifc_pmpe_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 module[0x8];
- u8 reserved_1[0xc];
+ u8 reserved_at_10[0xc];
u8 module_status[0x4];
- u8 reserved_2[0x60];
+ u8 reserved_at_20[0x60];
};
struct mlx5_ifc_pmpc_reg_bits {
@@ -6479,20 +6479,20 @@
};
struct mlx5_ifc_pmlpn_reg_bits {
- u8 reserved_0[0x4];
+ u8 reserved_at_0[0x4];
u8 mlpn_status[0x4];
u8 local_port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
u8 e[0x1];
- u8 reserved_2[0x1f];
+ u8 reserved_at_21[0x1f];
};
struct mlx5_ifc_pmlp_reg_bits {
u8 rxtx[0x1];
- u8 reserved_0[0x7];
+ u8 reserved_at_1[0x7];
u8 local_port[0x8];
- u8 reserved_1[0x8];
+ u8 reserved_at_10[0x8];
u8 width[0x8];
u8 lane0_module_mapping[0x20];
@@ -6503,36 +6503,36 @@
u8 lane3_module_mapping[0x20];
- u8 reserved_2[0x160];
+ u8 reserved_at_a0[0x160];
};
struct mlx5_ifc_pmaos_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 module[0x8];
- u8 reserved_1[0x4];
+ u8 reserved_at_10[0x4];
u8 admin_status[0x4];
- u8 reserved_2[0x4];
+ u8 reserved_at_18[0x4];
u8 oper_status[0x4];
u8 ase[0x1];
u8 ee[0x1];
- u8 reserved_3[0x1c];
+ u8 reserved_at_22[0x1c];
u8 e[0x2];
- u8 reserved_4[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_plpc_reg_bits {
- u8 reserved_0[0x4];
+ u8 reserved_at_0[0x4];
u8 profile_id[0xc];
- u8 reserved_1[0x4];
+ u8 reserved_at_10[0x4];
u8 proto_mask[0x4];
- u8 reserved_2[0x8];
+ u8 reserved_at_18[0x8];
- u8 reserved_3[0x10];
+ u8 reserved_at_20[0x10];
u8 lane_speed[0x10];
- u8 reserved_4[0x17];
+ u8 reserved_at_40[0x17];
u8 lpbf[0x1];
u8 fec_mode_policy[0x8];
@@ -6545,44 +6545,44 @@
u8 retransmission_request_admin[0x8];
u8 fec_mode_request_admin[0x18];
- u8 reserved_5[0x80];
+ u8 reserved_at_c0[0x80];
};
struct mlx5_ifc_plib_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0x8];
+ u8 reserved_at_10[0x8];
u8 ib_port[0x8];
- u8 reserved_2[0x60];
+ u8 reserved_at_20[0x60];
};
struct mlx5_ifc_plbf_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0xd];
+ u8 reserved_at_10[0xd];
u8 lbf_mode[0x3];
- u8 reserved_2[0x20];
+ u8 reserved_at_20[0x20];
};
struct mlx5_ifc_pipg_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
u8 dic[0x1];
- u8 reserved_2[0x19];
+ u8 reserved_at_21[0x19];
u8 ipg[0x4];
- u8 reserved_3[0x2];
+ u8 reserved_at_3e[0x2];
};
struct mlx5_ifc_pifr_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_2[0xe0];
+ u8 reserved_at_20[0xe0];
u8 port_filter[8][0x20];
@@ -6590,36 +6590,36 @@
};
struct mlx5_ifc_pfcc_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
u8 ppan[0x4];
- u8 reserved_2[0x4];
+ u8 reserved_at_24[0x4];
u8 prio_mask_tx[0x8];
- u8 reserved_3[0x8];
+ u8 reserved_at_30[0x8];
u8 prio_mask_rx[0x8];
u8 pptx[0x1];
u8 aptx[0x1];
- u8 reserved_4[0x6];
+ u8 reserved_at_42[0x6];
u8 pfctx[0x8];
- u8 reserved_5[0x10];
+ u8 reserved_at_50[0x10];
u8 pprx[0x1];
u8 aprx[0x1];
- u8 reserved_6[0x6];
+ u8 reserved_at_62[0x6];
u8 pfcrx[0x8];
- u8 reserved_7[0x10];
+ u8 reserved_at_70[0x10];
- u8 reserved_8[0x80];
+ u8 reserved_at_80[0x80];
};
struct mlx5_ifc_pelc_reg_bits {
u8 op[0x4];
- u8 reserved_0[0x4];
+ u8 reserved_at_4[0x4];
u8 local_port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
u8 op_admin[0x8];
u8 op_capability[0x8];
@@ -6634,28 +6634,28 @@
u8 active[0x40];
- u8 reserved_2[0x80];
+ u8 reserved_at_140[0x80];
};
struct mlx5_ifc_peir_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_2[0xc];
+ u8 reserved_at_20[0xc];
u8 error_count[0x4];
- u8 reserved_3[0x10];
+ u8 reserved_at_30[0x10];
- u8 reserved_4[0xc];
+ u8 reserved_at_40[0xc];
u8 lane[0x4];
- u8 reserved_5[0x8];
+ u8 reserved_at_50[0x8];
u8 error_type[0x8];
};
struct mlx5_ifc_pcap_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 local_port[0x8];
- u8 reserved_1[0x10];
+ u8 reserved_at_10[0x10];
u8 port_capability_mask[4][0x20];
};
@@ -6663,46 +6663,46 @@
struct mlx5_ifc_paos_reg_bits {
u8 swid[0x8];
u8 local_port[0x8];
- u8 reserved_0[0x4];
+ u8 reserved_at_10[0x4];
u8 admin_status[0x4];
- u8 reserved_1[0x4];
+ u8 reserved_at_18[0x4];
u8 oper_status[0x4];
u8 ase[0x1];
u8 ee[0x1];
- u8 reserved_2[0x1c];
+ u8 reserved_at_22[0x1c];
u8 e[0x2];
- u8 reserved_3[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_pamp_reg_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 opamp_group[0x8];
- u8 reserved_1[0xc];
+ u8 reserved_at_10[0xc];
u8 opamp_group_type[0x4];
u8 start_index[0x10];
- u8 reserved_2[0x4];
+ u8 reserved_at_30[0x4];
u8 num_of_indices[0xc];
u8 index_data[18][0x10];
};
struct mlx5_ifc_lane_2_module_mapping_bits {
- u8 reserved_0[0x6];
+ u8 reserved_at_0[0x6];
u8 rx_lane[0x2];
- u8 reserved_1[0x6];
+ u8 reserved_at_8[0x6];
u8 tx_lane[0x2];
- u8 reserved_2[0x8];
+ u8 reserved_at_10[0x8];
u8 module[0x8];
};
struct mlx5_ifc_bufferx_reg_bits {
- u8 reserved_0[0x6];
+ u8 reserved_at_0[0x6];
u8 lossy[0x1];
u8 epsb[0x1];
- u8 reserved_1[0xc];
+ u8 reserved_at_8[0xc];
u8 size[0xc];
u8 xoff_threshold[0x10];
@@ -6714,21 +6714,21 @@
};
struct mlx5_ifc_register_power_settings_bits {
- u8 reserved_0[0x18];
+ u8 reserved_at_0[0x18];
u8 power_settings_level[0x8];
- u8 reserved_1[0x60];
+ u8 reserved_at_20[0x60];
};
struct mlx5_ifc_register_host_endianness_bits {
u8 he[0x1];
- u8 reserved_0[0x1f];
+ u8 reserved_at_1[0x1f];
- u8 reserved_1[0x60];
+ u8 reserved_at_20[0x60];
};
struct mlx5_ifc_umr_pointer_desc_argument_bits {
- u8 reserved_0[0x20];
+ u8 reserved_at_0[0x20];
u8 mkey[0x20];
@@ -6741,7 +6741,7 @@
u8 dc_key[0x40];
u8 ext[0x1];
- u8 reserved_0[0x7];
+ u8 reserved_at_41[0x7];
u8 destination_qp_dct[0x18];
u8 static_rate[0x4];
@@ -6750,7 +6750,7 @@
u8 mlid[0x7];
u8 rlid_udp_sport[0x10];
- u8 reserved_1[0x20];
+ u8 reserved_at_80[0x20];
u8 rmac_47_16[0x20];
@@ -6758,9 +6758,9 @@
u8 tclass[0x8];
u8 hop_limit[0x8];
- u8 reserved_2[0x1];
+ u8 reserved_at_e0[0x1];
u8 grh[0x1];
- u8 reserved_3[0x2];
+ u8 reserved_at_e2[0x2];
u8 src_addr_index[0x8];
u8 flow_label[0x14];
@@ -6768,27 +6768,27 @@
};
struct mlx5_ifc_pages_req_event_bits {
- u8 reserved_0[0x10];
+ u8 reserved_at_0[0x10];
u8 function_id[0x10];
u8 num_pages[0x20];
- u8 reserved_1[0xa0];
+ u8 reserved_at_40[0xa0];
};
struct mlx5_ifc_eqe_bits {
- u8 reserved_0[0x8];
+ u8 reserved_at_0[0x8];
u8 event_type[0x8];
- u8 reserved_1[0x8];
+ u8 reserved_at_10[0x8];
u8 event_sub_type[0x8];
- u8 reserved_2[0xe0];
+ u8 reserved_at_20[0xe0];
union mlx5_ifc_event_auto_bits event_data;
- u8 reserved_3[0x10];
+ u8 reserved_at_1e0[0x10];
u8 signature[0x8];
- u8 reserved_4[0x7];
+ u8 reserved_at_1f8[0x7];
u8 owner[0x1];
};
@@ -6798,14 +6798,14 @@
struct mlx5_ifc_cmd_queue_entry_bits {
u8 type[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 input_length[0x20];
u8 input_mailbox_pointer_63_32[0x20];
u8 input_mailbox_pointer_31_9[0x17];
- u8 reserved_1[0x9];
+ u8 reserved_at_77[0x9];
u8 command_input_inline_data[16][0x8];
@@ -6814,20 +6814,20 @@
u8 output_mailbox_pointer_63_32[0x20];
u8 output_mailbox_pointer_31_9[0x17];
- u8 reserved_2[0x9];
+ u8 reserved_at_1b7[0x9];
u8 output_length[0x20];
u8 token[0x8];
u8 signature[0x8];
- u8 reserved_3[0x8];
+ u8 reserved_at_1f0[0x8];
u8 status[0x7];
u8 ownership[0x1];
};
struct mlx5_ifc_cmd_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
@@ -6836,9 +6836,9 @@
struct mlx5_ifc_cmd_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
u8 command[0][0x20];
@@ -6847,16 +6847,16 @@
struct mlx5_ifc_cmd_if_box_bits {
u8 mailbox_data[512][0x8];
- u8 reserved_0[0x180];
+ u8 reserved_at_1000[0x180];
u8 next_pointer_63_32[0x20];
u8 next_pointer_31_10[0x16];
- u8 reserved_1[0xa];
+ u8 reserved_at_11b6[0xa];
u8 block_number[0x20];
- u8 reserved_2[0x8];
+ u8 reserved_at_11e0[0x8];
u8 token[0x8];
u8 ctrl_signature[0x8];
u8 signature[0x8];
@@ -6866,7 +6866,7 @@
u8 ptag_63_32[0x20];
u8 ptag_31_8[0x18];
- u8 reserved_0[0x6];
+ u8 reserved_at_38[0x6];
u8 wr_en[0x1];
u8 rd_en[0x1];
};
@@ -6904,38 +6904,38 @@
u8 cmd_interface_rev[0x10];
u8 fw_rev_subminor[0x10];
- u8 reserved_0[0x40];
+ u8 reserved_at_40[0x40];
u8 cmdq_phy_addr_63_32[0x20];
u8 cmdq_phy_addr_31_12[0x14];
- u8 reserved_1[0x2];
+ u8 reserved_at_b4[0x2];
u8 nic_interface[0x2];
u8 log_cmdq_size[0x4];
u8 log_cmdq_stride[0x4];
u8 command_doorbell_vector[0x20];
- u8 reserved_2[0xf00];
+ u8 reserved_at_e0[0xf00];
u8 initializing[0x1];
- u8 reserved_3[0x4];
+ u8 reserved_at_fe1[0x4];
u8 nic_interface_supported[0x3];
- u8 reserved_4[0x18];
+ u8 reserved_at_fe8[0x18];
struct mlx5_ifc_health_buffer_bits health_buffer;
u8 no_dram_nic_offset[0x20];
- u8 reserved_5[0x6e40];
+ u8 reserved_at_1220[0x6e40];
- u8 reserved_6[0x1f];
+ u8 reserved_at_8060[0x1f];
u8 clear_int[0x1];
u8 health_syndrome[0x8];
u8 health_counter[0x18];
- u8 reserved_7[0x17fc0];
+ u8 reserved_at_80a0[0x17fc0];
};
union mlx5_ifc_ports_control_registers_document_bits {
@@ -6980,44 +6980,44 @@
struct mlx5_ifc_pvlc_reg_bits pvlc_reg;
struct mlx5_ifc_slrg_reg_bits slrg_reg;
struct mlx5_ifc_sltp_reg_bits sltp_reg;
- u8 reserved_0[0x60e0];
+ u8 reserved_at_0[0x60e0];
};
union mlx5_ifc_debug_enhancements_document_bits {
struct mlx5_ifc_health_buffer_bits health_buffer;
- u8 reserved_0[0x200];
+ u8 reserved_at_0[0x200];
};
union mlx5_ifc_uplink_pci_interface_document_bits {
struct mlx5_ifc_initial_seg_bits initial_seg;
- u8 reserved_0[0x20060];
+ u8 reserved_at_0[0x20060];
};
struct mlx5_ifc_set_flow_table_root_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_set_flow_table_root_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x40];
+ u8 reserved_at_40[0x40];
u8 table_type[0x8];
- u8 reserved_3[0x18];
+ u8 reserved_at_88[0x18];
- u8 reserved_4[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_5[0x140];
+ u8 reserved_at_c0[0x140];
};
enum {
@@ -7026,39 +7026,39 @@
struct mlx5_ifc_modify_flow_table_out_bits {
u8 status[0x8];
- u8 reserved_0[0x18];
+ u8 reserved_at_8[0x18];
u8 syndrome[0x20];
- u8 reserved_1[0x40];
+ u8 reserved_at_40[0x40];
};
struct mlx5_ifc_modify_flow_table_in_bits {
u8 opcode[0x10];
- u8 reserved_0[0x10];
+ u8 reserved_at_10[0x10];
- u8 reserved_1[0x10];
+ u8 reserved_at_20[0x10];
u8 op_mod[0x10];
- u8 reserved_2[0x20];
+ u8 reserved_at_40[0x20];
- u8 reserved_3[0x10];
+ u8 reserved_at_60[0x10];
u8 modify_field_select[0x10];
u8 table_type[0x8];
- u8 reserved_4[0x18];
+ u8 reserved_at_88[0x18];
- u8 reserved_5[0x8];
+ u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_6[0x4];
+ u8 reserved_at_c0[0x4];
u8 table_miss_mode[0x4];
- u8 reserved_7[0x18];
+ u8 reserved_at_c8[0x18];
- u8 reserved_8[0x8];
+ u8 reserved_at_e0[0x8];
u8 table_miss_id[0x18];
- u8 reserved_9[0x100];
+ u8 reserved_at_100[0x100];
};
#endif /* MLX5_IFC_H */
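The mlx5_ifc.h churn above is one mechanical rename: reserved fields move from a running index (reserved_0, reserved_1, ...) to reserved_at_<offset>, where the suffix is the field's starting bit offset in hex. The payoff is stability: carving a new field out of a reserved range no longer renumbers every later reserved field in the struct, so follow-up patches touch only the lines they actually change. A minimal sketch of the convention, using a hypothetical struct and field names that are not part of the driver:

typedef unsigned char u8;

/*
 * Array lengths are field widths in bits; a reserved_at_<hex> suffix
 * is the field's starting bit offset within the layout.
 */
struct example_reg_bits {
        u8 reserved_at_0[0x8];          /* bits 0x00..0x07 */
        u8 local_port[0x8];             /* bits 0x08..0x0f */
        u8 reserved_at_10[0x10];        /* bits 0x10..0x1f */
        u8 mtu[0x10];                   /* bits 0x20..0x2f */
};

/*
 * If bits 0x10..0x17 later gain a meaning, only that range changes:
 * the new field lands at 0x10 and the remainder becomes
 * reserved_at_18[0x8], while every other name stays untouched.
 */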
diff --git a/include/linux/module.h b/include/linux/module.h
index 4560d8f..2bb0c30 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -324,6 +324,12 @@
#define __module_layout_align
#endif
+struct mod_kallsyms {
+ Elf_Sym *symtab;
+ unsigned int num_symtab;
+ char *strtab;
+};
+
struct module {
enum module_state state;
@@ -405,15 +411,10 @@
#endif
#ifdef CONFIG_KALLSYMS
- /*
- * We keep the symbol and string tables for kallsyms.
- * The core_* fields below are temporary, loader-only (they
- * could really be discarded after module init).
- */
- Elf_Sym *symtab, *core_symtab;
- unsigned int num_symtab, core_num_syms;
- char *strtab, *core_strtab;
-
+ /* Protected by RCU and/or module_mutex: use rcu_dereference() */
+ struct mod_kallsyms *kallsyms;
+ struct mod_kallsyms core_kallsyms;
+
/* Section attributes */
struct module_sect_attrs *sect_attrs;
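Folding the symtab/num_symtab/strtab triple into struct mod_kallsyms means the loader can publish a consistent symbol table with a single RCU pointer swap instead of three independent stores, so a concurrent kallsyms reader never sees a half-updated mix of the load-time and core tables. A userspace sketch of that publish-by-pointer idea, with a C11 atomic standing in for rcu_assign_pointer()/rcu_dereference() and illustrative names throughout:

#include <stdatomic.h>
#include <stdio.h>

struct kallsyms_sketch {
        const char *strtab;
        unsigned int num_symtab;
};

static _Atomic(struct kallsyms_sketch *) kallsyms;

int main(void)
{
        static struct kallsyms_sketch loadtime = { "load-time strtab", 10 };
        static struct kallsyms_sketch core = { "core strtab", 4 };

        atomic_store(&kallsyms, &loadtime);     /* while the module loads */
        atomic_store(&kallsyms, &core);         /* one swap once init is done */

        /* a reader snapshots the pointer once and sees one coherent table */
        struct kallsyms_sketch *ks = atomic_load(&kallsyms);

        printf("%s: %u symbols\n", ks->strtab, ks->num_symtab);
        return 0;
}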
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 289c231..5440b7b 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -3718,7 +3718,7 @@
void *netdev_lower_get_next(struct net_device *dev,
struct list_head **iter);
#define netdev_for_each_lower_dev(dev, ldev, iter) \
- for (iter = &(dev)->adj_list.lower, \
+ for (iter = (dev)->adj_list.lower.next, \
ldev = netdev_lower_get_next(dev, &(iter)); \
ldev; \
ldev = netdev_lower_get_next(dev, &(iter)))
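The one-line macro fix above is about where iteration starts: netdev_lower_get_next() treats *iter as the current entry and stops only when the cursor wraps back around to the list head, so the cursor must begin at the first element ((dev)->adj_list.lower.next), not at the head itself. With the old initializer the helper dereferenced the sentinel as though it were a real entry. A standalone sketch of that contract, using a simplified node type rather than the kernel's list_head:

#include <stdio.h>

struct node { int val; struct node *next; };

/*
 * *iter is the current entry: return it and advance, or stop once the
 * cursor has wrapped around to the list head (the sentinel).
 */
static struct node *lower_get_next(struct node *head, struct node **iter)
{
        struct node *n = *iter;

        if (n == head)
                return NULL;
        *iter = n->next;
        return n;
}

int main(void)
{
        struct node head, a = { .val = 1 }, b = { .val = 2 };
        struct node *iter, *n;

        head.next = &a; a.next = &b; b.next = &head;

        iter = head.next;       /* the fixed form: first entry, not &head */
        while ((n = lower_get_next(&head, &iter)))
                printf("%d\n", n->val);
        return 0;
}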
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 48e0320..67300f8 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -550,9 +550,7 @@
static inline loff_t nfs_size_to_loff_t(__u64 size)
{
- if (size > (__u64) OFFSET_MAX - 1)
- return OFFSET_MAX - 1;
- return (loff_t) size;
+ return min_t(u64, size, OFFSET_MAX);
}
static inline ino_t
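Besides being shorter, the min_t() form above also moves the saturation point: the old code clamped to OFFSET_MAX - 1, while min_t(u64, size, OFFSET_MAX) allows the full loff_t range. A quick standalone check of the new behaviour, with OFFSET_MAX spelled as INT64_MAX:

#include <stdint.h>
#include <stdio.h>

#define OFFSET_MAX INT64_MAX

/* min_t(u64, size, OFFSET_MAX): compare as u64, result fits a loff_t */
static int64_t size_to_loff(uint64_t size)
{
        return size < (uint64_t)OFFSET_MAX ? (int64_t)size : OFFSET_MAX;
}

int main(void)
{
        printf("%lld\n", (long long)size_to_loff(4096));        /* 4096 */
        printf("%lld\n", (long long)size_to_loff(UINT64_MAX));  /* OFFSET_MAX */
        return 0;
}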
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 791098a..d320906 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -275,6 +275,7 @@
size_t layoutupdate_len;
struct page *layoutupdate_page;
struct page **layoutupdate_pages;
+ __be32 *start_p;
};
struct nfs4_layoutcommit_res {
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 27df4a6..2771625 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -988,23 +988,6 @@
return pdev->is_managed;
}
-static inline void pci_set_managed_irq(struct pci_dev *pdev, unsigned int irq)
-{
- pdev->irq = irq;
- pdev->irq_managed = 1;
-}
-
-static inline void pci_reset_managed_irq(struct pci_dev *pdev)
-{
- pdev->irq = 0;
- pdev->irq_managed = 0;
-}
-
-static inline bool pci_has_managed_irq(struct pci_dev *pdev)
-{
- return pdev->irq_managed && pdev->irq > 0;
-}
-
void pci_disable_device(struct pci_dev *dev);
extern unsigned int pcibios_max_latency;
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index b35a61a..f5c5a3f 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -397,6 +397,7 @@
 * enum perf_event_active_state - the states of an event
*/
enum perf_event_active_state {
+ PERF_EVENT_STATE_DEAD = -4,
PERF_EVENT_STATE_EXIT = -3,
PERF_EVENT_STATE_ERROR = -2,
PERF_EVENT_STATE_OFF = -1,
@@ -905,7 +906,7 @@
}
}
-extern struct static_key_deferred perf_sched_events;
+extern struct static_key_false perf_sched_events;
static __always_inline bool
perf_sw_migrate_enabled(void)
@@ -924,7 +925,7 @@
static inline void perf_event_task_sched_in(struct task_struct *prev,
struct task_struct *task)
{
- if (static_key_false(&perf_sched_events.key))
+ if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_in(prev, task);
if (perf_sw_migrate_enabled() && task->sched_migrated) {
@@ -941,7 +942,7 @@
{
perf_sw_event_sched(PERF_COUNT_SW_CONTEXT_SWITCHES, 1, 0);
- if (static_key_false(&perf_sched_events.key))
+ if (static_branch_unlikely(&perf_sched_events))
__perf_event_task_sched_out(prev, next);
}
diff --git a/include/linux/pfn.h b/include/linux/pfn.h
index 2d8e497..1132953 100644
--- a/include/linux/pfn.h
+++ b/include/linux/pfn.h
@@ -10,7 +10,7 @@
* backing is indicated by flags in the high bits of the value.
*/
typedef struct {
- unsigned long val;
+ u64 val;
} pfn_t;
#endif
diff --git a/include/linux/pfn_t.h b/include/linux/pfn_t.h
index 37448ab..9499481 100644
--- a/include/linux/pfn_t.h
+++ b/include/linux/pfn_t.h
@@ -9,14 +9,13 @@
* PFN_DEV - pfn is not covered by system memmap by default
* PFN_MAP - pfn has a dynamic page mapping established by a device driver
*/
-#define PFN_FLAGS_MASK (((unsigned long) ~PAGE_MASK) \
- << (BITS_PER_LONG - PAGE_SHIFT))
-#define PFN_SG_CHAIN (1UL << (BITS_PER_LONG - 1))
-#define PFN_SG_LAST (1UL << (BITS_PER_LONG - 2))
-#define PFN_DEV (1UL << (BITS_PER_LONG - 3))
-#define PFN_MAP (1UL << (BITS_PER_LONG - 4))
+#define PFN_FLAGS_MASK (((u64) ~PAGE_MASK) << (BITS_PER_LONG_LONG - PAGE_SHIFT))
+#define PFN_SG_CHAIN (1ULL << (BITS_PER_LONG_LONG - 1))
+#define PFN_SG_LAST (1ULL << (BITS_PER_LONG_LONG - 2))
+#define PFN_DEV (1ULL << (BITS_PER_LONG_LONG - 3))
+#define PFN_MAP (1ULL << (BITS_PER_LONG_LONG - 4))
-static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, unsigned long flags)
+static inline pfn_t __pfn_to_pfn_t(unsigned long pfn, u64 flags)
{
pfn_t pfn_t = { .val = pfn | (flags & PFN_FLAGS_MASK), };
@@ -29,7 +28,7 @@
return __pfn_to_pfn_t(pfn, 0);
}
-extern pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags);
+extern pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags);
static inline bool pfn_t_has_page(pfn_t pfn)
{
@@ -87,7 +86,7 @@
#ifdef __HAVE_ARCH_PTE_DEVMAP
static inline bool pfn_t_devmap(pfn_t pfn)
{
- const unsigned long flags = PFN_DEV|PFN_MAP;
+ const u64 flags = PFN_DEV|PFN_MAP;
return (pfn.val & flags) == flags;
}
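Widening pfn_t.val to u64 matters on 32-bit builds: the PFN_* flags occupy the top PAGE_SHIFT bits of the value, and with a 32-bit unsigned long a sufficiently large PFN (32-bit kernels can address more physical memory than fits below those bits, e.g. with PAE) would collide with the flag space. Anchoring everything to BITS_PER_LONG_LONG keeps the flags above any encodable PFN on every architecture. A userspace sketch of the encoding, assuming PAGE_SHIFT is 12 and mirroring the constants in the hunk:

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PFN_DEV (1ULL << 61)
#define PFN_MAP (1ULL << 60)
/* the top PAGE_SHIFT bits of the 64-bit value are flag space */
#define PFN_FLAGS_MASK (((1ULL << PAGE_SHIFT) - 1) << (64 - PAGE_SHIFT))

typedef struct { uint64_t val; } pfn_t;

static pfn_t pfn_to_pfn_t(uint64_t pfn, uint64_t flags)
{
        return (pfn_t){ .val = pfn | (flags & PFN_FLAGS_MASK) };
}

int main(void)
{
        /* a PFN needing more than 32 bits round-trips cleanly as u64 */
        pfn_t p = pfn_to_pfn_t(1ULL << 36, PFN_DEV | PFN_MAP);

        printf("pfn=%#llx flags=%#llx\n",
               (unsigned long long)(p.val & ~PFN_FLAGS_MASK),
               (unsigned long long)(p.val & PFN_FLAGS_MASK));
        return 0;
}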
diff --git a/include/linux/power/bq27xxx_battery.h b/include/linux/power/bq27xxx_battery.h
index 998d8f1..b50c049 100644
--- a/include/linux/power/bq27xxx_battery.h
+++ b/include/linux/power/bq27xxx_battery.h
@@ -49,6 +49,7 @@
struct bq27xxx_device_info {
struct device *dev;
+ int id;
enum bq27xxx_chip chip;
const char *name;
struct bq27xxx_access_methods bus;
diff --git a/include/linux/random.h b/include/linux/random.h
index a75840c..9c29122 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -34,6 +34,7 @@
#endif
unsigned int get_random_int(void);
+unsigned long get_random_long(void);
unsigned long randomize_range(unsigned long start, unsigned long end, unsigned long len);
u32 prandom_u32(void);
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 11f935c..4ce9ff7 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -299,6 +299,7 @@
#else
#define MAX_SKB_FRAGS (65536/PAGE_SIZE + 1)
#endif
+extern int sysctl_max_skb_frags;
typedef struct skb_frag_struct skb_frag_t;
diff --git a/include/linux/soc/ti/knav_dma.h b/include/linux/soc/ti/knav_dma.h
index 343c13a..35cb926 100644
--- a/include/linux/soc/ti/knav_dma.h
+++ b/include/linux/soc/ti/knav_dma.h
@@ -44,6 +44,7 @@
#define KNAV_DMA_NUM_EPIB_WORDS 4
#define KNAV_DMA_NUM_PS_WORDS 16
+#define KNAV_DMA_NUM_SW_DATA_WORDS 4
#define KNAV_DMA_FDQ_PER_CHAN 4
/* Tx channel scheduling priority */
@@ -142,6 +143,7 @@
* @orig_buff: buff pointer since 'buff' can be overwritten
* @epib: Extended packet info block
* @psdata: Protocol specific
+ * @sw_data: Software private data not touched by h/w
*/
struct knav_dma_desc {
__le32 desc_info;
@@ -154,7 +156,7 @@
__le32 orig_buff;
__le32 epib[KNAV_DMA_NUM_EPIB_WORDS];
__le32 psdata[KNAV_DMA_NUM_PS_WORDS];
- __le32 pad[4];
+ u32 sw_data[KNAV_DMA_NUM_SW_DATA_WORDS];
} ____cacheline_aligned;
#if IS_ENABLED(CONFIG_KEYSTONE_NAVIGATOR_DMA)
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index acd522a..acfdbf3 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -14,8 +14,10 @@
* See the file COPYING for more details.
*/
+#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/types.h>
+#include <linux/cpumask.h>
#include <linux/rcupdate.h>
#include <linux/tracepoint-defs.h>
@@ -132,6 +134,9 @@
void *it_func; \
void *__data; \
\
+ if (!cpu_online(raw_smp_processor_id())) \
+ return; \
+ \
if (!(cond)) \
return; \
prercu; \
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
index cbb20af..bb679b4 100644
--- a/include/linux/ucs2_string.h
+++ b/include/linux/ucs2_string.h
@@ -11,4 +11,8 @@
unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
+unsigned long ucs2_utf8size(const ucs2_char_t *src);
+unsigned long ucs2_as_utf8(u8 *dest, const ucs2_char_t *src,
+ unsigned long maxlength);
+
#endif /* _LINUX_UCS2_STRING_H_ */
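The two new helpers let callers (efivarfs, for one) expose UCS-2 string data as UTF-8: ucs2_utf8size() reports how many bytes the conversion needs and ucs2_as_utf8() performs it with an explicit bound. Because UCS-2 has no surrogate pairs, every character encodes to one, two, or three UTF-8 bytes. A userspace sketch of the sizing rule only; the real implementations live in lib/ucs2_string.c:

#include <stdint.h>
#include <stdio.h>

typedef uint16_t ucs2_char_t;

/* UCS-2 code points <= 0x7f take 1 byte, <= 0x7ff take 2, the rest 3 */
static unsigned long utf8size(const ucs2_char_t *src)
{
        unsigned long bytes = 0;

        for (; *src; src++) {
                if (*src <= 0x7f)
                        bytes += 1;
                else if (*src <= 0x7ff)
                        bytes += 2;
                else
                        bytes += 3;
        }
        return bytes;
}

int main(void)
{
        ucs2_char_t s[] = { 'A', 0x00e9, 0x20ac, 0 };   /* A, e-acute, euro */

        printf("%lu\n", utf8size(s));                   /* 1 + 2 + 3 = 6 */
        return 0;
}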
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 0e32bc7..ca73c50 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -311,6 +311,7 @@
__WQ_DRAINING = 1 << 16, /* internal: workqueue is draining */
__WQ_ORDERED = 1 << 17, /* internal: workqueue is ordered */
+ __WQ_LEGACY = 1 << 18, /* internal: create*_workqueue() */
WQ_MAX_ACTIVE = 512, /* I like 512, better ideas? */
WQ_MAX_UNBOUND_PER_CPU = 4, /* 4 * #cpus for unbound wq */
@@ -411,12 +412,12 @@
alloc_workqueue(fmt, WQ_UNBOUND | __WQ_ORDERED | (flags), 1, ##args)
#define create_workqueue(name) \
- alloc_workqueue("%s", WQ_MEM_RECLAIM, 1, (name))
+ alloc_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, 1, (name))
#define create_freezable_workqueue(name) \
- alloc_workqueue("%s", WQ_FREEZABLE | WQ_UNBOUND | WQ_MEM_RECLAIM, \
- 1, (name))
+ alloc_workqueue("%s", __WQ_LEGACY | WQ_FREEZABLE | WQ_UNBOUND | \
+ WQ_MEM_RECLAIM, 1, (name))
#define create_singlethread_workqueue(name) \
- alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name)
+ alloc_ordered_workqueue("%s", __WQ_LEGACY | WQ_MEM_RECLAIM, name)
extern void destroy_workqueue(struct workqueue_struct *wq);
diff --git a/include/net/af_unix.h b/include/net/af_unix.h
index 2a91a05..9b4c418 100644
--- a/include/net/af_unix.h
+++ b/include/net/af_unix.h
@@ -6,8 +6,8 @@
#include <linux/mutex.h>
#include <net/sock.h>
-void unix_inflight(struct file *fp);
-void unix_notinflight(struct file *fp);
+void unix_inflight(struct user_struct *user, struct file *fp);
+void unix_notinflight(struct user_struct *user, struct file *fp);
void unix_gc(void);
void wait_for_unix_gc(void);
struct sock *unix_get_socket(struct file *filp);
diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
index 481fe1c..49dcad4 100644
--- a/include/net/inet_connection_sock.h
+++ b/include/net/inet_connection_sock.h
@@ -270,8 +270,9 @@
struct sock *newsk,
const struct request_sock *req);
-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
- struct sock *child);
+struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+ struct request_sock *req,
+ struct sock *child);
void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
unsigned long timeout);
struct sock *inet_csk_complete_hashdance(struct sock *sk, struct sock *child,
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 7029527..4079fc1 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -61,6 +61,7 @@
struct rtable __rcu *fnhe_rth_input;
struct rtable __rcu *fnhe_rth_output;
unsigned long fnhe_stamp;
+ struct rcu_head rcu;
};
struct fnhe_hash_bucket {
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 6db96ea..dda9abf 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -230,6 +230,7 @@
int ip_tunnel_ioctl(struct net_device *dev, struct ip_tunnel_parm *p, int cmd);
int ip_tunnel_encap(struct sk_buff *skb, struct ip_tunnel *t,
u8 *protocol, struct flowi4 *fl4);
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict);
int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu);
struct rtnl_link_stats64 *ip_tunnel_get_stats64(struct net_device *dev,
diff --git a/include/net/scm.h b/include/net/scm.h
index 262532d..59fa93c 100644
--- a/include/net/scm.h
+++ b/include/net/scm.h
@@ -21,6 +21,7 @@
struct scm_fp_list {
short count;
short max;
+ struct user_struct *user;
struct file *fp[SCM_MAX_FD];
};
diff --git a/include/net/tcp.h b/include/net/tcp.h
index f6f8f03..ae6468f 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -447,7 +447,7 @@
void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
-void tcp_req_err(struct sock *sk, u32 seq);
+void tcp_req_err(struct sock *sk, u32 seq, bool abort);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
struct request_sock *req,
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index e2b712c..c21c38c 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -343,7 +343,7 @@
void snd_hdac_bus_exit_link_reset(struct hdac_bus *bus);
void snd_hdac_bus_update_rirb(struct hdac_bus *bus);
-void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
+int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
void (*ack)(struct hdac_bus *,
struct hdac_stream *));
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index 56cf8e4..28ee5c2 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -94,5 +94,8 @@
sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
bool target_sense_desc_format(struct se_device *dev);
+sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
+bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
+ struct request_queue *q, int block_size);
#endif /* TARGET_CORE_BACKEND_H */
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 5d82816..e8c8c08 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -140,6 +140,8 @@
SCF_COMPARE_AND_WRITE = 0x00080000,
SCF_COMPARE_AND_WRITE_POST = 0x00100000,
SCF_PASSTHROUGH_PROT_SG_TO_MEM_NOALLOC = 0x00200000,
+ SCF_ACK_KREF = 0x00400000,
+ SCF_USE_CPUID = 0x00800000,
};
/* struct se_dev_entry->lun_flags and struct se_lun->lun_access */
@@ -187,6 +189,7 @@
TARGET_SCF_BIDI_OP = 0x01,
TARGET_SCF_ACK_KREF = 0x02,
TARGET_SCF_UNKNOWN_SIZE = 0x04,
+ TARGET_SCF_USE_CPUID = 0x08,
};
/* fabric independent task management function values */
@@ -490,8 +493,9 @@
#define CMD_T_SENT (1 << 4)
#define CMD_T_STOP (1 << 5)
#define CMD_T_DEV_ACTIVE (1 << 7)
-#define CMD_T_REQUEST_STOP (1 << 8)
#define CMD_T_BUSY (1 << 9)
+#define CMD_T_TAS (1 << 10)
+#define CMD_T_FABRIC_STOP (1 << 11)
spinlock_t t_state_lock;
struct kref cmd_kref;
struct completion t_transport_stop_comp;
@@ -511,9 +515,6 @@
struct list_head state_list;
- /* old task stop completion, consider merging with some of the above */
- struct completion task_stop_comp;
-
/* backend private data */
void *priv;
diff --git a/include/trace/events/kvm.h b/include/trace/events/kvm.h
index d6f8322..aa69253 100644
--- a/include/trace/events/kvm.h
+++ b/include/trace/events/kvm.h
@@ -359,14 +359,15 @@
#endif
TRACE_EVENT(kvm_halt_poll_ns,
- TP_PROTO(bool grow, unsigned int vcpu_id, int new, int old),
+ TP_PROTO(bool grow, unsigned int vcpu_id, unsigned int new,
+ unsigned int old),
TP_ARGS(grow, vcpu_id, new, old),
TP_STRUCT__entry(
__field(bool, grow)
__field(unsigned int, vcpu_id)
- __field(int, new)
- __field(int, old)
+ __field(unsigned int, new)
+ __field(unsigned int, old)
),
TP_fast_assign(
@@ -376,7 +377,7 @@
__entry->old = old;
),
- TP_printk("vcpu %u: halt_poll_ns %d (%s %d)",
+ TP_printk("vcpu %u: halt_poll_ns %u (%s %u)",
__entry->vcpu_id,
__entry->new,
__entry->grow ? "grow" : "shrink",
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index a2fe0ac..a7f1f80 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -157,6 +157,7 @@
struct kvm_hyperv_exit {
#define KVM_EXIT_HYPERV_SYNIC 1
+#define KVM_EXIT_HYPERV_HCALL 2
__u32 type;
union {
struct {
@@ -165,6 +166,11 @@
__u64 evt_page;
__u64 msg_page;
} synic;
+ struct {
+ __u64 input;
+ __u64 result;
+ __u64 params[2];
+ } hcall;
} u;
};
@@ -856,6 +862,9 @@
#define KVM_CAP_IOEVENTFD_ANY_LENGTH 122
#define KVM_CAP_HYPERV_SYNIC 123
#define KVM_CAP_S390_RI 124
+#define KVM_CAP_SPAPR_TCE_64 125
+#define KVM_CAP_ARM_PMU_V3 126
+#define KVM_CAP_VCPU_ATTRIBUTES 127
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1148,6 +1157,8 @@
/* Available with KVM_CAP_PPC_ALLOC_HTAB */
#define KVM_PPC_ALLOCATE_HTAB _IOWR(KVMIO, 0xa7, __u32)
#define KVM_CREATE_SPAPR_TCE _IOW(KVMIO, 0xa8, struct kvm_create_spapr_tce)
+#define KVM_CREATE_SPAPR_TCE_64 _IOW(KVMIO, 0xa8, \
+ struct kvm_create_spapr_tce_64)
/* Available with KVM_CAP_RMA */
#define KVM_ALLOCATE_RMA _IOR(KVMIO, 0xa9, struct kvm_allocate_rma)
/* Available with KVM_CAP_PPC_HTAB_FD */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index 5b4a4be..cc68b921 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -66,14 +66,18 @@
__u64 length;
__u32 status;
__u32 max_ars_out;
+ __u32 clear_err_unit;
+ __u32 reserved;
} __packed;
struct nd_cmd_ars_start {
__u64 address;
__u64 length;
__u16 type;
- __u8 reserved[6];
+ __u8 flags;
+ __u8 reserved[5];
__u32 status;
+ __u32 scrub_time;
} __packed;
struct nd_cmd_ars_status {
@@ -81,11 +85,14 @@
__u32 out_length;
__u64 address;
__u64 length;
+ __u64 restart_address;
+ __u64 restart_length;
__u16 type;
+ __u16 flags;
__u32 num_records;
struct nd_ars_record {
__u32 handle;
- __u32 flags;
+ __u32 reserved;
__u64 err_address;
__u64 length;
} __packed records[0];
diff --git a/ipc/shm.c b/ipc/shm.c
index ed3027d..331fc1b 100644
--- a/ipc/shm.c
+++ b/ipc/shm.c
@@ -156,11 +156,12 @@
struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);
/*
- * We raced in the idr lookup or with shm_destroy(). Either way, the
- * ID is busted.
+ * Callers of shm_lock() must validate the status of the returned ipc
+ * object pointer (as returned by ipc_lock()), and error out as
+ * appropriate.
*/
- WARN_ON(IS_ERR(ipcp));
-
+ if (IS_ERR(ipcp))
+ return (void *)ipcp;
return container_of(ipcp, struct shmid_kernel, shm_perm);
}
@@ -186,18 +187,33 @@
}
-/* This is called by fork, once for every shm attach. */
-static void shm_open(struct vm_area_struct *vma)
+static int __shm_open(struct vm_area_struct *vma)
{
struct file *file = vma->vm_file;
struct shm_file_data *sfd = shm_file_data(file);
struct shmid_kernel *shp;
shp = shm_lock(sfd->ns, sfd->id);
+
+ if (IS_ERR(shp))
+ return PTR_ERR(shp);
+
shp->shm_atim = get_seconds();
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_nattch++;
shm_unlock(shp);
+ return 0;
+}
+
+/* This is called by fork, once for every shm attach. */
+static void shm_open(struct vm_area_struct *vma)
+{
+ int err = __shm_open(vma);
+ /*
+ * We raced in the idr lookup or with shm_destroy().
+ * Either way, the ID is busted.
+ */
+ WARN_ON_ONCE(err);
}
/*
@@ -260,6 +276,14 @@
down_write(&shm_ids(ns).rwsem);
/* remove from the list of attaches of the shm segment */
shp = shm_lock(ns, sfd->id);
+
+ /*
+ * We raced in the idr lookup or with shm_destroy().
+ * Either way, the ID is busted.
+ */
+ if (WARN_ON_ONCE(IS_ERR(shp)))
+ goto done; /* no-op */
+
shp->shm_lprid = task_tgid_vnr(current);
shp->shm_dtim = get_seconds();
shp->shm_nattch--;
@@ -267,6 +291,7 @@
shm_destroy(ns, shp);
else
shm_unlock(shp);
+done:
up_write(&shm_ids(ns).rwsem);
}
@@ -388,17 +413,25 @@
struct shm_file_data *sfd = shm_file_data(file);
int ret;
- ret = sfd->file->f_op->mmap(sfd->file, vma);
- if (ret != 0)
+ /*
+ * In case of remap_file_pages() emulation, the file can represent
+	 * removed IPC ID: propagate shm_lock() error to caller.
+ */
+	ret = __shm_open(vma);
+ if (ret)
return ret;
+
+ ret = sfd->file->f_op->mmap(sfd->file, vma);
+ if (ret) {
+ shm_close(vma);
+ return ret;
+ }
sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
WARN_ON(!sfd->vm_ops->fault);
#endif
vma->vm_ops = &shm_vm_ops;
- shm_open(vma);
-
- return ret;
+ return 0;
}
static int shm_release(struct inode *ino, struct file *file)
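The reordering in shm_mmap() is the core of the fix: under remap_file_pages() emulation the segment may already have been removed, so __shm_open() now runs first and a dead ID comes back as an error instead of tripping a WARN_ON(); only then is the backing file's ->mmap() attempted, and if that fails the attach count is unwound through shm_close(). A stub-based sketch of that acquire-then-act-then-unwind shape, with stand-in names rather than the kernel API:

#include <stdio.h>

static int open_ref(void)    { return 0; }   /* __shm_open(): may fail on a dead ID */
static int map_backing(void) { return -1; }  /* ->mmap(): forced to fail here */
static void close_ref(void)  { puts("attach count unwound"); }  /* shm_close() */

static int shm_mmap_sketch(void)
{
        int ret = open_ref();           /* take the attach reference first */

        if (ret)
                return ret;             /* dead ID: report it, nothing to undo */

        ret = map_backing();
        if (ret) {
                close_ref();            /* mmap failed: drop the attach again */
                return ret;
        }
        return 0;
}

int main(void)
{
        return shm_mmap_sketch() ? 1 : 0;
}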
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index d1d3e8f..2e7f7ab 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -2082,7 +2082,7 @@
/* adjust offset of jmps if necessary */
if (i < pos && i + insn->off + 1 > pos)
insn->off += delta;
- else if (i > pos && i + insn->off + 1 < pos)
+ else if (i > pos + delta && i + insn->off + 1 <= pos + delta)
insn->off -= delta;
}
}
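The verifier fixup above runs after one instruction at index pos has been expanded into 1 + delta instructions, which means every index past pos has already shifted up by delta by the time the branch-adjustment loop executes. Forward jumps from before the patch still just grow by delta, but a backward jump must be recognized by i > pos + delta (its own index moved), and the target test must be inclusive, because a jump that landed exactly on the patched instruction still points at the start of the patchlet. A standalone sketch of the corrected arithmetic, using BPF's target convention of i + off + 1:

#include <stdio.h>

static void adjust_off(int i, int *off, int pos, int delta)
{
        if (i < pos && i + *off + 1 > pos)
                *off += delta;          /* forward jump across the patched region */
        else if (i > pos + delta && i + *off + 1 <= pos + delta)
                *off -= delta;          /* backward jump onto or across it */
}

int main(void)
{
        /*
         * Old layout: insn 7 jumps back to insn 3 (3 == 7 + (-5) + 1).
         * Patching insn 5 with delta = 2 moves insn 7 to index 9.
         */
        int off = -5;

        adjust_off(9, &off, 5, 2);
        printf("off = %d\n", off);      /* -7, so 9 + (-7) + 1 == 3 again */
        return 0;
}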
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index c03a640..d27904c 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -58,6 +58,7 @@
#include <linux/kthread.h>
#include <linux/delay.h>
#include <linux/atomic.h>
+#include <linux/cpuset.h>
#include <net/sock.h>
/*
@@ -2739,6 +2740,7 @@
out_unlock_threadgroup:
percpu_up_write(&cgroup_threadgroup_rwsem);
cgroup_kn_unlock(of->kn);
+ cpuset_post_attach_flush();
return ret ?: nbytes;
}
@@ -4655,14 +4657,15 @@
if (ss) {
/* css free path */
+ struct cgroup_subsys_state *parent = css->parent;
int id = css->id;
- if (css->parent)
- css_put(css->parent);
-
ss->css_free(css);
cgroup_idr_remove(&ss->css_idr, id);
cgroup_put(cgrp);
+
+ if (parent)
+ css_put(parent);
} else {
/* cgroup free path */
atomic_dec(&cgrp->root->nr_cgrps);
@@ -4758,6 +4761,7 @@
INIT_LIST_HEAD(&css->sibling);
INIT_LIST_HEAD(&css->children);
css->serial_nr = css_serial_nr_next++;
+ atomic_set(&css->online_cnt, 0);
if (cgroup_parent(cgrp)) {
css->parent = cgroup_css(cgroup_parent(cgrp), ss);
@@ -4780,6 +4784,10 @@
if (!ret) {
css->flags |= CSS_ONLINE;
rcu_assign_pointer(css->cgroup->subsys[ss->id], css);
+
+ atomic_inc(&css->online_cnt);
+ if (css->parent)
+ atomic_inc(&css->parent->online_cnt);
}
return ret;
}
@@ -5017,10 +5025,15 @@
container_of(work, struct cgroup_subsys_state, destroy_work);
mutex_lock(&cgroup_mutex);
- offline_css(css);
- mutex_unlock(&cgroup_mutex);
- css_put(css);
+ do {
+ offline_css(css);
+ css_put(css);
+ /* @css can't go away while we're holding cgroup_mutex */
+ css = css->parent;
+ } while (css && atomic_dec_and_test(&css->online_cnt));
+
+ mutex_unlock(&cgroup_mutex);
}
/* css kill confirmation processing requires process context, bounce */
@@ -5029,8 +5042,10 @@
struct cgroup_subsys_state *css =
container_of(ref, struct cgroup_subsys_state, refcnt);
- INIT_WORK(&css->destroy_work, css_killed_work_fn);
- queue_work(cgroup_destroy_wq, &css->destroy_work);
+ if (atomic_dec_and_test(&css->online_cnt)) {
+ INIT_WORK(&css->destroy_work, css_killed_work_fn);
+ queue_work(cgroup_destroy_wq, &css->destroy_work);
+ }
}
/**
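The css->online_cnt plumbing above makes offlining strictly bottom-up: every online css holds one count for itself plus one on its parent, the kill path queues css_killed_work_fn() only when a css's own count reaches zero, and the work function then walks up the chain releasing each parent's count in turn, so a parent can never go offline while any child is still online. A plain-int sketch of the cascade; the real atomic_t, locking, and RCU are deliberately omitted:

#include <stdio.h>

struct css { int online_cnt; struct css *parent; const char *name; };

/*
 * Drop the css's own count; once it hits zero, offline it and keep
 * releasing the count each child held on its parent up the chain.
 */
static void kill_css(struct css *c)
{
        if (--c->online_cnt)
                return;
        do {
                printf("offline %s\n", c->name);
                c = c->parent;
        } while (c && --c->online_cnt == 0);
}

int main(void)
{
        /* online_cnt = 1 (self) + number of online children */
        struct css root = { .online_cnt = 3, .name = "root" };
        struct css a = { .online_cnt = 1, .parent = &root, .name = "a" };
        struct css b = { .online_cnt = 1, .parent = &root, .name = "b" };

        kill_css(&a);           /* a goes offline; root drops 3 -> 2 */
        kill_css(&root);        /* root must wait: b still online (2 -> 1) */
        kill_css(&b);           /* b goes offline, and root follows */
        return 0;
}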
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 3e945fc..41989ab 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -287,6 +287,8 @@
static DEFINE_MUTEX(cpuset_mutex);
static DEFINE_SPINLOCK(callback_lock);
+static struct workqueue_struct *cpuset_migrate_mm_wq;
+
/*
* CPU / memory hotplug is handled asynchronously.
*/
@@ -972,31 +974,51 @@
}
/*
- * cpuset_migrate_mm
- *
- * Migrate memory region from one set of nodes to another.
- *
- * Temporarilly set tasks mems_allowed to target nodes of migration,
- * so that the migration code can allocate pages on these nodes.
- *
- * While the mm_struct we are migrating is typically from some
- * other task, the task_struct mems_allowed that we are hacking
- * is for our current task, which must allocate new pages for that
- * migrating memory region.
+ * Migrate memory region from one set of nodes to another. This is
+ * performed asynchronously as it can be called from the process migration path
+ * holding locks involved in process management. All mm migrations are
+ * performed in the queued order and can be waited for by flushing
+ * cpuset_migrate_mm_wq.
*/
+struct cpuset_migrate_mm_work {
+ struct work_struct work;
+ struct mm_struct *mm;
+ nodemask_t from;
+ nodemask_t to;
+};
+
+static void cpuset_migrate_mm_workfn(struct work_struct *work)
+{
+ struct cpuset_migrate_mm_work *mwork =
+ container_of(work, struct cpuset_migrate_mm_work, work);
+
+ /* on a wq worker, no need to worry about %current's mems_allowed */
+ do_migrate_pages(mwork->mm, &mwork->from, &mwork->to, MPOL_MF_MOVE_ALL);
+ mmput(mwork->mm);
+ kfree(mwork);
+}
+
static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
const nodemask_t *to)
{
- struct task_struct *tsk = current;
+ struct cpuset_migrate_mm_work *mwork;
- tsk->mems_allowed = *to;
+ mwork = kzalloc(sizeof(*mwork), GFP_KERNEL);
+ if (mwork) {
+ mwork->mm = mm;
+ mwork->from = *from;
+ mwork->to = *to;
+ INIT_WORK(&mwork->work, cpuset_migrate_mm_workfn);
+ queue_work(cpuset_migrate_mm_wq, &mwork->work);
+ } else {
+ mmput(mm);
+ }
+}
- do_migrate_pages(mm, from, to, MPOL_MF_MOVE_ALL);
-
- rcu_read_lock();
- guarantee_online_mems(task_cs(tsk), &tsk->mems_allowed);
- rcu_read_unlock();
+void cpuset_post_attach_flush(void)
+{
+ flush_workqueue(cpuset_migrate_mm_wq);
}
/*
@@ -1097,7 +1119,8 @@
mpol_rebind_mm(mm, &cs->mems_allowed);
if (migrate)
cpuset_migrate_mm(mm, &cs->old_mems_allowed, &newmems);
- mmput(mm);
+ else
+ mmput(mm);
}
css_task_iter_end(&it);
@@ -1545,11 +1568,11 @@
* @old_mems_allowed is the right nodesets that we
* migrate mm from.
*/
- if (is_memory_migrate(cs)) {
+ if (is_memory_migrate(cs))
cpuset_migrate_mm(mm, &oldcs->old_mems_allowed,
&cpuset_attach_nodemask_to);
- }
- mmput(mm);
+ else
+ mmput(mm);
}
}
@@ -1714,6 +1737,7 @@
mutex_unlock(&cpuset_mutex);
kernfs_unbreak_active_protection(of->kn);
css_put(&cs->css);
+ flush_workqueue(cpuset_migrate_mm_wq);
return retval ?: nbytes;
}
@@ -2359,6 +2383,9 @@
top_cpuset.effective_mems = node_states[N_MEMORY];
register_hotmemory_notifier(&cpuset_track_online_nodes_nb);
+
+ cpuset_migrate_mm_wq = alloc_ordered_workqueue("cpuset_migrate_mm", 0);
+ BUG_ON(!cpuset_migrate_mm_wq);
}
/**
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 5946460..6146148 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -64,8 +64,17 @@
struct task_struct *p = tfc->p;
if (p) {
- tfc->ret = -EAGAIN;
- if (task_cpu(p) != smp_processor_id() || !task_curr(p))
+ /* -EAGAIN */
+ if (task_cpu(p) != smp_processor_id())
+ return;
+
+ /*
+		 * Now that we're on the right CPU with IRQs disabled, we can test
+ * if we hit the right task without races.
+ */
+
+ tfc->ret = -ESRCH; /* No such (running) process */
+ if (p != current)
return;
}
@@ -92,13 +101,17 @@
.p = p,
.func = func,
.info = info,
- .ret = -ESRCH, /* No such (running) process */
+ .ret = -EAGAIN,
};
+ int ret;
- if (task_curr(p))
- smp_call_function_single(task_cpu(p), remote_function, &data, 1);
+ do {
+ ret = smp_call_function_single(task_cpu(p), remote_function, &data, 1);
+ if (!ret)
+ ret = data.ret;
+ } while (ret == -EAGAIN);
- return data.ret;
+ return ret;
}
/**
@@ -169,19 +182,6 @@
* rely on ctx->is_active and therefore cannot use event_function_call().
* See perf_install_in_context().
*
- * This is because we need a ctx->lock serialized variable (ctx->is_active)
- * to reliably determine if a particular task/context is scheduled in. The
- * task_curr() use in task_function_call() is racy in that a remote context
- * switch is not a single atomic operation.
- *
- * As is, the situation is 'safe' because we set rq->curr before we do the
- * actual context switch. This means that task_curr() will fail early, but
- * we'll continue spinning on ctx->is_active until we've passed
- * perf_event_task_sched_out().
- *
- * Without this ctx->lock serialized variable we could have race where we find
- * the task (and hence the context) would not be active while in fact they are.
- *
* If ctx->nr_events, then ctx->is_active and cpuctx->task_ctx are set.
*/
@@ -212,7 +212,7 @@
*/
if (ctx->task) {
if (ctx->task != current) {
- ret = -EAGAIN;
+ ret = -ESRCH;
goto unlock;
}
@@ -276,10 +276,10 @@
return;
}
-again:
if (task == TASK_TOMBSTONE)
return;
+again:
if (!task_function_call(task, event_function, &efs))
return;
@@ -289,13 +289,15 @@
* a concurrent perf_event_context_sched_out().
*/
task = ctx->task;
- if (task != TASK_TOMBSTONE) {
- if (ctx->is_active) {
- raw_spin_unlock_irq(&ctx->lock);
- goto again;
- }
- func(event, NULL, ctx, data);
+ if (task == TASK_TOMBSTONE) {
+ raw_spin_unlock_irq(&ctx->lock);
+ return;
}
+ if (ctx->is_active) {
+ raw_spin_unlock_irq(&ctx->lock);
+ goto again;
+ }
+ func(event, NULL, ctx, data);
raw_spin_unlock_irq(&ctx->lock);
}
@@ -314,6 +316,7 @@
enum event_type_t {
EVENT_FLEXIBLE = 0x1,
EVENT_PINNED = 0x2,
+ EVENT_TIME = 0x4,
EVENT_ALL = EVENT_FLEXIBLE | EVENT_PINNED,
};
@@ -321,7 +324,13 @@
* perf_sched_events : >0 events exist
* perf_cgroup_events: >0 per-cpu cgroup events exist on this cpu
*/
-struct static_key_deferred perf_sched_events __read_mostly;
+
+static void perf_sched_delayed(struct work_struct *work);
+DEFINE_STATIC_KEY_FALSE(perf_sched_events);
+static DECLARE_DELAYED_WORK(perf_sched_work, perf_sched_delayed);
+static DEFINE_MUTEX(perf_sched_mutex);
+static atomic_t perf_sched_count;
+
static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
static DEFINE_PER_CPU(int, perf_sched_cb_usages);
@@ -1288,16 +1297,18 @@
/*
 * Update the total_time_enabled and total_time_running fields for an event.
- * The caller of this function needs to hold the ctx->lock.
*/
static void update_event_times(struct perf_event *event)
{
struct perf_event_context *ctx = event->ctx;
u64 run_end;
+ lockdep_assert_held(&ctx->lock);
+
if (event->state < PERF_EVENT_STATE_INACTIVE ||
event->group_leader->state < PERF_EVENT_STATE_INACTIVE)
return;
+
/*
* in cgroup mode, time_enabled represents
* the time the event was enabled AND active
@@ -1645,7 +1656,7 @@
static bool is_orphaned_event(struct perf_event *event)
{
- return event->state == PERF_EVENT_STATE_EXIT;
+ return event->state == PERF_EVENT_STATE_DEAD;
}
static inline int pmu_filter_match(struct perf_event *event)
@@ -1690,14 +1701,14 @@
perf_pmu_disable(event->pmu);
+ event->tstamp_stopped = tstamp;
+ event->pmu->del(event, 0);
+ event->oncpu = -1;
event->state = PERF_EVENT_STATE_INACTIVE;
if (event->pending_disable) {
event->pending_disable = 0;
event->state = PERF_EVENT_STATE_OFF;
}
- event->tstamp_stopped = tstamp;
- event->pmu->del(event, 0);
- event->oncpu = -1;
if (!is_software_event(event))
cpuctx->active_oncpu--;
@@ -1732,7 +1743,6 @@
}
#define DETACH_GROUP 0x01UL
-#define DETACH_STATE 0x02UL
/*
* Cross CPU call to remove a performance event
@@ -1752,8 +1762,6 @@
if (flags & DETACH_GROUP)
perf_group_detach(event);
list_del_event(event, ctx);
- if (flags & DETACH_STATE)
- event->state = PERF_EVENT_STATE_EXIT;
if (!ctx->nr_events && ctx->is_active) {
ctx->is_active = 0;
@@ -2063,14 +2071,27 @@
event->tstamp_stopped = tstamp;
}
-static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx);
+static void ctx_sched_out(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx,
+ enum event_type_t event_type);
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
struct task_struct *task);
+static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
+ struct perf_event_context *ctx)
+{
+ if (!cpuctx->task_ctx)
+ return;
+
+ if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
+ return;
+
+ ctx_sched_out(ctx, cpuctx, EVENT_ALL);
+}
+
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
struct task_struct *task)
@@ -2097,49 +2118,68 @@
/*
* Cross CPU call to install and enable a performance event
*
- * Must be called with ctx->mutex held
+ * Very similar to remote_function() + event_function() but cannot assume that
+ * things like ctx->is_active and cpuctx->task_ctx are set.
*/
static int __perf_install_in_context(void *info)
{
- struct perf_event_context *ctx = info;
+ struct perf_event *event = info;
+ struct perf_event_context *ctx = event->ctx;
struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
struct perf_event_context *task_ctx = cpuctx->task_ctx;
+ bool activate = true;
+ int ret = 0;
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx->task) {
raw_spin_lock(&ctx->lock);
- /*
- * If we hit the 'wrong' task, we've since scheduled and
- * everything should be sorted, nothing to do!
- */
task_ctx = ctx;
- if (ctx->task != current)
+
+ /* If we're on the wrong CPU, try again */
+ if (task_cpu(ctx->task) != smp_processor_id()) {
+ ret = -ESRCH;
goto unlock;
+ }
/*
- * If task_ctx is set, it had better be to us.
+ * If we're on the right CPU, see if the task we target is
+ * current, if not we don't have to activate the ctx, a future
+ * context switch will do that for us.
*/
- WARN_ON_ONCE(cpuctx->task_ctx != ctx && cpuctx->task_ctx);
+ if (ctx->task != current)
+ activate = false;
+ else
+ WARN_ON_ONCE(cpuctx->task_ctx && cpuctx->task_ctx != ctx);
+
} else if (task_ctx) {
raw_spin_lock(&task_ctx->lock);
}
- ctx_resched(cpuctx, task_ctx);
+ if (activate) {
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+ add_event_to_ctx(event, ctx);
+ ctx_resched(cpuctx, task_ctx);
+ } else {
+ add_event_to_ctx(event, ctx);
+ }
+
unlock:
perf_ctx_unlock(cpuctx, task_ctx);
- return 0;
+ return ret;
}
/*
- * Attach a performance event to a context
+ * Attach a performance event to a context.
+ *
+ * Very similar to event_function_call, see comment there.
*/
static void
perf_install_in_context(struct perf_event_context *ctx,
struct perf_event *event,
int cpu)
{
- struct task_struct *task = NULL;
+ struct task_struct *task = READ_ONCE(ctx->task);
lockdep_assert_held(&ctx->mutex);
@@ -2147,40 +2187,46 @@
if (event->cpu != -1)
event->cpu = cpu;
+ if (!task) {
+ cpu_function_call(cpu, __perf_install_in_context, event);
+ return;
+ }
+
+ /*
+	 * Should not happen; we validate that the ctx is still alive before calling.
+ */
+ if (WARN_ON_ONCE(task == TASK_TOMBSTONE))
+ return;
+
/*
* Installing events is tricky because we cannot rely on ctx->is_active
* to be set in case this is the nr_events 0 -> 1 transition.
- *
- * So what we do is we add the event to the list here, which will allow
- * a future context switch to DTRT and then send a racy IPI. If the IPI
- * fails to hit the right task, this means a context switch must have
- * happened and that will have taken care of business.
*/
+again:
+ /*
+ * Cannot use task_function_call() because we need to run on the task's
+ * CPU regardless of whether its current or not.
+ */
+ if (!cpu_function_call(task_cpu(task), __perf_install_in_context, event))
+ return;
+
raw_spin_lock_irq(&ctx->lock);
task = ctx->task;
- /*
- * Worse, we cannot even rely on the ctx actually existing anymore. If
- * between find_get_context() and perf_install_in_context() the task
- * went through perf_event_exit_task() its dead and we should not be
- * adding new events.
- */
- if (task == TASK_TOMBSTONE) {
+ if (WARN_ON_ONCE(task == TASK_TOMBSTONE)) {
+ /*
+ * Cannot happen because we already checked above (which also
+ * cannot happen), and we hold ctx->mutex, which serializes us
+ * against perf_event_exit_task_context().
+ */
raw_spin_unlock_irq(&ctx->lock);
return;
}
- update_context_time(ctx);
- /*
- * Update cgrp time only if current cgrp matches event->cgrp.
- * Must be done before calling add_event_to_ctx().
- */
- update_cgrp_time_from_event(event);
- add_event_to_ctx(event, ctx);
raw_spin_unlock_irq(&ctx->lock);
-
- if (task)
- task_function_call(task, __perf_install_in_context, ctx);
- else
- cpu_function_call(cpu, __perf_install_in_context, ctx);
+ /*
+ * Since !ctx->is_active doesn't mean anything, we must IPI
+ * unconditionally.
+ */
+ goto again;
}
/*
@@ -2219,17 +2265,18 @@
event->state <= PERF_EVENT_STATE_ERROR)
return;
- update_context_time(ctx);
+ if (ctx->is_active)
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+
__perf_event_mark_enabled(event);
if (!ctx->is_active)
return;
if (!event_filter_match(event)) {
- if (is_cgroup_event(event)) {
- perf_cgroup_set_timestamp(current, ctx); // XXX ?
+ if (is_cgroup_event(event))
perf_cgroup_defer_enabled(event);
- }
+ ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
return;
}
@@ -2237,8 +2284,10 @@
* If the event is in a group and isn't the group leader,
* then don't put it on unless the group is on.
*/
- if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
+ if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
+ ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
return;
+ }
task_ctx = cpuctx->task_ctx;
if (ctx->task)
@@ -2344,24 +2393,33 @@
}
ctx->is_active &= ~event_type;
+ if (!(ctx->is_active & EVENT_ALL))
+ ctx->is_active = 0;
+
if (ctx->task) {
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
if (!ctx->is_active)
cpuctx->task_ctx = NULL;
}
- update_context_time(ctx);
- update_cgrp_time_from_cpuctx(cpuctx);
- if (!ctx->nr_active)
+ is_active ^= ctx->is_active; /* changed bits */
+
+ if (is_active & EVENT_TIME) {
+ /* update (and stop) ctx time */
+ update_context_time(ctx);
+ update_cgrp_time_from_cpuctx(cpuctx);
+ }
+
+ if (!ctx->nr_active || !(is_active & EVENT_ALL))
return;
perf_pmu_disable(ctx->pmu);
- if ((is_active & EVENT_PINNED) && (event_type & EVENT_PINNED)) {
+ if (is_active & EVENT_PINNED) {
list_for_each_entry(event, &ctx->pinned_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
- if ((is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE)) {
+ if (is_active & EVENT_FLEXIBLE) {
list_for_each_entry(event, &ctx->flexible_groups, group_entry)
group_sched_out(event, cpuctx, ctx);
}
@@ -2641,18 +2699,6 @@
perf_cgroup_sched_out(task, next);
}
-static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
- struct perf_event_context *ctx)
-{
- if (!cpuctx->task_ctx)
- return;
-
- if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
- return;
-
- ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-}
-
/*
* Called with IRQs disabled
*/
@@ -2735,7 +2781,7 @@
if (likely(!ctx->nr_events))
return;
- ctx->is_active |= event_type;
+ ctx->is_active |= (event_type | EVENT_TIME);
if (ctx->task) {
if (!is_active)
cpuctx->task_ctx = ctx;
@@ -2743,18 +2789,24 @@
WARN_ON_ONCE(cpuctx->task_ctx != ctx);
}
- now = perf_clock();
- ctx->timestamp = now;
- perf_cgroup_set_timestamp(task, ctx);
+ is_active ^= ctx->is_active; /* changed bits */
+
+ if (is_active & EVENT_TIME) {
+ /* start ctx time */
+ now = perf_clock();
+ ctx->timestamp = now;
+ perf_cgroup_set_timestamp(task, ctx);
+ }
+
/*
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
- if (!(is_active & EVENT_PINNED) && (event_type & EVENT_PINNED))
+ if (is_active & EVENT_PINNED)
ctx_pinned_sched_in(ctx, cpuctx);
/* Then walk through the lower prio flexible groups */
- if (!(is_active & EVENT_FLEXIBLE) && (event_type & EVENT_FLEXIBLE))
+ if (is_active & EVENT_FLEXIBLE)
ctx_flexible_sched_in(ctx, cpuctx);
}
@@ -3120,6 +3172,7 @@
cpuctx = __get_cpu_context(ctx);
perf_ctx_lock(cpuctx, ctx);
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME);
list_for_each_entry(event, &ctx->event_list, event_entry)
enabled |= event_enable_on_exec(event, ctx);
@@ -3537,12 +3590,22 @@
if (has_branch_stack(event))
dec = true;
- if (dec)
- static_key_slow_dec_deferred(&perf_sched_events);
+ if (dec) {
+ if (!atomic_add_unless(&perf_sched_count, -1, 1))
+ schedule_delayed_work(&perf_sched_work, HZ);
+ }
unaccount_event_cpu(event, event->cpu);
}
+static void perf_sched_delayed(struct work_struct *work)
+{
+ mutex_lock(&perf_sched_mutex);
+ if (atomic_dec_and_test(&perf_sched_count))
+ static_branch_disable(&perf_sched_events);
+ mutex_unlock(&perf_sched_mutex);
+}
+
/*
* The following implement mutual exclusion of events on "exclusive" pmus
* (PERF_PMU_CAP_EXCLUSIVE). Such pmus can only have one event scheduled
@@ -3752,30 +3815,42 @@
*/
int perf_event_release_kernel(struct perf_event *event)
{
- struct perf_event_context *ctx;
+ struct perf_event_context *ctx = event->ctx;
struct perf_event *child, *tmp;
+ /*
+ * If we got here through err_file: fput(event_file); we will not have
+ * attached to a context yet.
+ */
+ if (!ctx) {
+ WARN_ON_ONCE(event->attach_state &
+ (PERF_ATTACH_CONTEXT|PERF_ATTACH_GROUP));
+ goto no_ctx;
+ }
+
if (!is_kernel_event(event))
perf_remove_from_owner(event);
ctx = perf_event_ctx_lock(event);
WARN_ON_ONCE(ctx->parent_ctx);
- perf_remove_from_context(event, DETACH_GROUP | DETACH_STATE);
- perf_event_ctx_unlock(event, ctx);
+ perf_remove_from_context(event, DETACH_GROUP);
+ raw_spin_lock_irq(&ctx->lock);
/*
- * At this point we must have event->state == PERF_EVENT_STATE_EXIT,
- * either from the above perf_remove_from_context() or through
- * perf_event_exit_event().
+	 * Mark this event as STATE_DEAD; there is no external reference to it
+ * anymore.
*
- * Therefore, anybody acquiring event->child_mutex after the below
- * loop _must_ also see this, most importantly inherit_event() which
- * will avoid placing more children on the list.
+ * Anybody acquiring event->child_mutex after the below loop _must_
+ * also see this, most importantly inherit_event() which will avoid
+ * placing more children on the list.
*
* Thus this guarantees that we will in fact observe and kill _ALL_
* child events.
*/
- WARN_ON_ONCE(event->state != PERF_EVENT_STATE_EXIT);
+ event->state = PERF_EVENT_STATE_DEAD;
+ raw_spin_unlock_irq(&ctx->lock);
+
+ perf_event_ctx_unlock(event, ctx);
again:
mutex_lock(&event->child_mutex);
@@ -3830,8 +3905,8 @@
}
mutex_unlock(&event->child_mutex);
- /* Must be the last reference */
- put_event(event);
+no_ctx:
+ put_event(event); /* Must be the 'last' reference */
return 0;
}
EXPORT_SYMBOL_GPL(perf_event_release_kernel);
@@ -3988,7 +4063,7 @@
{
bool no_children;
- if (event->state != PERF_EVENT_STATE_EXIT)
+ if (event->state > PERF_EVENT_STATE_EXIT)
return false;
mutex_lock(&event->child_mutex);
@@ -7769,8 +7844,28 @@
if (is_cgroup_event(event))
inc = true;
- if (inc)
- static_key_slow_inc(&perf_sched_events.key);
+ if (inc) {
+ if (atomic_inc_not_zero(&perf_sched_count))
+ goto enabled;
+
+ mutex_lock(&perf_sched_mutex);
+ if (!atomic_read(&perf_sched_count)) {
+ static_branch_enable(&perf_sched_events);
+ /*
+			 * Guarantee that all CPUs observe the key change and
+ * call the perf scheduling hooks before proceeding to
+ * install events that need them.
+ */
+ synchronize_sched();
+ }
+ /*
+ * Now that we have waited for the sync_sched(), allow further
+		 * increments to bypass the mutex.
+ */
+ atomic_inc(&perf_sched_count);
+ mutex_unlock(&perf_sched_mutex);
+ }
+enabled:
account_event_cpu(event, event->cpu);
}
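The account side above trades the old rate-limited deferred key for an explicit refcount around the static branch: atomic_inc_not_zero() is the lock-free fast path, and only the 0 -> 1 transition takes perf_sched_mutex, enables the branch, and waits in synchronize_sched() so every CPU observes the key before events that depend on the scheduling hooks are installed. A C11/pthreads sketch of that enable-once shape, with an atomic flag standing in for the static key and illustrative names:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int sched_count;
static atomic_bool sched_key;           /* stand-in for the static key */
static pthread_mutex_t sched_mutex = PTHREAD_MUTEX_INITIALIZER;

/* atomic_inc_not_zero(): take a reference only if one already exists */
static bool inc_not_zero(atomic_int *v)
{
        int c = atomic_load(v);

        while (c != 0)
                if (atomic_compare_exchange_weak(v, &c, c + 1))
                        return true;
        return false;
}

static void account(void)
{
        if (inc_not_zero(&sched_count))
                return;                         /* fast path, no mutex */

        pthread_mutex_lock(&sched_mutex);
        if (atomic_load(&sched_count) == 0) {
                atomic_store(&sched_key, true); /* static_branch_enable() */
                /* the kernel waits in synchronize_sched() here */
        }
        atomic_fetch_add(&sched_count, 1);
        pthread_mutex_unlock(&sched_mutex);
}

int main(void)
{
        account();                              /* flips the key once */
        account();                              /* fast path only */
        printf("key=%d count=%d\n",
               (int)atomic_load(&sched_key), atomic_load(&sched_count));
        return 0;
}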
@@ -8389,10 +8484,19 @@
if (move_group) {
gctx = group_leader->ctx;
mutex_lock_double(&gctx->mutex, &ctx->mutex);
+ if (gctx->task == TASK_TOMBSTONE) {
+ err = -ESRCH;
+ goto err_locked;
+ }
} else {
mutex_lock(&ctx->mutex);
}
+ if (ctx->task == TASK_TOMBSTONE) {
+ err = -ESRCH;
+ goto err_locked;
+ }
+
if (!perf_event_validate_size(event)) {
err = -E2BIG;
goto err_locked;
@@ -8509,7 +8613,12 @@
perf_unpin_context(ctx);
put_ctx(ctx);
err_alloc:
- free_event(event);
+ /*
+ * If event_file is set, the fput() above will have called ->release()
+ * and that will take care of freeing the event.
+ */
+ if (!event_file)
+ free_event(event);
err_cpus:
put_online_cpus();
err_task:
@@ -8563,12 +8672,14 @@
WARN_ON_ONCE(ctx->parent_ctx);
mutex_lock(&ctx->mutex);
+ if (ctx->task == TASK_TOMBSTONE) {
+ err = -ESRCH;
+ goto err_unlock;
+ }
+
if (!exclusive_event_installable(event, ctx)) {
- mutex_unlock(&ctx->mutex);
- perf_unpin_context(ctx);
- put_ctx(ctx);
err = -EBUSY;
- goto err_free;
+ goto err_unlock;
}
perf_install_in_context(ctx, event, cpu);
@@ -8577,6 +8688,10 @@
return event;
+err_unlock:
+ mutex_unlock(&ctx->mutex);
+ perf_unpin_context(ctx);
+ put_ctx(ctx);
err_free:
free_event(event);
err:
@@ -8695,7 +8810,7 @@
if (parent_event)
perf_group_detach(child_event);
list_del_event(child_event, child_ctx);
- child_event->state = PERF_EVENT_STATE_EXIT; /* see perf_event_release_kernel() */
+ child_event->state = PERF_EVENT_STATE_EXIT; /* is_event_hup() */
raw_spin_unlock_irq(&child_ctx->lock);
/*
@@ -9206,7 +9321,7 @@
struct swevent_htable *swhash = &per_cpu(swevent_htable, cpu);
mutex_lock(&swhash->hlist_mutex);
- if (swhash->hlist_refcount > 0) {
+ if (swhash->hlist_refcount > 0 && !swevent_hlist_deref(swhash)) {
struct swevent_hlist *hlist;
hlist = kzalloc_node(sizeof(*hlist), GFP_KERNEL, cpu_to_node(cpu));
@@ -9282,11 +9397,9 @@
switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
- case CPU_DOWN_FAILED:
perf_event_init_cpu(cpu);
break;
- case CPU_UP_CANCELED:
case CPU_DOWN_PREPARE:
perf_event_exit_cpu(cpu);
break;
@@ -9315,9 +9428,6 @@
ret = init_hw_breakpoint();
WARN(ret, "hw_breakpoint initialization failed with: %d", ret);
- /* do not patch jump label more than once per second */
- jump_label_rate_limit(&perf_sched_events, HZ);
-
/*
* Build time assertion that we keep the data_head at the intended
* location. IOW, validation we got the __reserved[] size right.
diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 60ace56..716547f 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -292,7 +292,7 @@
#define __classhashfn(key) hash_long((unsigned long)key, CLASSHASH_BITS)
#define classhashentry(key) (classhash_table + __classhashfn((key)))
-static struct list_head classhash_table[CLASSHASH_SIZE];
+static struct hlist_head classhash_table[CLASSHASH_SIZE];
/*
* We put the lock dependency chains into a hash-table as well, to cache
@@ -303,7 +303,7 @@
#define __chainhashfn(chain) hash_long(chain, CHAINHASH_BITS)
#define chainhashentry(chain) (chainhash_table + __chainhashfn((chain)))
-static struct list_head chainhash_table[CHAINHASH_SIZE];
+static struct hlist_head chainhash_table[CHAINHASH_SIZE];
/*
* The hash key of the lock dependency chains is a hash itself too:
@@ -666,7 +666,7 @@
look_up_lock_class(struct lockdep_map *lock, unsigned int subclass)
{
struct lockdep_subclass_key *key;
- struct list_head *hash_head;
+ struct hlist_head *hash_head;
struct lock_class *class;
#ifdef CONFIG_DEBUG_LOCKDEP
@@ -719,7 +719,7 @@
if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
return NULL;
- list_for_each_entry_rcu(class, hash_head, hash_entry) {
+ hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
if (class->key == key) {
/*
* Huh! same key, different name? Did someone trample
@@ -742,7 +742,7 @@
register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
{
struct lockdep_subclass_key *key;
- struct list_head *hash_head;
+ struct hlist_head *hash_head;
struct lock_class *class;
DEBUG_LOCKS_WARN_ON(!irqs_disabled());
@@ -774,7 +774,7 @@
* We have to do the hash-walk again, to avoid races
* with another CPU:
*/
- list_for_each_entry_rcu(class, hash_head, hash_entry) {
+ hlist_for_each_entry_rcu(class, hash_head, hash_entry) {
if (class->key == key)
goto out_unlock_set;
}
@@ -805,7 +805,7 @@
* We use RCU's safe list-add method to make
* parallel walking of the hash-list safe:
*/
- list_add_tail_rcu(&class->hash_entry, hash_head);
+ hlist_add_head_rcu(&class->hash_entry, hash_head);
/*
* Add it to the global list of classes:
*/
@@ -1822,7 +1822,7 @@
*/
static int
check_prev_add(struct task_struct *curr, struct held_lock *prev,
- struct held_lock *next, int distance, int trylock_loop)
+ struct held_lock *next, int distance, int *stack_saved)
{
struct lock_list *entry;
int ret;
@@ -1883,8 +1883,11 @@
}
}
- if (!trylock_loop && !save_trace(&trace))
- return 0;
+ if (!*stack_saved) {
+ if (!save_trace(&trace))
+ return 0;
+ *stack_saved = 1;
+ }
/*
* Ok, all validations passed, add the new lock
@@ -1907,6 +1910,8 @@
* Debugging printouts:
*/
if (verbose(hlock_class(prev)) || verbose(hlock_class(next))) {
+ /* We drop graph lock, so another thread can overwrite trace. */
+ *stack_saved = 0;
graph_unlock();
printk("\n new dependency: ");
print_lock_name(hlock_class(prev));
@@ -1929,7 +1934,7 @@
check_prevs_add(struct task_struct *curr, struct held_lock *next)
{
int depth = curr->lockdep_depth;
- int trylock_loop = 0;
+ int stack_saved = 0;
struct held_lock *hlock;
/*
@@ -1956,7 +1961,7 @@
*/
if (hlock->read != 2 && hlock->check) {
if (!check_prev_add(curr, hlock, next,
- distance, trylock_loop))
+ distance, &stack_saved))
return 0;
/*
* Stop after the first non-trylock entry,
@@ -1979,7 +1984,6 @@
if (curr->held_locks[depth].irq_context !=
curr->held_locks[depth-1].irq_context)
break;
- trylock_loop = 1;
}
return 1;
out_bug:
@@ -2017,7 +2021,7 @@
u64 chain_key)
{
struct lock_class *class = hlock_class(hlock);
- struct list_head *hash_head = chainhashentry(chain_key);
+ struct hlist_head *hash_head = chainhashentry(chain_key);
struct lock_chain *chain;
struct held_lock *hlock_curr;
int i, j;
@@ -2033,7 +2037,7 @@
* We can walk it lock-free, because entries only get added
* to the hash:
*/
- list_for_each_entry_rcu(chain, hash_head, entry) {
+ hlist_for_each_entry_rcu(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
cache_hit:
debug_atomic_inc(chain_lookup_hits);
@@ -2057,7 +2061,7 @@
/*
* We have to walk the chain again locked - to avoid duplicates:
*/
- list_for_each_entry(chain, hash_head, entry) {
+ hlist_for_each_entry(chain, hash_head, entry) {
if (chain->chain_key == chain_key) {
graph_unlock();
goto cache_hit;
@@ -2091,7 +2095,7 @@
}
chain_hlocks[chain->base + j] = class - lock_classes;
}
- list_add_tail_rcu(&chain->entry, hash_head);
+ hlist_add_head_rcu(&chain->entry, hash_head);
debug_atomic_inc(chain_lookup_misses);
inc_chains();
@@ -3875,7 +3879,7 @@
nr_process_chains = 0;
debug_locks = 1;
for (i = 0; i < CHAINHASH_SIZE; i++)
- INIT_LIST_HEAD(chainhash_table + i);
+ INIT_HLIST_HEAD(chainhash_table + i);
raw_local_irq_restore(flags);
}
@@ -3894,7 +3898,7 @@
/*
* Unhash the class and remove it from the all_lock_classes list:
*/
- list_del_rcu(&class->hash_entry);
+ hlist_del_rcu(&class->hash_entry);
list_del_rcu(&class->lock_entry);
RCU_INIT_POINTER(class->key, NULL);
@@ -3917,7 +3921,7 @@
void lockdep_free_key_range(void *start, unsigned long size)
{
struct lock_class *class;
- struct list_head *head;
+ struct hlist_head *head;
unsigned long flags;
int i;
int locked;
@@ -3930,9 +3934,7 @@
*/
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
- if (list_empty(head))
- continue;
- list_for_each_entry_rcu(class, head, hash_entry) {
+ hlist_for_each_entry_rcu(class, head, hash_entry) {
if (within(class->key, start, size))
zap_class(class);
else if (within(class->name, start, size))
@@ -3962,7 +3964,7 @@
void lockdep_reset_lock(struct lockdep_map *lock)
{
struct lock_class *class;
- struct list_head *head;
+ struct hlist_head *head;
unsigned long flags;
int i, j;
int locked;
@@ -3987,9 +3989,7 @@
locked = graph_lock();
for (i = 0; i < CLASSHASH_SIZE; i++) {
head = classhash_table + i;
- if (list_empty(head))
- continue;
- list_for_each_entry_rcu(class, head, hash_entry) {
+ hlist_for_each_entry_rcu(class, head, hash_entry) {
int match = 0;
for (j = 0; j < NR_LOCKDEP_CACHING_CLASSES; j++)
@@ -4027,10 +4027,10 @@
return;
for (i = 0; i < CLASSHASH_SIZE; i++)
- INIT_LIST_HEAD(classhash_table + i);
+ INIT_HLIST_HEAD(classhash_table + i);
for (i = 0; i < CHAINHASH_SIZE; i++)
- INIT_LIST_HEAD(chainhash_table + i);
+ INIT_HLIST_HEAD(chainhash_table + i);
lockdep_initialized = 1;
}
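The hlist conversion above is worth a closer look: an hlist_head is a single pointer (versus two for list_head), which halves these static hash tables, and it matches lockdep's add-only, RCU-walked usage. A minimal sketch of the resulting pattern, with illustrative names (struct entry, table, KEYHASH_*) rather than lockdep's own:

	#include <linux/hash.h>
	#include <linux/rculist.h>

	#define KEYHASH_BITS	10
	#define KEYHASH_SIZE	(1UL << KEYHASH_BITS)

	struct entry {
		unsigned long key;
		struct hlist_node hash_entry;
	};

	static struct hlist_head table[KEYHASH_SIZE];

	/* Readers walk a bucket lock-free under RCU, as in look_up_lock_class(). */
	static struct entry *lookup(unsigned long key)
	{
		struct hlist_head *head = table + hash_long(key, KEYHASH_BITS);
		struct entry *e;

		hlist_for_each_entry_rcu(e, head, hash_entry)
			if (e->key == key)
				return e;
		return NULL;
	}

	/* Writers insert at the head, serialized externally (the graph lock here). */
	static void publish(struct entry *e)
	{
		hlist_add_head_rcu(&e->hash_entry,
				   table + hash_long(e->key, KEYHASH_BITS));
	}

Note that the list_empty() pre-checks in lockdep_free_key_range() and lockdep_reset_lock() simply disappear: an empty hlist bucket has a NULL ->first, so the iterator body never runs.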
diff --git a/kernel/memremap.c b/kernel/memremap.c
index 70ee377..b981a7b 100644
--- a/kernel/memremap.c
+++ b/kernel/memremap.c
@@ -114,7 +114,7 @@
static void devm_memremap_release(struct device *dev, void *res)
{
- memunmap(res);
+ memunmap(*(void **)res);
}
static int devm_memremap_match(struct device *dev, void *res, void *match_data)
@@ -136,8 +136,10 @@
if (addr) {
*ptr = addr;
devres_add(dev, ptr);
- } else
+ } else {
devres_free(ptr);
+ return ERR_PTR(-ENXIO);
+ }
return addr;
}
@@ -150,7 +152,7 @@
}
EXPORT_SYMBOL(devm_memunmap);
-pfn_t phys_to_pfn_t(phys_addr_t addr, unsigned long flags)
+pfn_t phys_to_pfn_t(phys_addr_t addr, u64 flags)
{
return __pfn_to_pfn_t(addr >> PAGE_SHIFT, flags);
}
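For context on the memunmap(*(void **)res) fix above: the payload that devres_alloc() hands to the release callback is the slot holding the mapping pointer, not the mapping itself, so the callback must dereference it. A sketch of the call shape, reconstructed from the hunks (not a verbatim copy of the function):

	void *devm_memremap(struct device *dev, resource_size_t offset,
			    size_t size, unsigned long flags)
	{
		void **ptr, *addr;

		ptr = devres_alloc(devm_memremap_release, sizeof(*ptr),
				   GFP_KERNEL);
		if (!ptr)
			return ERR_PTR(-ENOMEM);

		addr = memremap(offset, size, flags);
		if (addr) {
			*ptr = addr;		/* mapping lives in the slot */
			devres_add(dev, ptr);	/* release callback gets @ptr */
		} else {
			devres_free(ptr);
			return ERR_PTR(-ENXIO);	/* the new error path */
		}

		return addr;
	}

The second hunk also stops callers from having to handle a bare NULL: failure now yields ERR_PTR(-ENXIO) instead of NULL.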
diff --git a/kernel/module.c b/kernel/module.c
index 8358f46..794ebe8 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -303,6 +303,9 @@
struct _ddebug *debug;
unsigned int num_debug;
bool sig_ok;
+#ifdef CONFIG_KALLSYMS
+ unsigned long mod_kallsyms_init_off;
+#endif
struct {
unsigned int sym, str, mod, vers, info, pcpu;
} index;
@@ -981,6 +984,8 @@
mod->exit();
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_GOING, mod);
+ ftrace_release_mod(mod);
+
async_synchronize_full();
/* Store the name of the last unloaded module for diagnostic purposes */
@@ -2480,10 +2485,21 @@
strsect->sh_flags |= SHF_ALLOC;
strsect->sh_entsize = get_offset(mod, &mod->init_layout.size, strsect,
info->index.str) | INIT_OFFSET_MASK;
- mod->init_layout.size = debug_align(mod->init_layout.size);
pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
+
+ /* We'll tack temporary mod_kallsyms on the end. */
+ mod->init_layout.size = ALIGN(mod->init_layout.size,
+ __alignof__(struct mod_kallsyms));
+ info->mod_kallsyms_init_off = mod->init_layout.size;
+ mod->init_layout.size += sizeof(struct mod_kallsyms);
+ mod->init_layout.size = debug_align(mod->init_layout.size);
}
+/*
+ * We use the full symtab and strtab which layout_symtab arranged to
+ * be appended to the init section. Later we switch to the cut-down
+ * core-only ones.
+ */
static void add_kallsyms(struct module *mod, const struct load_info *info)
{
unsigned int i, ndst;
@@ -2492,29 +2508,34 @@
char *s;
Elf_Shdr *symsec = &info->sechdrs[info->index.sym];
- mod->symtab = (void *)symsec->sh_addr;
- mod->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
+ /* Set up to point into init section. */
+ mod->kallsyms = mod->init_layout.base + info->mod_kallsyms_init_off;
+
+ mod->kallsyms->symtab = (void *)symsec->sh_addr;
+ mod->kallsyms->num_symtab = symsec->sh_size / sizeof(Elf_Sym);
/* Make sure we get permanent strtab: don't use info->strtab. */
- mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
+ mod->kallsyms->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
/* Set types up while we still have access to sections. */
- for (i = 0; i < mod->num_symtab; i++)
- mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
+ for (i = 0; i < mod->kallsyms->num_symtab; i++)
+ mod->kallsyms->symtab[i].st_info
+ = elf_type(&mod->kallsyms->symtab[i], info);
- mod->core_symtab = dst = mod->core_layout.base + info->symoffs;
- mod->core_strtab = s = mod->core_layout.base + info->stroffs;
- src = mod->symtab;
- for (ndst = i = 0; i < mod->num_symtab; i++) {
+ /* Now populate the cut down core kallsyms for after init. */
+ mod->core_kallsyms.symtab = dst = mod->core_layout.base + info->symoffs;
+ mod->core_kallsyms.strtab = s = mod->core_layout.base + info->stroffs;
+ src = mod->kallsyms->symtab;
+ for (ndst = i = 0; i < mod->kallsyms->num_symtab; i++) {
if (i == 0 ||
is_core_symbol(src+i, info->sechdrs, info->hdr->e_shnum,
info->index.pcpu)) {
dst[ndst] = src[i];
- dst[ndst++].st_name = s - mod->core_strtab;
- s += strlcpy(s, &mod->strtab[src[i].st_name],
+ dst[ndst++].st_name = s - mod->core_kallsyms.strtab;
+ s += strlcpy(s, &mod->kallsyms->strtab[src[i].st_name],
KSYM_NAME_LEN) + 1;
}
}
- mod->core_num_syms = ndst;
+ mod->core_kallsyms.num_symtab = ndst;
}
#else
static inline void layout_symtab(struct module *mod, struct load_info *info)
@@ -3263,9 +3284,8 @@
module_put(mod);
trim_init_extable(mod);
#ifdef CONFIG_KALLSYMS
- mod->num_symtab = mod->core_num_syms;
- mod->symtab = mod->core_symtab;
- mod->strtab = mod->core_strtab;
+ /* Switch to core kallsyms now init is done: kallsyms may be walking! */
+ rcu_assign_pointer(mod->kallsyms, &mod->core_kallsyms);
#endif
mod_tree_remove_init(mod);
disable_ro_nx(&mod->init_layout);
@@ -3295,6 +3315,7 @@
module_put(mod);
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_GOING, mod);
+ ftrace_release_mod(mod);
free_module(mod);
wake_up_all(&module_wq);
return ret;
@@ -3371,6 +3392,7 @@
mod->state = MODULE_STATE_COMING;
mutex_unlock(&module_mutex);
+ ftrace_module_enable(mod);
blocking_notifier_call_chain(&module_notify_list,
MODULE_STATE_COMING, mod);
return 0;
@@ -3496,7 +3518,7 @@
/* Module is ready to execute: parsing args may do that. */
after_dashes = parse_args(mod->name, mod->args, mod->kp, mod->num_kp,
- -32768, 32767, NULL,
+ -32768, 32767, mod,
unknown_module_param_cb);
if (IS_ERR(after_dashes)) {
err = PTR_ERR(after_dashes);
@@ -3627,6 +3649,11 @@
&& (str[2] == '\0' || str[2] == '.');
}
+static const char *symname(struct mod_kallsyms *kallsyms, unsigned int symnum)
+{
+ return kallsyms->strtab + kallsyms->symtab[symnum].st_name;
+}
+
static const char *get_ksymbol(struct module *mod,
unsigned long addr,
unsigned long *size,
@@ -3634,6 +3661,7 @@
{
unsigned int i, best = 0;
unsigned long nextval;
+ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
/* At worst, next value is at end of module */
if (within_module_init(addr, mod))
@@ -3643,32 +3671,32 @@
/* Scan for closest preceding symbol, and next symbol. (ELF
starts real symbols at 1). */
- for (i = 1; i < mod->num_symtab; i++) {
- if (mod->symtab[i].st_shndx == SHN_UNDEF)
+ for (i = 1; i < kallsyms->num_symtab; i++) {
+ if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
continue;
/* We ignore unnamed symbols: they're uninformative
* and inserted at a whim. */
- if (mod->symtab[i].st_value <= addr
- && mod->symtab[i].st_value > mod->symtab[best].st_value
- && *(mod->strtab + mod->symtab[i].st_name) != '\0'
- && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
+ if (*symname(kallsyms, i) == '\0'
+ || is_arm_mapping_symbol(symname(kallsyms, i)))
+ continue;
+
+ if (kallsyms->symtab[i].st_value <= addr
+ && kallsyms->symtab[i].st_value > kallsyms->symtab[best].st_value)
best = i;
- if (mod->symtab[i].st_value > addr
- && mod->symtab[i].st_value < nextval
- && *(mod->strtab + mod->symtab[i].st_name) != '\0'
- && !is_arm_mapping_symbol(mod->strtab + mod->symtab[i].st_name))
- nextval = mod->symtab[i].st_value;
+ if (kallsyms->symtab[i].st_value > addr
+ && kallsyms->symtab[i].st_value < nextval)
+ nextval = kallsyms->symtab[i].st_value;
}
if (!best)
return NULL;
if (size)
- *size = nextval - mod->symtab[best].st_value;
+ *size = nextval - kallsyms->symtab[best].st_value;
if (offset)
- *offset = addr - mod->symtab[best].st_value;
- return mod->strtab + mod->symtab[best].st_name;
+ *offset = addr - kallsyms->symtab[best].st_value;
+ return symname(kallsyms, best);
}
/* For kallsyms to ask for address resolution. NULL means not found. Careful
@@ -3758,19 +3786,21 @@
preempt_disable();
list_for_each_entry_rcu(mod, &modules, list) {
+ struct mod_kallsyms *kallsyms;
+
if (mod->state == MODULE_STATE_UNFORMED)
continue;
- if (symnum < mod->num_symtab) {
- *value = mod->symtab[symnum].st_value;
- *type = mod->symtab[symnum].st_info;
- strlcpy(name, mod->strtab + mod->symtab[symnum].st_name,
- KSYM_NAME_LEN);
+ kallsyms = rcu_dereference_sched(mod->kallsyms);
+ if (symnum < kallsyms->num_symtab) {
+ *value = kallsyms->symtab[symnum].st_value;
+ *type = kallsyms->symtab[symnum].st_info;
+ strlcpy(name, symname(kallsyms, symnum), KSYM_NAME_LEN);
strlcpy(module_name, mod->name, MODULE_NAME_LEN);
*exported = is_exported(name, *value, mod);
preempt_enable();
return 0;
}
- symnum -= mod->num_symtab;
+ symnum -= kallsyms->num_symtab;
}
preempt_enable();
return -ERANGE;
@@ -3779,11 +3809,12 @@
static unsigned long mod_find_symname(struct module *mod, const char *name)
{
unsigned int i;
+ struct mod_kallsyms *kallsyms = rcu_dereference_sched(mod->kallsyms);
- for (i = 0; i < mod->num_symtab; i++)
- if (strcmp(name, mod->strtab+mod->symtab[i].st_name) == 0 &&
- mod->symtab[i].st_info != 'U')
- return mod->symtab[i].st_value;
+ for (i = 0; i < kallsyms->num_symtab; i++)
+ if (strcmp(name, symname(kallsyms, i)) == 0 &&
+ kallsyms->symtab[i].st_info != 'U')
+ return kallsyms->symtab[i].st_value;
return 0;
}
@@ -3822,11 +3853,14 @@
module_assert_mutex();
list_for_each_entry(mod, &modules, list) {
+ /* We hold module_mutex: no need for rcu_dereference_sched */
+ struct mod_kallsyms *kallsyms = mod->kallsyms;
+
if (mod->state == MODULE_STATE_UNFORMED)
continue;
- for (i = 0; i < mod->num_symtab; i++) {
- ret = fn(data, mod->strtab + mod->symtab[i].st_name,
- mod, mod->symtab[i].st_value);
+ for (i = 0; i < kallsyms->num_symtab; i++) {
+ ret = fn(data, symname(kallsyms, i),
+ mod, kallsyms->symtab[i].st_value);
if (ret != 0)
return ret;
}
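The module.c changes all serve one race fix: the full symtab/strtab parsed at load time now lives in a struct mod_kallsyms parked in the module's init region, and once init completes, mod->kallsyms is switched to the permanent cut-down copy with rcu_assign_pointer() so that kallsyms readers never see the tables half torn down. A hedged sketch of the reader side, assuming a live struct module *mod:

	/*
	 * Readers sample mod->kallsyms once under sched-RCU (preemption
	 * disabled); the publisher may flip it to &mod->core_kallsyms
	 * concurrently, but each reader sees one consistent table.
	 */
	preempt_disable();
	{
		struct mod_kallsyms *kallsyms =
			rcu_dereference_sched(mod->kallsyms);
		unsigned int i;

		for (i = 1; i < kallsyms->num_symtab; i++)
			pr_debug("sym: %s\n", kallsyms->strtab +
					      kallsyms->symtab[i].st_name);
	}
	preempt_enable();

module_kallsyms_on_each_symbol() can skip the rcu_dereference_sched() because it runs under module_mutex, which excludes the publisher entirely, as the added comment notes.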
diff --git a/kernel/resource.c b/kernel/resource.c
index 09c0597..3669d1b 100644
--- a/kernel/resource.c
+++ b/kernel/resource.c
@@ -1083,9 +1083,10 @@
if (!conflict)
break;
if (conflict != parent) {
- parent = conflict;
- if (!(conflict->flags & IORESOURCE_BUSY))
+ if (!(conflict->flags & IORESOURCE_BUSY)) {
+ parent = conflict;
continue;
+ }
}
if (conflict->flags & flags & IORESOURCE_MUXED) {
add_wait_queue(&muxed_resource_wait, &wait);
diff --git a/kernel/sched/deadline.c b/kernel/sched/deadline.c
index cd64c97..57b939c 100644
--- a/kernel/sched/deadline.c
+++ b/kernel/sched/deadline.c
@@ -420,7 +420,7 @@
* entity.
*/
if (dl_time_before(dl_se->deadline, rq_clock(rq))) {
- printk_deferred_once("sched: DL replenish lagged to much\n");
+ printk_deferred_once("sched: DL replenish lagged too much\n");
dl_se->deadline = rq_clock(rq) + pi_se->dl_deadline;
dl_se->runtime = pi_se->dl_runtime;
}
diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
index eca592f..57a6eea 100644
--- a/kernel/trace/ftrace.c
+++ b/kernel/trace/ftrace.c
@@ -4961,7 +4961,7 @@
mutex_unlock(&ftrace_lock);
}
-static void ftrace_module_enable(struct module *mod)
+void ftrace_module_enable(struct module *mod)
{
struct dyn_ftrace *rec;
struct ftrace_page *pg;
@@ -5038,38 +5038,8 @@
ftrace_process_locs(mod, mod->ftrace_callsites,
mod->ftrace_callsites + mod->num_ftrace_callsites);
}
-
-static int ftrace_module_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- struct module *mod = data;
-
- switch (val) {
- case MODULE_STATE_COMING:
- ftrace_module_enable(mod);
- break;
- case MODULE_STATE_GOING:
- ftrace_release_mod(mod);
- break;
- default:
- break;
- }
-
- return 0;
-}
-#else
-static int ftrace_module_notify(struct notifier_block *self,
- unsigned long val, void *data)
-{
- return 0;
-}
#endif /* CONFIG_MODULES */
-struct notifier_block ftrace_module_nb = {
- .notifier_call = ftrace_module_notify,
- .priority = INT_MIN, /* Run after anything that can remove kprobes */
-};
-
void __init ftrace_init(void)
{
extern unsigned long __start_mcount_loc[];
@@ -5098,10 +5068,6 @@
__start_mcount_loc,
__stop_mcount_loc);
- ret = register_module_notifier(&ftrace_module_nb);
- if (ret)
- pr_warning("Failed to register trace ftrace module exit notifier\n");
-
set_ftrace_early_filters();
return;
diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
index f333e57..ab09829 100644
--- a/kernel/trace/trace_events.c
+++ b/kernel/trace/trace_events.c
@@ -869,7 +869,8 @@
* The ftrace subsystem is for showing formats only.
* They cannot be enabled or disabled via the event files.
*/
- if (call->class && call->class->reg)
+ if (call->class && call->class->reg &&
+ !(call->flags & TRACE_EVENT_FL_IGNORE_ENABLE))
return file;
}
diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
index 202df6c..2a1abba 100644
--- a/kernel/trace/trace_stack.c
+++ b/kernel/trace/trace_stack.c
@@ -156,7 +156,11 @@
for (; p < top && i < stack_trace_max.nr_entries; p++) {
if (stack_dump_trace[i] == ULONG_MAX)
break;
- if (*p == stack_dump_trace[i]) {
+ /*
+ * The READ_ONCE_NOCHECK is used to let KASAN know that
+ * this is not a stack-out-of-bounds error.
+ */
+ if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
stack_dump_trace[x] = stack_dump_trace[i++];
this_size = stack_trace_index[x++] =
(top - p) * sizeof(unsigned long);
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index 61a0264..7ff5dc7 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -301,7 +301,23 @@
static LIST_HEAD(workqueues); /* PR: list of all workqueues */
static bool workqueue_freezing; /* PL: have wqs started freezing? */
-static cpumask_var_t wq_unbound_cpumask; /* PL: low level cpumask for all unbound wqs */
+/* PL: allowable cpus for unbound wqs and work items */
+static cpumask_var_t wq_unbound_cpumask;
+
+/* CPU where unbound work was last round robin scheduled from this CPU */
+static DEFINE_PER_CPU(int, wq_rr_cpu_last);
+
+/*
+ * Local execution of unbound work items is no longer guaranteed. The
+ * following always forces round-robin CPU selection on unbound work items
+ * to uncover usages which depend on it.
+ */
+#ifdef CONFIG_DEBUG_WQ_FORCE_RR_CPU
+static bool wq_debug_force_rr_cpu = true;
+#else
+static bool wq_debug_force_rr_cpu = false;
+#endif
+module_param_named(debug_force_rr_cpu, wq_debug_force_rr_cpu, bool, 0644);
/* the per-cpu worker pools */
static DEFINE_PER_CPU_SHARED_ALIGNED(struct worker_pool [NR_STD_WORKER_POOLS],
@@ -570,6 +586,16 @@
int node)
{
assert_rcu_or_wq_mutex_or_pool_mutex(wq);
+
+ /*
+ * XXX: @node can be NUMA_NO_NODE if CPU goes offline while a
+ * delayed item is pending. The plan is to keep CPU -> NODE
+ * mapping valid and stable across CPU on/offlines. Once that
+ * happens, this workaround can be removed.
+ */
+ if (unlikely(node == NUMA_NO_NODE))
+ return wq->dfl_pwq;
+
return rcu_dereference_raw(wq->numa_pwq_tbl[node]);
}
@@ -1298,6 +1324,39 @@
return worker && worker->current_pwq->wq == wq;
}
+/*
+ * When queueing an unbound work item to a wq, prefer local CPU if allowed
+ * by wq_unbound_cpumask. Otherwise, round robin among the allowed ones to
+ * avoid perturbing sensitive tasks.
+ */
+static int wq_select_unbound_cpu(int cpu)
+{
+ static bool printed_dbg_warning;
+ int new_cpu;
+
+ if (likely(!wq_debug_force_rr_cpu)) {
+ if (cpumask_test_cpu(cpu, wq_unbound_cpumask))
+ return cpu;
+ } else if (!printed_dbg_warning) {
+ pr_warn("workqueue: round-robin CPU selection forced, expect performance impact\n");
+ printed_dbg_warning = true;
+ }
+
+ if (cpumask_empty(wq_unbound_cpumask))
+ return cpu;
+
+ new_cpu = __this_cpu_read(wq_rr_cpu_last);
+ new_cpu = cpumask_next_and(new_cpu, wq_unbound_cpumask, cpu_online_mask);
+ if (unlikely(new_cpu >= nr_cpu_ids)) {
+ new_cpu = cpumask_first_and(wq_unbound_cpumask, cpu_online_mask);
+ if (unlikely(new_cpu >= nr_cpu_ids))
+ return cpu;
+ }
+ __this_cpu_write(wq_rr_cpu_last, new_cpu);
+
+ return new_cpu;
+}
+
static void __queue_work(int cpu, struct workqueue_struct *wq,
struct work_struct *work)
{
@@ -1323,7 +1382,7 @@
return;
retry:
if (req_cpu == WORK_CPU_UNBOUND)
- cpu = raw_smp_processor_id();
+ cpu = wq_select_unbound_cpu(raw_smp_processor_id());
/* pwq which will be used unless @work is executing elsewhere */
if (!(wq->flags & WQ_UNBOUND))
@@ -1464,13 +1523,13 @@
timer_stats_timer_set_start_info(&dwork->timer);
dwork->wq = wq;
- /* timer isn't guaranteed to run in this cpu, record earlier */
- if (cpu == WORK_CPU_UNBOUND)
- cpu = raw_smp_processor_id();
dwork->cpu = cpu;
timer->expires = jiffies + delay;
- add_timer_on(timer, cpu);
+ if (unlikely(cpu != WORK_CPU_UNBOUND))
+ add_timer_on(timer, cpu);
+ else
+ add_timer(timer);
}
/**
@@ -2355,7 +2414,8 @@
WARN_ONCE(current->flags & PF_MEMALLOC,
"workqueue: PF_MEMALLOC task %d(%s) is flushing !WQ_MEM_RECLAIM %s:%pf",
current->pid, current->comm, target_wq->name, target_func);
- WARN_ONCE(worker && (worker->current_pwq->wq->flags & WQ_MEM_RECLAIM),
+ WARN_ONCE(worker && ((worker->current_pwq->wq->flags &
+ (WQ_MEM_RECLAIM | __WQ_LEGACY)) == WQ_MEM_RECLAIM),
"workqueue: WQ_MEM_RECLAIM %s:%pf is flushing !WQ_MEM_RECLAIM %s:%pf",
worker->current_pwq->wq->name, worker->current_func,
target_wq->name, target_func);
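The practical upshot of wq_select_unbound_cpu(): queueing to an unbound workqueue without an explicit CPU no longer implies local execution, and the debug knob (toggleable at runtime through /sys/module/workqueue/parameters/debug_force_rr_cpu, per the 0644 module_param above) forces every such queueing to round-robin. A sketch of what callers should do, with my_unbound_wq/my_work as placeholders:

	/* May now run on any CPU allowed by wq_unbound_cpumask: */
	queue_work(my_unbound_wq, &my_work);

	/* Callers that depend on locality must name a CPU explicitly: */
	queue_work_on(raw_smp_processor_id(), my_unbound_wq, &my_work);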
diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
index ecb9e75..8bfd1ac 100644
--- a/lib/Kconfig.debug
+++ b/lib/Kconfig.debug
@@ -1400,6 +1400,21 @@
endmenu # "RCU Debugging"
+config DEBUG_WQ_FORCE_RR_CPU
+ bool "Force round-robin CPU selection for unbound work items"
+ depends on DEBUG_KERNEL
+ default n
+ help
+ Workqueues used to implicitly guarantee that work items queued
+ without an explicit CPU are put on the local CPU. This guarantee
+ no longer holds: while the local CPU is still preferred, work
+ items may be put on foreign CPUs. The kernel parameter
+ "workqueue.debug_force_rr_cpu" forces round-robin CPU selection
+ to flush out usages which depend on the now broken guarantee.
+ This config option enables the debug feature by default. When
+ enabled, memory and cache locality will be impacted.
+
config DEBUG_BLOCK_EXT_DEVT
bool "Force extended block device numbers and spread them"
depends on DEBUG_KERNEL
diff --git a/lib/Kconfig.ubsan b/lib/Kconfig.ubsan
index 49518fb..e07c1ba 100644
--- a/lib/Kconfig.ubsan
+++ b/lib/Kconfig.ubsan
@@ -18,6 +18,8 @@
This option activates instrumentation for the entire kernel.
If you don't enable this option, you have to explicitly specify
UBSAN_SANITIZE := y for the files/directories you want to check for UB.
+ Enabling this option will increase the kernel image size
+ significantly.
config UBSAN_ALIGNMENT
bool "Enable checking of pointers alignment"
@@ -25,5 +27,5 @@
default y if !HAVE_EFFICIENT_UNALIGNED_ACCESS
help
This option enables detection of unaligned memory accesses.
- Enabling this option on architectures that support unalligned
+ Enabling this option on architectures that support unaligned
accesses may produce a lot of false positives.
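As the UBSAN_SANITIZE_ALL help text notes, per-file opt-in happens in kbuild: setting UBSAN_SANITIZE := y in a directory's Makefile instruments that directory, and, following the usual kbuild convention, UBSAN_SANITIZE_foo.o := y instruments a single object (foo.o being a placeholder name).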
diff --git a/lib/klist.c b/lib/klist.c
index d74cf7a..0507fa5 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -282,9 +282,9 @@
struct klist_node *n)
{
i->i_klist = k;
- i->i_cur = n;
- if (n)
- kref_get(&n->n_ref);
+ i->i_cur = NULL;
+ if (n && kref_get_unless_zero(&n->n_ref))
+ i->i_cur = n;
}
EXPORT_SYMBOL_GPL(klist_iter_init_node);
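The kref_get_unless_zero() form matters because klist_iter_init_node() can race with the final put on @n: a plain kref_get() would take a reference on a node whose refcount has already hit zero and resurrect it mid-teardown, whereas kref_get_unless_zero() fails cleanly and the iterator simply starts out empty (i_cur stays NULL).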
diff --git a/lib/ucs2_string.c b/lib/ucs2_string.c
index 6f500ef..f0b323a 100644
--- a/lib/ucs2_string.c
+++ b/lib/ucs2_string.c
@@ -49,3 +49,65 @@
}
}
EXPORT_SYMBOL(ucs2_strncmp);
+
+unsigned long
+ucs2_utf8size(const ucs2_char_t *src)
+{
+ unsigned long i;
+ unsigned long j = 0;
+
+ for (i = 0; i < ucs2_strlen(src); i++) {
+ u16 c = src[i];
+
+ if (c >= 0x800)
+ j += 3;
+ else if (c >= 0x80)
+ j += 2;
+ else
+ j += 1;
+ }
+
+ return j;
+}
+EXPORT_SYMBOL(ucs2_utf8size);
+
+/*
+ * copy at most maxlength bytes of whole utf8 characters to dest from the
+ * ucs2 string src.
+ *
+ * The return value is the number of bytes copied, not including the
+ * final NUL byte.
+ */
+unsigned long
+ucs2_as_utf8(u8 *dest, const ucs2_char_t *src, unsigned long maxlength)
+{
+ unsigned int i;
+ unsigned long j = 0;
+ unsigned long limit = ucs2_strnlen(src, maxlength);
+
+ for (i = 0; maxlength && i < limit; i++) {
+ u16 c = src[i];
+
+ if (c >= 0x800) {
+ if (maxlength < 3)
+ break;
+ maxlength -= 3;
+ dest[j++] = 0xe0 | (c & 0xf000) >> 12;
+ dest[j++] = 0x80 | (c & 0x0fc0) >> 6;
+ dest[j++] = 0x80 | (c & 0x003f);
+ } else if (c >= 0x80) {
+ if (maxlength < 2)
+ break;
+ maxlength -= 2;
+ dest[j++] = 0xc0 | (c & 0x7c0) >> 6;
+ dest[j++] = 0x80 | (c & 0x03f);
+ } else {
+ maxlength -= 1;
+ dest[j++] = c & 0x7f;
+ }
+ }
+ if (maxlength)
+ dest[j] = '\0';
+ return j;
+}
+EXPORT_SYMBOL(ucs2_as_utf8);
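The bit twiddling in ucs2_as_utf8() is the standard 1/2/3-byte UTF-8 packing for BMP code points. A standalone userspace check of the three-byte case (U+20AC, the euro sign, which must encode as 0xE2 0x82 0xAC):

	#include <assert.h>
	#include <stdint.h>

	int main(void)
	{
		uint16_t c = 0x20ac;	/* EURO SIGN: >= 0x800, three bytes */
		uint8_t b0 = 0xe0 | (c & 0xf000) >> 12;
		uint8_t b1 = 0x80 | (c & 0x0fc0) >> 6;
		uint8_t b2 = 0x80 | (c & 0x003f);

		assert(b0 == 0xe2 && b1 == 0x82 && b2 == 0xac);
		return 0;
	}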
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index 48ff9c3..f44e178 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -1590,22 +1590,23 @@
return buf;
}
case 'K':
- /*
- * %pK cannot be used in IRQ context because its test
- * for CAP_SYSLOG would be meaningless.
- */
- if (kptr_restrict && (in_irq() || in_serving_softirq() ||
- in_nmi())) {
- if (spec.field_width == -1)
- spec.field_width = default_width;
- return string(buf, end, "pK-error", spec);
- }
-
switch (kptr_restrict) {
case 0:
/* Always print %pK values */
break;
case 1: {
+ const struct cred *cred;
+
+ /*
+ * kptr_restrict==1 cannot be used in IRQ context
+ * because its test for CAP_SYSLOG would be meaningless.
+ */
+ if (in_irq() || in_serving_softirq() || in_nmi()) {
+ if (spec.field_width == -1)
+ spec.field_width = default_width;
+ return string(buf, end, "pK-error", spec);
+ }
+
/*
* Only print the real pointer value if the current
* process has CAP_SYSLOG and is running with the
@@ -1615,8 +1616,7 @@
* leak pointer values if a binary opens a file using
* %pK and then elevates privileges before reading it.
*/
- const struct cred *cred = current_cred();
-
+ cred = current_cred();
if (!has_capability_noaudit(current, CAP_SYSLOG) ||
!uid_eq(cred->euid, cred->uid) ||
!gid_eq(cred->egid, cred->gid))
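For reference, kptr_restrict is the /proc/sys/kernel/kptr_restrict sysctl: 0 prints %pK values as-is, 1 masks them unless the reading task has CAP_SYSLOG and unchanged uid/euid and gid/egid, and 2 masks them unconditionally. The restructuring above confines the IRQ-context "pK-error" bailout to mode 1, the only mode whose credential check is meaningless in interrupt context.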
diff --git a/mm/backing-dev.c b/mm/backing-dev.c
index 926c76d..c554d17 100644
--- a/mm/backing-dev.c
+++ b/mm/backing-dev.c
@@ -328,7 +328,7 @@
return 0;
out_destroy_stat:
- while (--i)
+ while (i--)
percpu_counter_destroy(&wb->stat[i]);
fprop_local_destroy_percpu(&wb->completions);
out_put_cong:
diff --git a/mm/filemap.c b/mm/filemap.c
index bc94386..3461d97 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -446,7 +446,8 @@
{
int err = 0;
- if (mapping->nrpages) {
+ if ((!dax_mapping(mapping) && mapping->nrpages) ||
+ (dax_mapping(mapping) && mapping->nrexceptional)) {
err = filemap_fdatawrite(mapping);
/*
* Even if the above returned error, the pages may be
@@ -482,13 +483,8 @@
{
int err = 0;
- if (dax_mapping(mapping) && mapping->nrexceptional) {
- err = dax_writeback_mapping_range(mapping, lstart, lend);
- if (err)
- return err;
- }
-
- if (mapping->nrpages) {
+ if ((!dax_mapping(mapping) && mapping->nrpages) ||
+ (dax_mapping(mapping) && mapping->nrexceptional)) {
err = __filemap_fdatawrite_range(mapping, lstart, lend,
WB_SYNC_ALL);
/* See comment of filemap_write_and_wait() */
@@ -1890,6 +1886,7 @@
* page_cache_read - adds requested page to the page cache if not already there
* @file: file to read
* @offset: page index
+ * @gfp_mask: memory allocation flags
*
* This adds the requested page to the page cache if it isn't already there,
* and schedules an I/O to read in its contents from disk.
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 08fc0ba..e10a4fe 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1700,7 +1700,8 @@
pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
VM_BUG_ON(!pmd_none(*new_pmd));
- if (pmd_move_must_withdraw(new_ptl, old_ptl)) {
+ if (pmd_move_must_withdraw(new_ptl, old_ptl) &&
+ vma_is_anonymous(vma)) {
pgtable_t pgtable;
pgtable = pgtable_trans_huge_withdraw(mm, old_pmd);
pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
@@ -2835,6 +2836,7 @@
pgtable_t pgtable;
pmd_t _pmd;
bool young, write, dirty;
+ unsigned long addr;
int i;
VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
@@ -2860,10 +2862,11 @@
young = pmd_young(*pmd);
dirty = pmd_dirty(*pmd);
+ pmdp_huge_split_prepare(vma, haddr, pmd);
pgtable = pgtable_trans_huge_withdraw(mm, pmd);
pmd_populate(mm, &_pmd, pgtable);
- for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+ for (i = 0, addr = haddr; i < HPAGE_PMD_NR; i++, addr += PAGE_SIZE) {
pte_t entry, *pte;
/*
* Note that NUMA hinting access restrictions are not
@@ -2884,9 +2887,9 @@
}
if (dirty)
SetPageDirty(page + i);
- pte = pte_offset_map(&_pmd, haddr);
+ pte = pte_offset_map(&_pmd, addr);
BUG_ON(!pte_none(*pte));
- set_pte_at(mm, haddr, pte, entry);
+ set_pte_at(mm, addr, pte, entry);
atomic_inc(&page[i]._mapcount);
pte_unmap(pte);
}
@@ -2936,7 +2939,7 @@
pmd_populate(mm, pmd, pgtable);
if (freeze) {
- for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
+ for (i = 0; i < HPAGE_PMD_NR; i++) {
page_remove_rmap(page + i, false);
put_page(page + i);
}
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 06ae13e..01f2b48 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2630,8 +2630,10 @@
hugetlb_add_hstate(HUGETLB_PAGE_ORDER);
}
default_hstate_idx = hstate_index(size_to_hstate(default_hstate_size));
- if (default_hstate_max_huge_pages)
- default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+ if (default_hstate_max_huge_pages) {
+ if (!default_hstate.max_huge_pages)
+ default_hstate.max_huge_pages = default_hstate_max_huge_pages;
+ }
hugetlb_init_hstates();
gather_bootmem_prealloc();
diff --git a/mm/memory.c b/mm/memory.c
index 635451a..8132787 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3404,8 +3404,18 @@
if (unlikely(pmd_none(*pmd)) &&
unlikely(__pte_alloc(mm, vma, pmd, address)))
return VM_FAULT_OOM;
- /* if an huge pmd materialized from under us just retry later */
- if (unlikely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
+ /*
+ * If a huge pmd materialized under us just retry later. Use
+ * pmd_trans_unstable() instead of pmd_trans_huge() to ensure the pmd
+ * didn't become pmd_trans_huge under us and then back to pmd_none, as
+ * a result of MADV_DONTNEED running immediately after a huge pmd fault
+ * in a different thread of this mm, in turn leading to a misleading
+ * pmd_trans_huge() retval. All we have to ensure is that it is a
+ * regular pmd that we can walk with pte_offset_map() and we can do that
+ * through an atomic read in C, which is what pmd_trans_unstable()
+ * provides.
+ */
+ if (unlikely(pmd_trans_unstable(pmd) || pmd_devmap(*pmd)))
return 0;
/*
* A regular pmd is established and it can't morph into a huge pmd
diff --git a/mm/migrate.c b/mm/migrate.c
index b1034f9..3ad0fea 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1582,7 +1582,7 @@
(GFP_HIGHUSER_MOVABLE |
__GFP_THISNODE | __GFP_NOMEMALLOC |
__GFP_NORETRY | __GFP_NOWARN) &
- ~(__GFP_IO | __GFP_FS), 0);
+ ~__GFP_RECLAIM, 0);
return newpage;
}
diff --git a/mm/mmap.c b/mm/mmap.c
index 2f2415a..76d1ec2 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2664,12 +2664,29 @@
if (!vma || !(vma->vm_flags & VM_SHARED))
goto out;
- if (start < vma->vm_start || start + size > vma->vm_end)
+ if (start < vma->vm_start)
goto out;
- if (pgoff == linear_page_index(vma, start)) {
- ret = 0;
- goto out;
+ if (start + size > vma->vm_end) {
+ struct vm_area_struct *next;
+
+ for (next = vma->vm_next; next; next = next->vm_next) {
+ /* hole between vmas ? */
+ if (next->vm_start != next->vm_prev->vm_end)
+ goto out;
+
+ if (next->vm_file != vma->vm_file)
+ goto out;
+
+ if (next->vm_flags != vma->vm_flags)
+ goto out;
+
+ if (start + size <= next->vm_end)
+ break;
+ }
+
+ if (!next)
+ goto out;
}
prot |= vma->vm_flags & VM_READ ? PROT_READ : 0;
@@ -2679,9 +2696,16 @@
flags &= MAP_NONBLOCK;
flags |= MAP_SHARED | MAP_FIXED | MAP_POPULATE;
if (vma->vm_flags & VM_LOCKED) {
+ struct vm_area_struct *tmp;
flags |= MAP_LOCKED;
+
/* drop PG_Mlocked flag for over-mapped range */
- munlock_vma_pages_range(vma, start, start + size);
+ for (tmp = vma; tmp && tmp->vm_start < start + size;
+ tmp = tmp->vm_next) {
+ munlock_vma_pages_range(tmp,
+ max(tmp->vm_start, start),
+ min(tmp->vm_end, start + size));
+ }
}
file = get_file(vma->vm_file);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 8eb7bb4..f7cb3d4 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -160,9 +160,11 @@
}
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
- if (next - addr != HPAGE_PMD_SIZE)
+ if (next - addr != HPAGE_PMD_SIZE) {
split_huge_pmd(vma, pmd, addr);
- else {
+ if (pmd_none(*pmd))
+ continue;
+ } else {
int nr_ptes = change_huge_pmd(vma, pmd, addr,
newprot, prot_numa);
diff --git a/mm/mremap.c b/mm/mremap.c
index d77946a..8eeba02 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -210,6 +210,8 @@
}
}
split_huge_pmd(vma, old_pmd, old_addr);
+ if (pmd_none(*old_pmd))
+ continue;
VM_BUG_ON(pmd_trans_huge(*old_pmd));
}
if (pmd_none(*new_pmd) && __pte_alloc(new_vma->vm_mm, new_vma,
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 9d47676..06a005b 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -90,9 +90,9 @@
* ARCHes with special requirements for evicting THP backing TLB entries can
* implement this. Otherwise also, it can help optimize normal TLB flush in
* THP regime. stock flush_tlb_range() typically has optimization to nuke the
- * entire TLB TLB if flush span is greater than a threshhold, which will
+ * entire TLB if flush span is greater than a threshold, which will
* likely be true for a single huge page. Thus a single thp flush will
- * invalidate the entire TLB which is not desitable.
+ * invalidate the entire TLB which is not desirable.
* e.g. see arch/arc: flush_pmd_tlb_range
*/
#define flush_pmd_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end)
@@ -195,7 +195,9 @@
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
- flush_pmd_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
+
+ /* collapse entails shooting down ptes not pmd */
+ flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
return pmd;
}
#endif
diff --git a/mm/slab.c b/mm/slab.c
index 6ecc697..621fbcb 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2275,7 +2275,7 @@
err = setup_cpu_cache(cachep, gfp);
if (err) {
- __kmem_cache_shutdown(cachep);
+ __kmem_cache_release(cachep);
return err;
}
@@ -2414,12 +2414,13 @@
int __kmem_cache_shutdown(struct kmem_cache *cachep)
{
+ return __kmem_cache_shrink(cachep, false);
+}
+
+void __kmem_cache_release(struct kmem_cache *cachep)
+{
int i;
struct kmem_cache_node *n;
- int rc = __kmem_cache_shrink(cachep, false);
-
- if (rc)
- return rc;
free_percpu(cachep->cpu_cache);
@@ -2430,7 +2431,6 @@
kfree(n);
cachep->node[i] = NULL;
}
- return 0;
}
/*
diff --git a/mm/slab.h b/mm/slab.h
index 834ad24..2eedace 100644
--- a/mm/slab.h
+++ b/mm/slab.h
@@ -140,6 +140,7 @@
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)
int __kmem_cache_shutdown(struct kmem_cache *);
+void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *, bool);
void slab_kmem_cache_release(struct kmem_cache *);
diff --git a/mm/slab_common.c b/mm/slab_common.c
index b50aef0..065b7bd 100644
--- a/mm/slab_common.c
+++ b/mm/slab_common.c
@@ -693,6 +693,7 @@
void slab_kmem_cache_release(struct kmem_cache *s)
{
+ __kmem_cache_release(s);
destroy_memcg_params(s);
kfree_const(s->name);
kmem_cache_free(kmem_cache, s);
diff --git a/mm/slob.c b/mm/slob.c
index 17e8f8c..5ec1580 100644
--- a/mm/slob.c
+++ b/mm/slob.c
@@ -630,6 +630,10 @@
return 0;
}
+void __kmem_cache_release(struct kmem_cache *c)
+{
+}
+
int __kmem_cache_shrink(struct kmem_cache *d, bool deactivate)
{
return 0;
diff --git a/mm/slub.c b/mm/slub.c
index 2e1355a..d8fbd4a 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1592,18 +1592,12 @@
__add_partial(n, page, tail);
}
-static inline void
-__remove_partial(struct kmem_cache_node *n, struct page *page)
-{
- list_del(&page->lru);
- n->nr_partial--;
-}
-
static inline void remove_partial(struct kmem_cache_node *n,
struct page *page)
{
lockdep_assert_held(&n->list_lock);
- __remove_partial(n, page);
+ list_del(&page->lru);
+ n->nr_partial--;
}
/*
@@ -3184,6 +3178,12 @@
}
}
+void __kmem_cache_release(struct kmem_cache *s)
+{
+ free_percpu(s->cpu_slab);
+ free_kmem_cache_nodes(s);
+}
+
static int init_kmem_cache_nodes(struct kmem_cache *s)
{
int node;
@@ -3443,28 +3443,31 @@
/*
* Attempt to free all partial slabs on a node.
- * This is called from kmem_cache_close(). We must be the last thread
- * using the cache and therefore we do not need to lock anymore.
+ * This is called from __kmem_cache_shutdown(). We must take list_lock
+ * because a sysfs file might still access the partial list after shutdown.
*/
static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
{
struct page *page, *h;
+ BUG_ON(irqs_disabled());
+ spin_lock_irq(&n->list_lock);
list_for_each_entry_safe(page, h, &n->partial, lru) {
if (!page->inuse) {
- __remove_partial(n, page);
+ remove_partial(n, page);
discard_slab(s, page);
} else {
list_slab_objects(s, page,
- "Objects remaining in %s on kmem_cache_close()");
+ "Objects remaining in %s on __kmem_cache_shutdown()");
}
}
+ spin_unlock_irq(&n->list_lock);
}
/*
* Release all resources used by a slab cache.
*/
-static inline int kmem_cache_close(struct kmem_cache *s)
+int __kmem_cache_shutdown(struct kmem_cache *s)
{
int node;
struct kmem_cache_node *n;
@@ -3476,16 +3479,9 @@
if (n->nr_partial || slabs_node(s, node))
return 1;
}
- free_percpu(s->cpu_slab);
- free_kmem_cache_nodes(s);
return 0;
}
-int __kmem_cache_shutdown(struct kmem_cache *s)
-{
- return kmem_cache_close(s);
-}
-
/********************************************************************
* Kmalloc subsystem
*******************************************************************/
@@ -3980,7 +3976,7 @@
memcg_propagate_slab_attrs(s);
err = sysfs_slab_add(s);
if (err)
- kmem_cache_close(s);
+ __kmem_cache_release(s);
return err;
}
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index d5871ac..f066781 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1625,7 +1625,7 @@
rt = atrtr_find(&at_hint);
}
- err = ENETUNREACH;
+ err = -ENETUNREACH;
if (!rt)
goto out;
diff --git a/net/batman-adv/gateway_client.c b/net/batman-adv/gateway_client.c
index e6c8382..ccf70be 100644
--- a/net/batman-adv/gateway_client.c
+++ b/net/batman-adv/gateway_client.c
@@ -527,11 +527,12 @@
* gets dereferenced.
*/
spin_lock_bh(&bat_priv->gw.list_lock);
- hlist_del_init_rcu(&gw_node->list);
+ if (!hlist_unhashed(&gw_node->list)) {
+ hlist_del_init_rcu(&gw_node->list);
+ batadv_gw_node_free_ref(gw_node);
+ }
spin_unlock_bh(&bat_priv->gw.list_lock);
- batadv_gw_node_free_ref(gw_node);
-
curr_gw = batadv_gw_get_selected_gw_node(bat_priv);
if (gw_node == curr_gw)
batadv_gw_reselect(bat_priv);
diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
index 01acccc..57f71071 100644
--- a/net/batman-adv/hard-interface.c
+++ b/net/batman-adv/hard-interface.c
@@ -76,6 +76,28 @@
}
/**
+ * batadv_mutual_parents - check if two devices are each other's parent
+ * @dev1: 1st net_device
+ * @dev2: 2nd net_device
+ *
+ * veth devices come in pairs and each is the parent of the other!
+ *
+ * Return: true if the devices are each other's parent, otherwise false
+ */
+static bool batadv_mutual_parents(const struct net_device *dev1,
+ const struct net_device *dev2)
+{
+ int dev1_parent_iflink = dev_get_iflink(dev1);
+ int dev2_parent_iflink = dev_get_iflink(dev2);
+
+ if (!dev1_parent_iflink || !dev2_parent_iflink)
+ return false;
+
+ return (dev1_parent_iflink == dev2->ifindex) &&
+ (dev2_parent_iflink == dev1->ifindex);
+}
+
+/**
* batadv_is_on_batman_iface - check if a device is a batman iface descendant
* @net_dev: the device to check
*
@@ -108,6 +130,9 @@
if (WARN(!parent_dev, "Cannot find parent device"))
return false;
+ if (batadv_mutual_parents(net_dev, parent_dev))
+ return false;
+
ret = batadv_is_on_batman_iface(parent_dev);
return ret;
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index cdfc85f..0e80fd1 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -303,9 +303,11 @@
if (atomic_add_return(v, &vlan->tt.num_entries) == 0) {
spin_lock_bh(&orig_node->vlan_list_lock);
- hlist_del_init_rcu(&vlan->list);
+ if (!hlist_unhashed(&vlan->list)) {
+ hlist_del_init_rcu(&vlan->list);
+ batadv_orig_node_vlan_free_ref(vlan);
+ }
spin_unlock_bh(&orig_node->vlan_list_lock);
- batadv_orig_node_vlan_free_ref(vlan);
}
batadv_orig_node_vlan_free_ref(vlan);
diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
index 47bcef754..883c821 100644
--- a/net/bluetooth/hci_core.c
+++ b/net/bluetooth/hci_core.c
@@ -4112,8 +4112,10 @@
break;
}
- *req_complete = bt_cb(skb)->hci.req_complete;
- *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
+ if (bt_cb(skb)->hci.req_flags & HCI_REQ_SKB)
+ *req_complete_skb = bt_cb(skb)->hci.req_complete_skb;
+ else
+ *req_complete = bt_cb(skb)->hci.req_complete;
kfree_skb(skb);
}
spin_unlock_irqrestore(&hdev->cmd_q.lock, flags);
diff --git a/net/bridge/br_mdb.c b/net/bridge/br_mdb.c
index 30e105f..74c278e 100644
--- a/net/bridge/br_mdb.c
+++ b/net/bridge/br_mdb.c
@@ -425,8 +425,8 @@
mp = br_mdb_ip_get(mdb, group);
if (!mp) {
mp = br_multicast_new_group(br, port, group);
- err = PTR_ERR(mp);
- if (IS_ERR(mp))
+ err = PTR_ERR_OR_ZERO(mp);
+ if (err)
return err;
}
diff --git a/net/caif/cfrfml.c b/net/caif/cfrfml.c
index 61d7617..b82440e 100644
--- a/net/caif/cfrfml.c
+++ b/net/caif/cfrfml.c
@@ -159,7 +159,7 @@
tmppkt = NULL;
/* Verify that length is correct */
- err = EPROTO;
+ err = -EPROTO;
if (rfml->pdu_size != cfpkt_getlen(pkt) - RFM_HEAD_SIZE + 1)
goto out;
}
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 9cfedf5..9382619 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -1197,6 +1197,13 @@
return new_piece;
}
+static size_t sizeof_footer(struct ceph_connection *con)
+{
+ return (con->peer_features & CEPH_FEATURE_MSG_AUTH) ?
+ sizeof(struct ceph_msg_footer) :
+ sizeof(struct ceph_msg_footer_old);
+}
+
static void prepare_message_data(struct ceph_msg *msg, u32 data_len)
{
BUG_ON(!msg);
@@ -2335,9 +2342,9 @@
ceph_pr_addr(&con->peer_addr.in_addr),
seq, con->in_seq + 1);
con->in_base_pos = -front_len - middle_len - data_len -
- sizeof(m->footer);
+ sizeof_footer(con);
con->in_tag = CEPH_MSGR_TAG_READY;
- return 0;
+ return 1;
} else if ((s64)seq - (s64)con->in_seq > 1) {
pr_err("read_partial_message bad seq %lld expected %lld\n",
seq, con->in_seq + 1);
@@ -2360,10 +2367,10 @@
/* skip this message */
dout("alloc_msg said skip message\n");
con->in_base_pos = -front_len - middle_len - data_len -
- sizeof(m->footer);
+ sizeof_footer(con);
con->in_tag = CEPH_MSGR_TAG_READY;
con->in_seq++;
- return 0;
+ return 1;
}
BUG_ON(!con->in_msg);
diff --git a/net/ceph/osd_client.c b/net/ceph/osd_client.c
index 3534e12..5bc0537 100644
--- a/net/ceph/osd_client.c
+++ b/net/ceph/osd_client.c
@@ -2853,8 +2853,8 @@
mutex_lock(&osdc->request_mutex);
req = __lookup_request(osdc, tid);
if (!req) {
- pr_warn("%s osd%d tid %llu unknown, skipping\n",
- __func__, osd->o_osd, tid);
+ dout("%s osd%d tid %llu unknown, skipping\n", __func__,
+ osd->o_osd, tid);
m = NULL;
*skip = 1;
goto out;
diff --git a/net/core/dev.c b/net/core/dev.c
index 8cba3d8..0ef061b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -5379,12 +5379,12 @@
{
struct netdev_adjacent *lower;
- lower = list_entry((*iter)->next, struct netdev_adjacent, list);
+ lower = list_entry(*iter, struct netdev_adjacent, list);
if (&lower->list == &dev->adj_list.lower)
return NULL;
- *iter = &lower->list;
+ *iter = lower->list.next;
return lower->dev;
}
@@ -7422,8 +7422,10 @@
dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
setup(dev);
- if (!dev->tx_queue_len)
+ if (!dev->tx_queue_len) {
dev->priv_flags |= IFF_NO_QUEUE;
+ dev->tx_queue_len = 1;
+ }
dev->num_tx_queues = txqs;
dev->real_num_tx_queues = txqs;
diff --git a/net/core/flow_dissector.c b/net/core/flow_dissector.c
index d79699c..12e7003 100644
--- a/net/core/flow_dissector.c
+++ b/net/core/flow_dissector.c
@@ -208,7 +208,6 @@
case htons(ETH_P_IPV6): {
const struct ipv6hdr *iph;
struct ipv6hdr _iph;
- __be32 flow_label;
ipv6:
iph = __skb_header_pointer(skb, nhoff, sizeof(_iph), data, hlen, &_iph);
@@ -230,8 +229,12 @@
key_control->addr_type = FLOW_DISSECTOR_KEY_IPV6_ADDRS;
}
- flow_label = ip6_flowlabel(iph);
- if (flow_label) {
+ if ((dissector_uses_key(flow_dissector,
+ FLOW_DISSECTOR_KEY_FLOW_LABEL) ||
+ (flags & FLOW_DISSECTOR_F_STOP_AT_FLOW_LABEL)) &&
+ ip6_flowlabel(iph)) {
+ __be32 flow_label = ip6_flowlabel(iph);
+
if (dissector_uses_key(flow_dissector,
FLOW_DISSECTOR_KEY_FLOW_LABEL)) {
key_tags = skb_flow_dissector_target(flow_dissector,
@@ -396,6 +399,13 @@
goto out_bad;
proto = eth->h_proto;
nhoff += sizeof(*eth);
+
+ /* Cap headers that we access via pointers at the
+ * end of the Ethernet header as our maximum alignment
+ * at that point is only 2 bytes.
+ */
+ if (NET_IP_ALIGN)
+ hlen = nhoff;
}
key_control->flags |= FLOW_DIS_ENCAPSULATION;
diff --git a/net/core/scm.c b/net/core/scm.c
index 14596fb..2696aef 100644
--- a/net/core/scm.c
+++ b/net/core/scm.c
@@ -87,6 +87,7 @@
*fplp = fpl;
fpl->count = 0;
fpl->max = SCM_MAX_FD;
+ fpl->user = NULL;
}
fpp = &fpl->fp[fpl->count];
@@ -107,6 +108,10 @@
*fpp++ = file;
fpl->count++;
}
+
+ if (!fpl->user)
+ fpl->user = get_uid(current_user());
+
return num;
}
@@ -119,6 +124,7 @@
scm->fp = NULL;
for (i=fpl->count-1; i>=0; i--)
fput(fpl->fp[i]);
+ free_uid(fpl->user);
kfree(fpl);
}
}
@@ -336,6 +342,7 @@
for (i = 0; i < fpl->count; i++)
get_file(fpl->fp[i]);
new_fpl->max = new_fpl->count;
+ new_fpl->user = get_uid(fpl->user);
}
return new_fpl;
}
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b2df375..5bf88f5 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -79,6 +79,8 @@
struct kmem_cache *skbuff_head_cache __read_mostly;
static struct kmem_cache *skbuff_fclone_cache __read_mostly;
+int sysctl_max_skb_frags __read_mostly = MAX_SKB_FRAGS;
+EXPORT_SYMBOL(sysctl_max_skb_frags);
/**
* skb_panic - private function for out-of-line support
diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
index 95b6139..a6beb7b 100644
--- a/net/core/sysctl_net_core.c
+++ b/net/core/sysctl_net_core.c
@@ -26,6 +26,7 @@
static int one = 1;
static int min_sndbuf = SOCK_MIN_SNDBUF;
static int min_rcvbuf = SOCK_MIN_RCVBUF;
+static int max_skb_frags = MAX_SKB_FRAGS;
static int net_msg_warn; /* Unused, but still a sysctl */
@@ -392,6 +393,15 @@
.mode = 0644,
.proc_handler = proc_dointvec
},
+ {
+ .procname = "max_skb_frags",
+ .data = &sysctl_max_skb_frags,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = proc_dointvec_minmax,
+ .extra1 = &one,
+ .extra2 = &max_skb_frags,
+ },
{ }
};
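The new knob surfaces as the net.core.max_skb_frags sysctl, clamped between 1 and the compiled-in MAX_SKB_FRAGS, and the tcp.c hunks below switch the coalescing limits over to it. Lowering it (e.g. sysctl -w net.core.max_skb_frags=8) is the kind of workaround intended for hardware that cannot handle a full MAX_SKB_FRAGS scatter list, though the hunks themselves do not spell out the motivation.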
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 5684e14..902d606 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -824,26 +824,26 @@
if (sk->sk_state == DCCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
- struct sock *nsk = NULL;
+ struct sock *nsk;
sk = req->rsk_listener;
- if (likely(sk->sk_state == DCCP_LISTEN)) {
- nsk = dccp_check_req(sk, skb, req);
- } else {
+ if (unlikely(sk->sk_state != DCCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
}
+ sock_hold(sk);
+ nsk = dccp_check_req(sk, skb, req);
if (!nsk) {
reqsk_put(req);
- goto discard_it;
+ goto discard_and_relse;
}
if (nsk == sk) {
- sock_hold(sk);
reqsk_put(req);
} else if (dccp_child_process(sk, nsk, skb)) {
dccp_v4_ctl_send_reset(sk, skb);
- goto discard_it;
+ goto discard_and_relse;
} else {
+ sock_put(sk);
return 0;
}
}
diff --git a/net/dccp/ipv6.c b/net/dccp/ipv6.c
index 9c6d050..b8608b7 100644
--- a/net/dccp/ipv6.c
+++ b/net/dccp/ipv6.c
@@ -691,26 +691,26 @@
if (sk->sk_state == DCCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
- struct sock *nsk = NULL;
+ struct sock *nsk;
sk = req->rsk_listener;
- if (likely(sk->sk_state == DCCP_LISTEN)) {
- nsk = dccp_check_req(sk, skb, req);
- } else {
+ if (unlikely(sk->sk_state != DCCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
}
+ sock_hold(sk);
+ nsk = dccp_check_req(sk, skb, req);
if (!nsk) {
reqsk_put(req);
- goto discard_it;
+ goto discard_and_relse;
}
if (nsk == sk) {
- sock_hold(sk);
reqsk_put(req);
} else if (dccp_child_process(sk, nsk, skb)) {
dccp_v6_ctl_send_reset(sk, skb);
- goto discard_it;
+ goto discard_and_relse;
} else {
+ sock_put(sk);
return 0;
}
}
diff --git a/net/dsa/slave.c b/net/dsa/slave.c
index 40b9ca7..ab24521 100644
--- a/net/dsa/slave.c
+++ b/net/dsa/slave.c
@@ -1194,7 +1194,6 @@
if (ret) {
netdev_err(master, "error %d registering interface %s\n",
ret, slave_dev->name);
- phy_disconnect(p->phy);
ds->ports[port] = NULL;
free_netdev(slave_dev);
return ret;
@@ -1205,6 +1204,7 @@
ret = dsa_slave_phy_setup(p, slave_dev);
if (ret) {
netdev_err(master, "error %d setting up slave phy\n", ret);
+ unregister_netdev(slave_dev);
free_netdev(slave_dev);
return ret;
}
diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
index cebd9d3..f6303b1 100644
--- a/net/ipv4/devinet.c
+++ b/net/ipv4/devinet.c
@@ -1847,7 +1847,7 @@
if (err < 0)
goto errout;
- err = EINVAL;
+ err = -EINVAL;
if (!tb[NETCONFA_IFINDEX])
goto errout;
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 46b9c88..6414891 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -789,14 +789,16 @@
reqsk_put(req);
}
-void inet_csk_reqsk_queue_add(struct sock *sk, struct request_sock *req,
- struct sock *child)
+struct sock *inet_csk_reqsk_queue_add(struct sock *sk,
+ struct request_sock *req,
+ struct sock *child)
{
struct request_sock_queue *queue = &inet_csk(sk)->icsk_accept_queue;
spin_lock(&queue->rskq_lock);
if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_child_forget(sk, req, child);
+ child = NULL;
} else {
req->sk = child;
req->dl_next = NULL;
@@ -808,6 +810,7 @@
sk_acceptq_added(sk);
}
spin_unlock(&queue->rskq_lock);
+ return child;
}
EXPORT_SYMBOL(inet_csk_reqsk_queue_add);
@@ -817,11 +820,8 @@
if (own_req) {
inet_csk_reqsk_queue_drop(sk, req);
reqsk_queue_removed(&inet_csk(sk)->icsk_accept_queue, req);
- inet_csk_reqsk_queue_add(sk, req, child);
- /* Warning: caller must not call reqsk_put(req);
- * child stole last reference on it.
- */
- return child;
+ if (inet_csk_reqsk_queue_add(sk, req, child))
+ return child;
}
/* Too bad, another child took ownership of the request, undo. */
bh_unlock_sock(child);
diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
index 7c51c4e..41ba68d 100644
--- a/net/ipv4/ip_gre.c
+++ b/net/ipv4/ip_gre.c
@@ -1054,8 +1054,9 @@
static void ipgre_tap_setup(struct net_device *dev)
{
ether_setup(dev);
- dev->netdev_ops = &gre_tap_netdev_ops;
- dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
+ dev->netdev_ops = &gre_tap_netdev_ops;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
+ dev->priv_flags |= IFF_LIVE_ADDR_CHANGE;
ip_tunnel_setup(dev, gre_tap_net_id);
}
@@ -1240,6 +1241,14 @@
err = ipgre_newlink(net, dev, tb, NULL);
if (err < 0)
goto out;
+
+ /* openvswitch users expect packet sizes to be unrestricted,
+ * so set the largest MTU we can.
+ */
+ err = __ip_tunnel_change_mtu(dev, IP_MAX_MTU, false);
+ if (err)
+ goto out;
+
return dev;
out:
free_netdev(dev);
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index 5f73a7c..a501242 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -249,6 +249,8 @@
switch (cmsg->cmsg_type) {
case IP_RETOPTS:
err = cmsg->cmsg_len - CMSG_ALIGN(sizeof(struct cmsghdr));
+
+ /* Our caller is responsible for freeing ipc->opt */
err = ip_options_get(net, &ipc->opt, CMSG_DATA(cmsg),
err < 40 ? err : 40);
if (err)
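The ownership comment added here pairs with the ping.c, raw.c and udp.c hunks elsewhere in this merge: ip_cmsg_send() may already have allocated ipc->opt via ip_options_get() before failing on a later cmsg, so each caller now does kfree(ipc.opt) on the error path rather than leaking it.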
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index c7bd72e..89e8861 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -943,17 +943,31 @@
}
EXPORT_SYMBOL_GPL(ip_tunnel_ioctl);
-int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+int __ip_tunnel_change_mtu(struct net_device *dev, int new_mtu, bool strict)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
int t_hlen = tunnel->hlen + sizeof(struct iphdr);
+ int max_mtu = 0xFFF8 - dev->hard_header_len - t_hlen;
- if (new_mtu < 68 ||
- new_mtu > 0xFFF8 - dev->hard_header_len - t_hlen)
+ if (new_mtu < 68)
return -EINVAL;
+
+ if (new_mtu > max_mtu) {
+ if (strict)
+ return -EINVAL;
+
+ new_mtu = max_mtu;
+ }
+
dev->mtu = new_mtu;
return 0;
}
+EXPORT_SYMBOL_GPL(__ip_tunnel_change_mtu);
+
+int ip_tunnel_change_mtu(struct net_device *dev, int new_mtu)
+{
+ return __ip_tunnel_change_mtu(dev, new_mtu, true);
+}
EXPORT_SYMBOL_GPL(ip_tunnel_change_mtu);
static void ip_tunnel_dev_free(struct net_device *dev)
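A worked example of the new clamp: for a GRE device with a 14-byte Ethernet header and t_hlen = 24 (20-byte IP header plus a 4-byte GRE header, assuming no optional GRE fields), max_mtu = 0xFFF8 - 14 - 24 = 65490. ip_tunnel_change_mtu() keeps the strict behaviour and rejects anything larger with -EINVAL, while the non-strict __ip_tunnel_change_mtu() path used by the ip_gre.c openvswitch hunk above silently clamps the requested IP_MAX_MTU down to that bound.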
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index c117b21..d3a2716 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -746,8 +746,10 @@
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc, false);
- if (err)
+ if (unlikely(err)) {
+ kfree(ipc.opt);
return err;
+ }
if (ipc.opt)
free = 1;
}
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index bc35f18..7113bae 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -547,8 +547,10 @@
if (msg->msg_controllen) {
err = ip_cmsg_send(net, msg, &ipc, false);
- if (err)
+ if (unlikely(err)) {
+ kfree(ipc.opt);
goto out;
+ }
if (ipc.opt)
free = 1;
}
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 85f184e..02c6229 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -129,6 +129,7 @@
static int ip_rt_min_pmtu __read_mostly = 512 + 20 + 20;
static int ip_rt_min_advmss __read_mostly = 256;
+static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
/*
* Interface to generic destination cache.
*/
@@ -755,7 +756,7 @@
struct fib_nh *nh = &FIB_RES_NH(res);
update_or_create_fnhe(nh, fl4->daddr, new_gw,
- 0, 0);
+ 0, jiffies + ip_rt_gc_timeout);
}
if (kill_route)
rt->dst.obsolete = DST_OBSOLETE_KILL;
@@ -1556,6 +1557,36 @@
#endif
}
+static void ip_del_fnhe(struct fib_nh *nh, __be32 daddr)
+{
+ struct fnhe_hash_bucket *hash;
+ struct fib_nh_exception *fnhe, __rcu **fnhe_p;
+ u32 hval = fnhe_hashfun(daddr);
+
+ spin_lock_bh(&fnhe_lock);
+
+ hash = rcu_dereference_protected(nh->nh_exceptions,
+ lockdep_is_held(&fnhe_lock));
+ hash += hval;
+
+ fnhe_p = &hash->chain;
+ fnhe = rcu_dereference_protected(*fnhe_p, lockdep_is_held(&fnhe_lock));
+ while (fnhe) {
+ if (fnhe->fnhe_daddr == daddr) {
+ rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
+ fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+ fnhe_flush_routes(fnhe);
+ kfree_rcu(fnhe, rcu);
+ break;
+ }
+ fnhe_p = &fnhe->fnhe_next;
+ fnhe = rcu_dereference_protected(fnhe->fnhe_next,
+ lockdep_is_held(&fnhe_lock));
+ }
+
+ spin_unlock_bh(&fnhe_lock);
+}
+
/* called in rcu_read_lock() section */
static int __mkroute_input(struct sk_buff *skb,
const struct fib_result *res,
@@ -1609,11 +1640,20 @@
fnhe = find_exception(&FIB_RES_NH(*res), daddr);
if (do_cache) {
- if (fnhe)
+ if (fnhe) {
rth = rcu_dereference(fnhe->fnhe_rth_input);
- else
- rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+ if (rth && rth->dst.expires &&
+ time_after(jiffies, rth->dst.expires)) {
+ ip_del_fnhe(&FIB_RES_NH(*res), daddr);
+ fnhe = NULL;
+ } else {
+ goto rt_cache;
+ }
+ }
+ rth = rcu_dereference(FIB_RES_NH(*res).nh_rth_input);
+
+rt_cache:
if (rt_cache_valid(rth)) {
skb_dst_set_noref(skb, &rth->dst);
goto out;
@@ -2014,19 +2054,29 @@
struct fib_nh *nh = &FIB_RES_NH(*res);
fnhe = find_exception(nh, fl4->daddr);
- if (fnhe)
+ if (fnhe) {
prth = &fnhe->fnhe_rth_output;
- else {
- if (unlikely(fl4->flowi4_flags &
- FLOWI_FLAG_KNOWN_NH &&
- !(nh->nh_gw &&
- nh->nh_scope == RT_SCOPE_LINK))) {
- do_cache = false;
- goto add;
+ rth = rcu_dereference(*prth);
+ if (rth && rth->dst.expires &&
+ time_after(jiffies, rth->dst.expires)) {
+ ip_del_fnhe(nh, fl4->daddr);
+ fnhe = NULL;
+ } else {
+ goto rt_cache;
}
- prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
}
+
+ if (unlikely(fl4->flowi4_flags &
+ FLOWI_FLAG_KNOWN_NH &&
+ !(nh->nh_gw &&
+ nh->nh_scope == RT_SCOPE_LINK))) {
+ do_cache = false;
+ goto add;
+ }
+ prth = raw_cpu_ptr(nh->nh_pcpu_rth_output);
rth = rcu_dereference(*prth);
+
+rt_cache:
if (rt_cache_valid(rth)) {
dst_hold(&rth->dst);
return rth;
@@ -2569,7 +2619,6 @@
}
#ifdef CONFIG_SYSCTL
-static int ip_rt_gc_timeout __read_mostly = RT_GC_TIMEOUT;
static int ip_rt_gc_interval __read_mostly = 60 * HZ;
static int ip_rt_gc_min_interval __read_mostly = HZ / 2;
static int ip_rt_gc_elasticity __read_mostly = 8;
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 19746b3..483ffdf 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -940,7 +940,7 @@
i = skb_shinfo(skb)->nr_frags;
can_coalesce = skb_can_coalesce(skb, i, page, offset);
- if (!can_coalesce && i >= MAX_SKB_FRAGS) {
+ if (!can_coalesce && i >= sysctl_max_skb_frags) {
tcp_mark_push(tp, skb);
goto new_segment;
}
@@ -1213,7 +1213,7 @@
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
- if (i == MAX_SKB_FRAGS || !sg) {
+ if (i == sysctl_max_skb_frags || !sg) {
tcp_mark_push(tp, skb);
goto new_segment;
}
@@ -2950,7 +2950,7 @@
struct crypto_hash *hash;
hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
- if (IS_ERR_OR_NULL(hash))
+ if (IS_ERR(hash))
return;
per_cpu(tcp_md5sig_pool, cpu).md5_desc.tfm = hash;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 1c2a734..3b2c8e9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -2896,7 +2896,10 @@
{
const u32 now = tcp_time_stamp, wlen = sysctl_tcp_min_rtt_wlen * HZ;
struct rtt_meas *m = tcp_sk(sk)->rtt_min;
- struct rtt_meas rttm = { .rtt = (rtt_us ? : 1), .ts = now };
+ struct rtt_meas rttm = {
+ .rtt = likely(rtt_us) ? rtt_us : jiffies_to_usecs(1),
+ .ts = now,
+ };
u32 elapsed;
/* Check if the new measurement updates the 1st, 2nd, or 3rd choices */
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index a4d5237..487ac67 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -311,7 +311,7 @@
/* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
-void tcp_req_err(struct sock *sk, u32 seq)
+void tcp_req_err(struct sock *sk, u32 seq, bool abort)
{
struct request_sock *req = inet_reqsk(sk);
struct net *net = sock_net(sk);
@@ -323,7 +323,7 @@
if (seq != tcp_rsk(req)->snt_isn) {
NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
- } else {
+ } else if (abort) {
/*
* Still in SYN_RECV, just remove it silently.
* There is no good way to pass the error to the newly
@@ -383,7 +383,12 @@
}
seq = ntohl(th->seq);
if (sk->sk_state == TCP_NEW_SYN_RECV)
- return tcp_req_err(sk, seq);
+ return tcp_req_err(sk, seq,
+ type == ICMP_PARAMETERPROB ||
+ type == ICMP_TIME_EXCEEDED ||
+ (type == ICMP_DEST_UNREACH &&
+ (code == ICMP_NET_UNREACH ||
+ code == ICMP_HOST_UNREACH)));
bh_lock_sock(sk);
/* If too many ICMPs get dropped on busy
@@ -1592,28 +1597,30 @@
if (sk->sk_state == TCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
- struct sock *nsk = NULL;
+ struct sock *nsk;
sk = req->rsk_listener;
- if (tcp_v4_inbound_md5_hash(sk, skb))
- goto discard_and_relse;
- if (likely(sk->sk_state == TCP_LISTEN)) {
- nsk = tcp_check_req(sk, skb, req, false);
- } else {
+ if (unlikely(tcp_v4_inbound_md5_hash(sk, skb))) {
+ reqsk_put(req);
+ goto discard_it;
+ }
+ if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
}
+ sock_hold(sk);
+ nsk = tcp_check_req(sk, skb, req, false);
if (!nsk) {
reqsk_put(req);
- goto discard_it;
+ goto discard_and_relse;
}
if (nsk == sk) {
- sock_hold(sk);
reqsk_put(req);
} else if (tcp_child_process(sk, nsk, skb)) {
tcp_v4_send_reset(nsk, skb);
- goto discard_it;
+ goto discard_and_relse;
} else {
+ sock_put(sk);
return 0;
}
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index be0b218..95d2f19 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1048,8 +1048,10 @@
if (msg->msg_controllen) {
err = ip_cmsg_send(sock_net(sk), msg, &ipc,
sk->sk_family == AF_INET6);
- if (err)
+ if (unlikely(err)) {
+ kfree(ipc.opt);
return err;
+ }
if (ipc.opt)
free = 1;
connected = 0;
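
The udp.c hunk closes a leak: ip_cmsg_send() may have allocated ipc.opt before failing, so the caller's error path must free it. A small sketch of the same caller-frees-on-error rule, with hypothetical names:

#include <errno.h>
#include <stdlib.h>

struct opts { char *opt; };

/* May allocate into the out-parameter and still fail afterwards,
 * leaving o->opt live, just like ip_cmsg_send() can.
 */
static int parse_control(struct opts *o, int fail_late)
{
	o->opt = malloc(16);
	if (!o->opt)
		return -ENOMEM;
	if (fail_late)
		return -EINVAL;	/* o->opt is still allocated here */
	return 0;
}

static int do_send(int fail_late)
{
	struct opts o = { NULL };
	int err = parse_control(&o, fail_late);

	if (err) {
		free(o.opt);	/* the fix: release the partial allocation */
		return err;
	}
	/* ... use o.opt ... */
	free(o.opt);
	return 0;
}

int main(void)
{
	return do_send(1) == -EINVAL ? 0 : 1;
}
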
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 38eedde..bdd7eac 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -583,7 +583,7 @@
if (err < 0)
goto errout;
- err = EINVAL;
+ err = -EINVAL;
if (!tb[NETCONFA_IFINDEX])
goto errout;
@@ -3538,6 +3538,7 @@
{
struct inet6_dev *idev = ifp->idev;
struct net_device *dev = idev->dev;
+ bool notify = false;
addrconf_join_solict(dev, &ifp->addr);
@@ -3583,7 +3584,7 @@
/* Because optimistic nodes can use this address,
* notify listeners. If DAD fails, RTM_DELADDR is sent.
*/
- ipv6_ifa_notify(RTM_NEWADDR, ifp);
+ notify = true;
}
}
@@ -3591,6 +3592,8 @@
out:
spin_unlock(&ifp->lock);
read_unlock_bh(&idev->lock);
+ if (notify)
+ ipv6_ifa_notify(RTM_NEWADDR, ifp);
}
static void addrconf_dad_start(struct inet6_ifaddr *ifp)
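
The addrconf change defers ipv6_ifa_notify() until after ifp->lock and idev->lock are dropped: calling out to listeners while holding the locks risks deadlock, so the code records the decision in a flag and notifies after unlocking. The same shape in portable C, illustrative only:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t addr_lock = PTHREAD_MUTEX_INITIALIZER;

static void notify_listeners(void)
{
	puts("RTM_NEWADDR");	/* stands in for ipv6_ifa_notify() */
}

static void dad_begin(bool optimistic)
{
	bool notify = false;

	pthread_mutex_lock(&addr_lock);
	/* ... DAD state updates done under the lock ... */
	if (optimistic)
		notify = true;	/* record the decision, don't call out */
	pthread_mutex_unlock(&addr_lock);

	if (notify)
		notify_listeners();	/* safe: no locks held anymore */
}

int main(void)
{
	dad_begin(true);
	return 0;
}
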
diff --git a/net/ipv6/ip6_flowlabel.c b/net/ipv6/ip6_flowlabel.c
index 1f9ebe3..dc2db4f 100644
--- a/net/ipv6/ip6_flowlabel.c
+++ b/net/ipv6/ip6_flowlabel.c
@@ -540,12 +540,13 @@
}
spin_lock_bh(&ip6_sk_fl_lock);
for (sflp = &np->ipv6_fl_list;
- (sfl = rcu_dereference(*sflp)) != NULL;
+ (sfl = rcu_dereference_protected(*sflp,
+ lockdep_is_held(&ip6_sk_fl_lock))) != NULL;
sflp = &sfl->next) {
if (sfl->fl->label == freq.flr_label) {
if (freq.flr_label == (np->flow_label&IPV6_FLOWLABEL_MASK))
np->flow_label &= ~IPV6_FLOWLABEL_MASK;
- *sflp = rcu_dereference(sfl->next);
+ *sflp = sfl->next;
spin_unlock_bh(&ip6_sk_fl_lock);
fl_release(sfl->fl);
kfree_rcu(sfl, rcu);
diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
index f37f18b..a69aad1 100644
--- a/net/ipv6/ip6_gre.c
+++ b/net/ipv6/ip6_gre.c
@@ -1512,6 +1512,7 @@
dev->destructor = ip6gre_dev_free;
dev->features |= NETIF_F_NETNS_LOCAL;
+ dev->priv_flags &= ~IFF_TX_SKB_SHARING;
}
static int ip6gre_newlink(struct net *src_net, struct net_device *dev,
diff --git a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
index 31ba7ca..051b6a6 100644
--- a/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
+++ b/net/ipv6/netfilter/nf_nat_masquerade_ipv6.c
@@ -21,6 +21,10 @@
#include <net/ipv6.h>
#include <net/netfilter/ipv6/nf_nat_masquerade.h>
+#define MAX_WORK_COUNT 16
+
+static atomic_t v6_worker_count;
+
unsigned int
nf_nat_masquerade_ipv6(struct sk_buff *skb, const struct nf_nat_range *range,
const struct net_device *out)
@@ -78,14 +82,78 @@
.notifier_call = masq_device_event,
};
+struct masq_dev_work {
+ struct work_struct work;
+ struct net *net;
+ int ifindex;
+};
+
+static void iterate_cleanup_work(struct work_struct *work)
+{
+ struct masq_dev_work *w;
+ long index;
+
+ w = container_of(work, struct masq_dev_work, work);
+
+ index = w->ifindex;
+ nf_ct_iterate_cleanup(w->net, device_cmp, (void *)index, 0, 0);
+
+ put_net(w->net);
+ kfree(w);
+ atomic_dec(&v6_worker_count);
+ module_put(THIS_MODULE);
+}
+
+/* The ipv6 inet notifier is an atomic notifier, i.e. we cannot
+ * schedule.
+ *
+ * Unfortunately, nf_ct_iterate_cleanup can run for a long
+ * time if there are lots of conntracks and the system
+ * handles high softirq load, and it frequently calls cond_resched
+ * while iterating the conntrack table, which may sleep.
+ *
+ * So we defer the nf_ct_iterate_cleanup walk to the system workqueue.
+ *
+ * As we can have 'a lot' of inet events (depending on the number
+ * of ipv6 addresses being deleted), we also need to add an upper
+ * limit to the number of queued work items.
+ */
static int masq_inet_event(struct notifier_block *this,
unsigned long event, void *ptr)
{
struct inet6_ifaddr *ifa = ptr;
- struct netdev_notifier_info info;
+ const struct net_device *dev;
+ struct masq_dev_work *w;
+ struct net *net;
- netdev_notifier_info_init(&info, ifa->idev->dev);
- return masq_device_event(this, event, &info);
+ if (event != NETDEV_DOWN ||
+ atomic_read(&v6_worker_count) >= MAX_WORK_COUNT)
+ return NOTIFY_DONE;
+
+ dev = ifa->idev->dev;
+ net = maybe_get_net(dev_net(dev));
+ if (!net)
+ return NOTIFY_DONE;
+
+ if (!try_module_get(THIS_MODULE))
+ goto err_module;
+
+ w = kmalloc(sizeof(*w), GFP_ATOMIC);
+ if (w) {
+ atomic_inc(&v6_worker_count);
+
+ INIT_WORK(&w->work, iterate_cleanup_work);
+ w->ifindex = dev->ifindex;
+ w->net = net;
+ schedule_work(&w->work);
+
+ return NOTIFY_DONE;
+ }
+
+ module_put(THIS_MODULE);
+ err_module:
+ put_net(net);
+ return NOTIFY_DONE;
}
static struct notifier_block masq_inet_notifier = {
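
The masquerade rewrite above moves conntrack cleanup out of the atomic inet notifier into a work item, and caps the backlog with an atomic counter so a flood of address-down events cannot queue unbounded work. A toy userspace analogue of that bounded-deferral pattern; run_cleanup() stands in for the handler schedule_work() would run, and all names are illustrative:

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_WORK_COUNT 16

static atomic_int worker_count;

struct cleanup_work { int ifindex; };

/* Stands in for the work handler run later by schedule_work(); it
 * drops the backlog count when the deferred walk is done.
 */
static void run_cleanup(struct cleanup_work *w)
{
	printf("cleaning up conntracks for ifindex %d\n", w->ifindex);
	free(w);
	atomic_fetch_sub(&worker_count, 1);
}

/* "Atomic context": may not sleep, so only allocate and queue, and
 * refuse once the backlog cap is reached.
 */
static void on_addr_down(int ifindex)
{
	struct cleanup_work *w;

	if (atomic_load(&worker_count) >= MAX_WORK_COUNT)
		return;

	w = malloc(sizeof(*w));
	if (!w)
		return;

	atomic_fetch_add(&worker_count, 1);
	w->ifindex = ifindex;
	run_cleanup(w);	/* kernel: INIT_WORK() + schedule_work() */
}

int main(void)
{
	on_addr_down(3);
	return 0;
}
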
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 006396e..5c8c842 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -327,6 +327,7 @@
struct tcp_sock *tp;
__u32 seq, snd_una;
struct sock *sk;
+ bool fatal;
int err;
sk = __inet6_lookup_established(net, &tcp_hashinfo,
@@ -345,8 +346,9 @@
return;
}
seq = ntohl(th->seq);
+ fatal = icmpv6_err_convert(type, code, &err);
if (sk->sk_state == TCP_NEW_SYN_RECV)
- return tcp_req_err(sk, seq);
+ return tcp_req_err(sk, seq, fatal);
bh_lock_sock(sk);
if (sock_owned_by_user(sk) && type != ICMPV6_PKT_TOOBIG)
@@ -400,7 +402,6 @@
goto out;
}
- icmpv6_err_convert(type, code, &err);
/* Might be for an request_sock */
switch (sk->sk_state) {
@@ -1386,7 +1387,7 @@
if (sk->sk_state == TCP_NEW_SYN_RECV) {
struct request_sock *req = inet_reqsk(sk);
- struct sock *nsk = NULL;
+ struct sock *nsk;
sk = req->rsk_listener;
tcp_v6_fill_cb(skb, hdr, th);
@@ -1394,24 +1395,24 @@
reqsk_put(req);
goto discard_it;
}
- if (likely(sk->sk_state == TCP_LISTEN)) {
- nsk = tcp_check_req(sk, skb, req, false);
- } else {
+ if (unlikely(sk->sk_state != TCP_LISTEN)) {
inet_csk_reqsk_queue_drop_and_put(sk, req);
goto lookup;
}
+ sock_hold(sk);
+ nsk = tcp_check_req(sk, skb, req, false);
if (!nsk) {
reqsk_put(req);
- goto discard_it;
+ goto discard_and_relse;
}
if (nsk == sk) {
- sock_hold(sk);
reqsk_put(req);
tcp_v6_restore_cb(skb);
} else if (tcp_child_process(sk, nsk, skb)) {
tcp_v6_send_reset(nsk, skb);
- goto discard_it;
+ goto discard_and_relse;
} else {
+ sock_put(sk);
return 0;
}
}
diff --git a/net/l2tp/l2tp_netlink.c b/net/l2tp/l2tp_netlink.c
index f93c5be..2caaa84 100644
--- a/net/l2tp/l2tp_netlink.c
+++ b/net/l2tp/l2tp_netlink.c
@@ -124,8 +124,13 @@
ret = l2tp_nl_tunnel_send(msg, info->snd_portid, info->snd_seq,
NLM_F_ACK, tunnel, cmd);
- if (ret >= 0)
- return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+ if (ret >= 0) {
+ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+ /* We don't care if no one is listening */
+ if (ret == -ESRCH)
+ ret = 0;
+ return ret;
+ }
nlmsg_free(msg);
@@ -147,8 +152,13 @@
ret = l2tp_nl_session_send(msg, info->snd_portid, info->snd_seq,
NLM_F_ACK, session, cmd);
- if (ret >= 0)
- return genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+ if (ret >= 0) {
+ ret = genlmsg_multicast_allns(family, msg, 0, 0, GFP_ATOMIC);
+ /* We don't care if no one is listening */
+ if (ret == -ESRCH)
+ ret = 0;
+ return ret;
+ }
nlmsg_free(msg);
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 8c067e6..95e757c 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -891,7 +891,7 @@
depends on IPV6 || IPV6=n
depends on !NF_CONNTRACK || NF_CONNTRACK
select NF_DUP_IPV4
- select NF_DUP_IPV6 if IP6_NF_IPTABLES != n
+ select NF_DUP_IPV6 if IPV6
---help---
This option adds a "TEE" target with which a packet can be cloned and
the clone rerouted to another nexthop.
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 58882de..f60b4fd 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1412,6 +1412,7 @@
}
spin_unlock(lockp);
local_bh_enable();
+ cond_resched();
}
for_each_possible_cpu(cpu) {
@@ -1424,6 +1425,7 @@
set_bit(IPS_DYING_BIT, &ct->status);
}
spin_unlock_bh(&pcpu->lock);
+ cond_resched();
}
return NULL;
found:
@@ -1440,6 +1442,8 @@
struct nf_conn *ct;
unsigned int bucket = 0;
+ might_sleep();
+
while ((ct = get_next_corpse(net, iter, data, &bucket)) != NULL) {
/* Time to push up daisies... */
if (del_timer(&ct->timeout))
@@ -1448,6 +1452,7 @@
/* ... else the timer will get him soon. */
nf_ct_put(ct);
+ cond_resched();
}
}
EXPORT_SYMBOL_GPL(nf_ct_iterate_cleanup);
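
The conntrack hunks sprinkle cond_resched() through get_next_corpse() and nf_ct_iterate_cleanup(), and assert might_sleep(), so a large table walk yields the CPU instead of monopolizing it; this is also what makes deferring the walk to a workqueue (as in the masquerade change above) safe. A tiny sketch of the periodic-yield idea, with sched_yield() standing in for cond_resched():

#include <sched.h>
#include <stddef.h>

static void touch(int *p) { (*p)++; }

/* A long table walk yields the CPU every so often so it cannot hog a
 * core; sched_yield() stands in for the kernel's cond_resched().
 */
static void scan_table(int *table, size_t n, void (*visit)(int *))
{
	size_t i;

	for (i = 0; i < n; i++) {
		visit(&table[i]);
		if ((i & 1023) == 0)
			sched_yield();
	}
}

int main(void)
{
	static int table[4096];

	scan_table(table, 4096, touch);
	return table[0] == 1 ? 0 : 1;
}
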
diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c
index a7ba233..857ae89 100644
--- a/net/netfilter/nfnetlink.c
+++ b/net/netfilter/nfnetlink.c
@@ -311,14 +311,14 @@
#endif
{
nfnl_unlock(subsys_id);
- netlink_ack(skb, nlh, -EOPNOTSUPP);
+ netlink_ack(oskb, nlh, -EOPNOTSUPP);
return kfree_skb(skb);
}
}
if (!ss->commit || !ss->abort) {
nfnl_unlock(subsys_id);
- netlink_ack(skb, nlh, -EOPNOTSUPP);
+ netlink_ack(oskb, nlh, -EOPNOTSUPP);
return kfree_skb(skb);
}
@@ -328,10 +328,12 @@
nlh = nlmsg_hdr(skb);
err = 0;
- if (nlmsg_len(nlh) < sizeof(struct nfgenmsg) ||
- skb->len < nlh->nlmsg_len) {
- err = -EINVAL;
- goto ack;
+ if (nlh->nlmsg_len < NLMSG_HDRLEN ||
+ skb->len < nlh->nlmsg_len ||
+ nlmsg_len(nlh) < sizeof(struct nfgenmsg)) {
+ nfnl_err_reset(&err_list);
+ status |= NFNL_BATCH_FAILURE;
+ goto done;
}
/* Only requests are handled by the kernel */
@@ -406,7 +408,7 @@
* pointing to the batch header.
*/
nfnl_err_reset(&err_list);
- netlink_ack(skb, nlmsg_hdr(oskb), -ENOMEM);
+ netlink_ack(oskb, nlmsg_hdr(oskb), -ENOMEM);
status |= NFNL_BATCH_FAILURE;
goto done;
}
diff --git a/net/netfilter/nfnetlink_cttimeout.c b/net/netfilter/nfnetlink_cttimeout.c
index 94837d2..2671b9d 100644
--- a/net/netfilter/nfnetlink_cttimeout.c
+++ b/net/netfilter/nfnetlink_cttimeout.c
@@ -312,7 +312,7 @@
hlist_nulls_for_each_entry(h, nn, &net->ct.hash[i], hnnode)
untimeout(h, timeout);
}
- nf_conntrack_lock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
+ spin_unlock(&nf_conntrack_locks[i % CONNTRACK_LOCKS]);
}
local_bh_enable();
}
diff --git a/net/netfilter/nft_counter.c b/net/netfilter/nft_counter.c
index c7808fc..c9743f7 100644
--- a/net/netfilter/nft_counter.c
+++ b/net/netfilter/nft_counter.c
@@ -100,7 +100,7 @@
cpu_stats = netdev_alloc_pcpu_stats(struct nft_counter_percpu);
if (cpu_stats == NULL)
- return ENOMEM;
+ return -ENOMEM;
preempt_disable();
this_cpu = this_cpu_ptr(cpu_stats);
@@ -138,7 +138,7 @@
cpu_stats = __netdev_alloc_pcpu_stats(struct nft_counter_percpu,
GFP_ATOMIC);
if (cpu_stats == NULL)
- return ENOMEM;
+ return -ENOMEM;
preempt_disable();
this_cpu = this_cpu_ptr(cpu_stats);
diff --git a/net/netfilter/xt_TEE.c b/net/netfilter/xt_TEE.c
index 3eff7b6..6e57a39 100644
--- a/net/netfilter/xt_TEE.c
+++ b/net/netfilter/xt_TEE.c
@@ -38,7 +38,7 @@
return XT_CONTINUE;
}
-#if IS_ENABLED(CONFIG_NF_DUP_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
static unsigned int
tee_tg6(struct sk_buff *skb, const struct xt_action_param *par)
{
@@ -131,7 +131,7 @@
.destroy = tee_tg_destroy,
.me = THIS_MODULE,
},
-#if IS_ENABLED(CONFIG_NF_DUP_IPV6)
+#if IS_ENABLED(CONFIG_IPV6)
{
.name = "TEE",
.revision = 1,
diff --git a/net/openvswitch/vport-vxlan.c b/net/openvswitch/vport-vxlan.c
index 1605691..5eb7694 100644
--- a/net/openvswitch/vport-vxlan.c
+++ b/net/openvswitch/vport-vxlan.c
@@ -90,7 +90,9 @@
int err;
struct vxlan_config conf = {
.no_share = true,
- .flags = VXLAN_F_COLLECT_METADATA,
+ .flags = VXLAN_F_COLLECT_METADATA | VXLAN_F_UDP_ZERO_CSUM6_RX,
+ /* Don't restrict the packets that can be sent by MTU */
+ .mtu = IP_MAX_MTU,
};
if (!options) {
diff --git a/net/sched/sch_api.c b/net/sched/sch_api.c
index b5c2cf2..af1acf0 100644
--- a/net/sched/sch_api.c
+++ b/net/sched/sch_api.c
@@ -1852,6 +1852,7 @@
}
tp = old_tp;
+ protocol = tc_skb_protocol(skb);
goto reclassify;
#endif
}
diff --git a/net/sctp/protocol.c b/net/sctp/protocol.c
index ab0d538..1099e99 100644
--- a/net/sctp/protocol.c
+++ b/net/sctp/protocol.c
@@ -60,6 +60,8 @@
#include <net/inet_common.h>
#include <net/inet_ecn.h>
+#define MAX_SCTP_PORT_HASH_ENTRIES (64 * 1024)
+
/* Global data structures. */
struct sctp_globals sctp_globals __read_mostly;
@@ -1355,6 +1357,8 @@
unsigned long limit;
int max_share;
int order;
+ int num_entries;
+ int max_entry_order;
sock_skb_cb_check_size(sizeof(struct sctp_ulpevent));
@@ -1407,14 +1411,24 @@
/* Size and allocate the association hash table.
* The methodology is similar to that of the tcp hash tables.
+ * Though not identical, the approach starts by computing a goal size
*/
if (totalram_pages >= (128 * 1024))
goal = totalram_pages >> (22 - PAGE_SHIFT);
else
goal = totalram_pages >> (24 - PAGE_SHIFT);
- for (order = 0; (1UL << order) < goal; order++)
- ;
+ /* Then compute the page order for said goal */
+ order = get_order(goal);
+
+ /* Now compute the required page order for the maximum sized table we
+ * want to create
+ */
+ max_entry_order = get_order(MAX_SCTP_PORT_HASH_ENTRIES *
+ sizeof(struct sctp_bind_hashbucket));
+
+ /* Limit the page order by that maximum hash table size */
+ order = min(order, max_entry_order);
/* Allocate and initialize the endpoint hash table. */
sctp_ep_hashsize = 64;
@@ -1430,20 +1444,35 @@
INIT_HLIST_HEAD(&sctp_ep_hashtable[i].chain);
}
- /* Allocate and initialize the SCTP port hash table. */
+ /* Allocate and initialize the SCTP port hash table.
+ * Note that order is initialized to start at the maximum sized
+ * table we want to support. If we can't get that many pages,
+ * reduce the order and try again.
+ */
do {
- sctp_port_hashsize = (1UL << order) * PAGE_SIZE /
- sizeof(struct sctp_bind_hashbucket);
- if ((sctp_port_hashsize > (64 * 1024)) && order > 0)
- continue;
sctp_port_hashtable = (struct sctp_bind_hashbucket *)
__get_free_pages(GFP_KERNEL | __GFP_NOWARN, order);
} while (!sctp_port_hashtable && --order > 0);
+
if (!sctp_port_hashtable) {
pr_err("Failed bind hash alloc\n");
status = -ENOMEM;
goto err_bhash_alloc;
}
+
+ /* Now compute the number of entries that will fit in the
+ * port hash space we allocated
+ */
+ num_entries = (1UL << order) * PAGE_SIZE /
+ sizeof(struct sctp_bind_hashbucket);
+
+ /* And finish by rounding it down to the nearest power of two.
+ * This wastes some memory, of course, but it's needed because
+ * the hash function operates on the assumption that the number
+ * of entries is a power of two.
+ */
+ sctp_port_hashsize = rounddown_pow_of_two(num_entries);
+
for (i = 0; i < sctp_port_hashsize; i++) {
spin_lock_init(&sctp_port_hashtable[i].lock);
INIT_HLIST_HEAD(&sctp_port_hashtable[i].chain);
@@ -1452,7 +1481,8 @@
if (sctp_transport_hashtable_init())
goto err_thash_alloc;
- pr_info("Hash tables configured (bind %d)\n", sctp_port_hashsize);
+ pr_info("Hash tables configured (bind %d/%d)\n", sctp_port_hashsize,
+ num_entries);
sctp_sysctl_register();
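
The sctp sizing rework replaces the old grow-until-too-big loop: compute a goal order with get_order(), clamp it to the order needed for a 64K-entry table, allocate with __get_free_pages() retrying at smaller orders, then round the entry count that actually fits down to a power of two because the hash function requires it. The arithmetic, modeled in plain C with a hypothetical 16-byte bucket:

#include <stdio.h>

#define PAGE_SIZE 4096UL

struct bucket { void *chain; void *lock; };	/* hypothetical 16-byte bucket */

/* Like the kernel's get_order(): smallest order with 2^order pages
 * covering the given byte count.
 */
static int order_for(unsigned long bytes)
{
	unsigned long pages = (bytes + PAGE_SIZE - 1) / PAGE_SIZE;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

static unsigned long rounddown_pow2(unsigned long n)
{
	while (n & (n - 1))
		n &= n - 1;	/* clear low bits until a single one remains */
	return n;
}

int main(void)
{
	unsigned long max_entries = 64 * 1024;
	int order = order_for(512 * 1024);	/* pretend memory-derived goal */
	int max_order = order_for(max_entries * sizeof(struct bucket));
	unsigned long fit;

	if (order > max_order)
		order = max_order;	/* never ask for more than the cap */

	/* entries that fit in the pages we (pretend to) allocated */
	fit = (1UL << order) * PAGE_SIZE / sizeof(struct bucket);
	printf("order %d -> %lu entries, hashing over %lu\n",
	       order, fit, rounddown_pow2(fit));
	return 0;
}
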
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 5ca2ebf..e878da0 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -5538,6 +5538,7 @@
struct sctp_hmac_algo_param *hmacs;
__u16 data_len = 0;
u32 num_idents;
+ int i;
if (!ep->auth_enable)
return -EACCES;
@@ -5555,8 +5556,12 @@
return -EFAULT;
if (put_user(num_idents, &p->shmac_num_idents))
return -EFAULT;
- if (copy_to_user(p->shmac_idents, hmacs->hmac_ids, data_len))
- return -EFAULT;
+ for (i = 0; i < num_idents; i++) {
+ __u16 hmacid = ntohs(hmacs->hmac_ids[i]);
+
+ if (copy_to_user(&p->shmac_idents[i], &hmacid, sizeof(__u16)))
+ return -EFAULT;
+ }
return 0;
}
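
The socket.c hunk stops copying the raw big-endian hmac_ids array to userspace; each id is converted with ntohs() and copied individually so userspace sees host byte order. An equivalent userspace sketch:

#include <arpa/inet.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* ids live big-endian in the parameter block; a raw bulk copy would
 * leak wire order to userspace, so convert one element at a time
 * (the kernel does one copy_to_user() per id).
 */
static void export_ids(const uint16_t *be_ids, uint16_t *out, size_t n)
{
	size_t i;

	for (i = 0; i < n; i++)
		out[i] = ntohs(be_ids[i]);
}

int main(void)
{
	uint16_t wire[2] = { htons(1), htons(3) };
	uint16_t host[2];

	export_ids(wire, host, 2);
	printf("%u %u\n", host[0], host[1]);	/* prints "1 3" everywhere */
	return 0;
}
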
diff --git a/net/sunrpc/auth_gss/auth_gss.c b/net/sunrpc/auth_gss/auth_gss.c
index 799e65b..cabf586 100644
--- a/net/sunrpc/auth_gss/auth_gss.c
+++ b/net/sunrpc/auth_gss/auth_gss.c
@@ -740,7 +740,7 @@
default:
printk(KERN_CRIT "%s: bad return from "
"gss_fill_context: %zd\n", __func__, err);
- BUG();
+ gss_msg->msg.errno = -EIO;
}
goto err_release_msg;
}
diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c
index 2b32fd6..273bc3a 100644
--- a/net/sunrpc/cache.c
+++ b/net/sunrpc/cache.c
@@ -1225,7 +1225,7 @@
if (bp[0] == '\\' && bp[1] == 'x') {
/* HEX STRING */
bp += 2;
- while (len < bufsize) {
+ while (len < bufsize - 1) {
int h, l;
h = hex_to_bin(bp[0]);
diff --git a/net/sunrpc/xprtrdma/backchannel.c b/net/sunrpc/xprtrdma/backchannel.c
index cc1251d..2dcd764 100644
--- a/net/sunrpc/xprtrdma/backchannel.c
+++ b/net/sunrpc/xprtrdma/backchannel.c
@@ -341,6 +341,8 @@
rqst->rq_reply_bytes_recvd = 0;
rqst->rq_bytes_sent = 0;
rqst->rq_xid = headerp->rm_xid;
+
+ rqst->rq_private_buf.len = size;
set_bit(RPC_BC_PA_IN_USE, &rqst->rq_bc_pa_state);
buf = &rqst->rq_rcv_buf;
diff --git a/net/tipc/link.c b/net/tipc/link.c
index 0c2944f..347cdc9 100644
--- a/net/tipc/link.c
+++ b/net/tipc/link.c
@@ -1973,8 +1973,10 @@
hdr = genlmsg_put(msg->skb, msg->portid, msg->seq, &tipc_genl_family,
NLM_F_MULTI, TIPC_NL_LINK_GET);
- if (!hdr)
+ if (!hdr) {
+ tipc_bcast_unlock(net);
return -EMSGSIZE;
+ }
attrs = nla_nest_start(msg->skb, TIPC_NLA_LINK);
if (!attrs)
diff --git a/net/tipc/node.c b/net/tipc/node.c
index fa97d96..9d7a16f 100644
--- a/net/tipc/node.c
+++ b/net/tipc/node.c
@@ -346,12 +346,6 @@
skb_queue_head_init(&n->bc_entry.inputq2);
for (i = 0; i < MAX_BEARERS; i++)
spin_lock_init(&n->links[i].lock);
- hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
- list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
- if (n->addr < temp_node->addr)
- break;
- }
- list_add_tail_rcu(&n->list, &temp_node->list);
n->state = SELF_DOWN_PEER_LEAVING;
n->signature = INVALID_NODE_SIG;
n->active_links[0] = INVALID_BEARER_ID;
@@ -372,6 +366,12 @@
tipc_node_get(n);
setup_timer(&n->timer, tipc_node_timeout, (unsigned long)n);
n->keepalive_intv = U32_MAX;
+ hlist_add_head_rcu(&n->hash, &tn->node_htable[tipc_hashfn(addr)]);
+ list_for_each_entry_rcu(temp_node, &tn->node_list, list) {
+ if (n->addr < temp_node->addr)
+ break;
+ }
+ list_add_tail_rcu(&n->list, &temp_node->list);
exit:
spin_unlock_bh(&tn->node_list_lock);
return n;
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index 49d5093..f75f847 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -1496,7 +1496,7 @@
UNIXCB(skb).fp = NULL;
for (i = scm->fp->count-1; i >= 0; i--)
- unix_notinflight(scm->fp->fp[i]);
+ unix_notinflight(scm->fp->user, scm->fp->fp[i]);
}
static void unix_destruct_scm(struct sk_buff *skb)
@@ -1561,7 +1561,7 @@
return -ENOMEM;
for (i = scm->fp->count - 1; i >= 0; i--)
- unix_inflight(scm->fp->fp[i]);
+ unix_inflight(scm->fp->user, scm->fp->fp[i]);
return max_level;
}
@@ -1781,7 +1781,12 @@
goto out_unlock;
}
- if (unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
+ /* other == sk && unix_peer(other) != sk if
+ * - unix_peer(sk) == NULL, destination address bound to sk
+ * - unix_peer(sk) == sk by time of get but disconnected before lock
+ */
+ if (other != sk &&
+ unlikely(unix_peer(other) != sk && unix_recvq_full(other))) {
if (timeo) {
timeo = unix_wait_for_peer(other, timeo);
@@ -2277,13 +2282,15 @@
size_t size = state->size;
unsigned int last_len;
- err = -EINVAL;
- if (sk->sk_state != TCP_ESTABLISHED)
+ if (unlikely(sk->sk_state != TCP_ESTABLISHED)) {
+ err = -EINVAL;
goto out;
+ }
- err = -EOPNOTSUPP;
- if (flags & MSG_OOB)
+ if (unlikely(flags & MSG_OOB)) {
+ err = -EOPNOTSUPP;
goto out;
+ }
target = sock_rcvlowat(sk, flags & MSG_WAITALL, size);
timeo = sock_rcvtimeo(sk, noblock);
@@ -2305,6 +2312,7 @@
bool drop_skb;
struct sk_buff *skb, *last;
+redo:
unix_state_lock(sk);
if (sock_flag(sk, SOCK_DEAD)) {
err = -ECONNRESET;
@@ -2329,9 +2337,11 @@
goto unlock;
unix_state_unlock(sk);
- err = -EAGAIN;
- if (!timeo)
+ if (!timeo) {
+ err = -EAGAIN;
break;
+ }
+
mutex_unlock(&u->readlock);
timeo = unix_stream_data_wait(sk, timeo, last,
@@ -2344,7 +2354,7 @@
}
mutex_lock(&u->readlock);
- continue;
+ goto redo;
unlock:
unix_state_unlock(sk);
break;
diff --git a/net/unix/diag.c b/net/unix/diag.c
index c512f64..4d96797 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -220,7 +220,7 @@
return skb->len;
}
-static struct sock *unix_lookup_by_ino(int ino)
+static struct sock *unix_lookup_by_ino(unsigned int ino)
{
int i;
struct sock *sk;
diff --git a/net/unix/garbage.c b/net/unix/garbage.c
index 8fcdc22..6a0d485 100644
--- a/net/unix/garbage.c
+++ b/net/unix/garbage.c
@@ -116,7 +116,7 @@
* descriptor if it is for an AF_UNIX socket.
*/
-void unix_inflight(struct file *fp)
+void unix_inflight(struct user_struct *user, struct file *fp)
{
struct sock *s = unix_get_socket(fp);
@@ -133,11 +133,11 @@
}
unix_tot_inflight++;
}
- fp->f_cred->user->unix_inflight++;
+ user->unix_inflight++;
spin_unlock(&unix_gc_lock);
}
-void unix_notinflight(struct file *fp)
+void unix_notinflight(struct user_struct *user, struct file *fp)
{
struct sock *s = unix_get_socket(fp);
@@ -152,7 +152,7 @@
list_del_init(&u->link);
unix_tot_inflight--;
}
- fp->f_cred->user->unix_inflight--;
+ user->unix_inflight--;
spin_unlock(&unix_gc_lock);
}
diff --git a/net/vmw_vsock/af_vsock.c b/net/vmw_vsock/af_vsock.c
index 7fd1220..bbe65dc 100644
--- a/net/vmw_vsock/af_vsock.c
+++ b/net/vmw_vsock/af_vsock.c
@@ -1557,8 +1557,6 @@
if (err < 0)
goto out;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
-
while (total_written < len) {
ssize_t written;
@@ -1578,7 +1576,9 @@
goto out_wait;
release_sock(sk);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
timeout = schedule_timeout(timeout);
+ finish_wait(sk_sleep(sk), &wait);
lock_sock(sk);
if (signal_pending(current)) {
err = sock_intr_errno(timeout);
@@ -1588,8 +1588,6 @@
goto out_wait;
}
- prepare_to_wait(sk_sleep(sk), &wait,
- TASK_INTERRUPTIBLE);
}
/* These checks occur both as part of and after the loop
@@ -1635,7 +1633,6 @@
out_wait:
if (total_written > 0)
err = total_written;
- finish_wait(sk_sleep(sk), &wait);
out:
release_sock(sk);
return err;
@@ -1716,7 +1713,6 @@
if (err < 0)
goto out;
- prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
while (1) {
s64 ready = vsock_stream_has_data(vsk);
@@ -1727,7 +1723,7 @@
*/
err = -ENOMEM;
- goto out_wait;
+ goto out;
} else if (ready > 0) {
ssize_t read;
@@ -1750,7 +1746,7 @@
vsk, target, read,
!(flags & MSG_PEEK), &recv_data);
if (err < 0)
- goto out_wait;
+ goto out;
if (read >= target || flags & MSG_PEEK)
break;
@@ -1773,7 +1769,9 @@
break;
release_sock(sk);
+ prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
timeout = schedule_timeout(timeout);
+ finish_wait(sk_sleep(sk), &wait);
lock_sock(sk);
if (signal_pending(current)) {
@@ -1783,9 +1781,6 @@
err = -EAGAIN;
break;
}
-
- prepare_to_wait(sk_sleep(sk), &wait,
- TASK_INTERRUPTIBLE);
}
}
@@ -1816,8 +1811,6 @@
err = copied;
}
-out_wait:
- finish_wait(sk_sleep(sk), &wait);
out:
release_sock(sk);
return err;
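
The af_vsock changes shrink the prepare_to_wait()/finish_wait() window so the task is registered on the wait queue only around the actual schedule_timeout(), never while re-acquiring the socket lock or running the copy loop. In portable C this is the shape a condition-variable loop forces anyway; a sketch, names illustrative:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sk_wait = PTHREAD_COND_INITIALIZER;
static bool data_ready;

/* Register on the wait queue only around the sleep itself; on every
 * other path (including lock re-acquisition) we are off the queue.
 * pthread_cond_wait() bundles the equivalent of release_sock(),
 * prepare_to_wait(), schedule_timeout(), finish_wait(), lock_sock().
 */
static void recv_wait(void)
{
	pthread_mutex_lock(&sk_lock);
	while (!data_ready)
		pthread_cond_wait(&sk_wait, &sk_lock);
	pthread_mutex_unlock(&sk_lock);
}

int main(void)
{
	data_ready = true;	/* avoid blocking in this toy */
	recv_wait();
	return 0;
}
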
diff --git a/scripts/prune-kernel b/scripts/prune-kernel
new file mode 100755
index 0000000..ab5034e
--- /dev/null
+++ b/scripts/prune-kernel
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+# Because CONFIG_LOCALVERSION_AUTO produces a new version string for every
+# build instead of reusing the same one, /boot and /lib/modules/ eventually
+# fill up.
+# Dumb script to purge that stuff:
+
+for f in "$@"
+do
+ if rpm -qf "/lib/modules/$f" >/dev/null; then
+ echo "keeping $f (installed from rpm)"
+ elif [ $(uname -r) = "$f" ]; then
+ echo "keeping $f (running kernel) "
+ else
+ echo "removing $f"
+ rm -f "/boot/initramfs-$f.img" "/boot/System.map-$f"
+ rm -f "/boot/vmlinuz-$f" "/boot/config-$f"
+ rm -rf "/lib/modules/$f"
+ new-kernel-pkg --remove "$f"
+ fi
+done
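
For illustration only, an invocation such as "scripts/prune-kernel 4.4.0-rc1+ 4.4.0-rc2+" (hypothetical version strings) removes the /boot images, config, System.map and module directory of every argument that is neither owned by an rpm nor the currently running kernel.
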
diff --git a/security/integrity/evm/evm_main.c b/security/integrity/evm/evm_main.c
index f716025..e6ea9d4 100644
--- a/security/integrity/evm/evm_main.c
+++ b/security/integrity/evm/evm_main.c
@@ -23,6 +23,7 @@
#include <linux/integrity.h>
#include <linux/evm.h>
#include <crypto/hash.h>
+#include <crypto/algapi.h>
#include "evm.h"
int evm_initialized;
@@ -148,7 +149,7 @@
xattr_value_len, calc.digest);
if (rc)
break;
- rc = memcmp(xattr_data->digest, calc.digest,
+ rc = crypto_memneq(xattr_data->digest, calc.digest,
sizeof(calc.digest));
if (rc)
rc = -EINVAL;
diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
index f8110cf..f1ab715 100644
--- a/security/selinux/hooks.c
+++ b/security/selinux/hooks.c
@@ -3249,7 +3249,7 @@
static void selinux_inode_getsecid(struct inode *inode, u32 *secid)
{
- struct inode_security_struct *isec = inode_security(inode);
+ struct inode_security_struct *isec = inode_security_novalidate(inode);
*secid = isec->sid;
}
diff --git a/security/selinux/nlmsgtab.c b/security/selinux/nlmsgtab.c
index 2bbb418..8495b93 100644
--- a/security/selinux/nlmsgtab.c
+++ b/security/selinux/nlmsgtab.c
@@ -83,6 +83,7 @@
{ TCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
{ DCCPDIAG_GETSOCK, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
{ SOCK_DIAG_BY_FAMILY, NETLINK_TCPDIAG_SOCKET__NLMSG_READ },
+ { SOCK_DESTROY, NETLINK_TCPDIAG_SOCKET__NLMSG_WRITE },
};
static struct nlmsg_perm nlmsg_xfrm_perms[] =
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index fadd3eb..9106d8e 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -74,6 +74,18 @@
static DEFINE_RWLOCK(snd_pcm_link_rwlock);
static DECLARE_RWSEM(snd_pcm_link_rwsem);
+/* A writer in an rwsem may block readers even while it is waiting in the
+ * queue, and this may lead to a deadlock when a code path takes the read
+ * sem twice (e.g. once in snd_pcm_action_nonatomic() and again in
+ * snd_pcm_stream_lock()). As a (suboptimal) workaround, let the writer
+ * spin until it gets the lock.
+ */
+static inline void down_write_nonblock(struct rw_semaphore *lock)
+{
+ while (!down_write_trylock(lock))
+ cond_resched();
+}
+
/**
* snd_pcm_stream_lock - Lock the PCM stream
* @substream: PCM substream
@@ -1813,7 +1825,7 @@
res = -ENOMEM;
goto _nolock;
}
- down_write(&snd_pcm_link_rwsem);
+ down_write_nonblock(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (substream->runtime->status->state == SNDRV_PCM_STATE_OPEN ||
substream->runtime->status->state != substream1->runtime->status->state ||
@@ -1860,7 +1872,7 @@
struct snd_pcm_substream *s;
int res = 0;
- down_write(&snd_pcm_link_rwsem);
+ down_write_nonblock(&snd_pcm_link_rwsem);
write_lock_irq(&snd_pcm_link_rwlock);
if (!snd_pcm_stream_linked(substream)) {
res = -EALREADY;
diff --git a/sound/core/seq/seq_memory.c b/sound/core/seq/seq_memory.c
index 8010766..c850345 100644
--- a/sound/core/seq/seq_memory.c
+++ b/sound/core/seq/seq_memory.c
@@ -383,15 +383,20 @@
if (snd_BUG_ON(!pool))
return -EINVAL;
- if (pool->ptr) /* should be atomic? */
- return 0;
- pool->ptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
- if (!pool->ptr)
+ cellptr = vmalloc(sizeof(struct snd_seq_event_cell) * pool->size);
+ if (!cellptr)
return -ENOMEM;
/* add new cells to the free cell list */
spin_lock_irqsave(&pool->lock, flags);
+ if (pool->ptr) {
+ spin_unlock_irqrestore(&pool->lock, flags);
+ vfree(cellptr);
+ return 0;
+ }
+
+ pool->ptr = cellptr;
pool->free = NULL;
for (cell = 0; cell < pool->size; cell++) {
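
The seq_memory fix removes a race in pool initialization: the buffer is now allocated outside the spinlock (vmalloc may sleep), and the pool->ptr check is redone under the lock, with the loser of the race freeing its buffer. The pattern in userspace form, illustrative names:

#include <pthread.h>
#include <stdlib.h>

static pthread_mutex_t pool_lock = PTHREAD_MUTEX_INITIALIZER;
static void *pool_ptr;

/* Allocate outside the lock (the kernel's vmalloc may sleep), then
 * re-check under the lock; the loser of a concurrent init frees its
 * buffer instead of leaking it or clobbering the winner's.
 */
static int pool_init(size_t size)
{
	void *mem = malloc(size);

	if (!mem)
		return -1;

	pthread_mutex_lock(&pool_lock);
	if (pool_ptr) {
		pthread_mutex_unlock(&pool_lock);
		free(mem);	/* someone else won the race */
		return 0;
	}
	pool_ptr = mem;
	/* ... build the free-cell list while still holding the lock ... */
	pthread_mutex_unlock(&pool_lock);
	return 0;
}

int main(void)
{
	return pool_init(4096);
}
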
diff --git a/sound/core/seq/seq_ports.c b/sound/core/seq/seq_ports.c
index 921fb2b..fe686ee 100644
--- a/sound/core/seq/seq_ports.c
+++ b/sound/core/seq/seq_ports.c
@@ -535,19 +535,22 @@
bool is_src, bool ack)
{
struct snd_seq_port_subs_info *grp;
+ struct list_head *list;
+ bool empty;
grp = is_src ? &port->c_src : &port->c_dest;
+ list = is_src ? &subs->src_list : &subs->dest_list;
down_write(&grp->list_mutex);
write_lock_irq(&grp->list_lock);
- if (is_src)
- list_del(&subs->src_list);
- else
- list_del(&subs->dest_list);
+ empty = list_empty(list);
+ if (!empty)
+ list_del_init(list);
grp->exclusive = 0;
write_unlock_irq(&grp->list_lock);
up_write(&grp->list_mutex);
- unsubscribe_port(client, port, grp, &subs->info, ack);
+ if (!empty)
+ unsubscribe_port(client, port, grp, &subs->info, ack);
}
/* connect two ports */
diff --git a/sound/core/timer.c b/sound/core/timer.c
index 9b513a0..dca817f 100644
--- a/sound/core/timer.c
+++ b/sound/core/timer.c
@@ -422,7 +422,7 @@
spin_lock_irqsave(&timer->lock, flags);
list_for_each_entry(ts, &ti->slave_active_head, active_list)
if (ts->ccallback)
- ts->ccallback(ti, event + 100, &tstamp, resolution);
+ ts->ccallback(ts, event + 100, &tstamp, resolution);
spin_unlock_irqrestore(&timer->lock, flags);
}
@@ -518,9 +518,13 @@
spin_unlock_irqrestore(&slave_active_lock, flags);
return -EBUSY;
}
+ if (timeri->timer)
+ spin_lock(&timeri->timer->lock);
timeri->flags &= ~SNDRV_TIMER_IFLG_RUNNING;
list_del_init(&timeri->ack_list);
list_del_init(&timeri->active_list);
+ if (timeri->timer)
+ spin_unlock(&timeri->timer->lock);
spin_unlock_irqrestore(&slave_active_lock, flags);
goto __end;
}
@@ -1929,6 +1933,7 @@
{
struct snd_timer_user *tu;
long result = 0, unit;
+ int qhead;
int err = 0;
tu = file->private_data;
@@ -1940,7 +1945,7 @@
if ((file->f_flags & O_NONBLOCK) != 0 || result > 0) {
err = -EAGAIN;
- break;
+ goto _error;
}
set_current_state(TASK_INTERRUPTIBLE);
@@ -1955,42 +1960,37 @@
if (tu->disconnected) {
err = -ENODEV;
- break;
+ goto _error;
}
if (signal_pending(current)) {
err = -ERESTARTSYS;
- break;
+ goto _error;
}
}
+ qhead = tu->qhead++;
+ tu->qhead %= tu->queue_size;
spin_unlock_irq(&tu->qlock);
- if (err < 0)
- goto _error;
if (tu->tread) {
- if (copy_to_user(buffer, &tu->tqueue[tu->qhead++],
- sizeof(struct snd_timer_tread))) {
+ if (copy_to_user(buffer, &tu->tqueue[qhead],
+ sizeof(struct snd_timer_tread)))
err = -EFAULT;
- goto _error;
- }
} else {
- if (copy_to_user(buffer, &tu->queue[tu->qhead++],
- sizeof(struct snd_timer_read))) {
+ if (copy_to_user(buffer, &tu->queue[qhead],
+ sizeof(struct snd_timer_read)))
err = -EFAULT;
- goto _error;
- }
}
- tu->qhead %= tu->queue_size;
-
- result += unit;
- buffer += unit;
-
spin_lock_irq(&tu->qlock);
tu->qused--;
+ if (err < 0)
+ goto _error;
+ result += unit;
+ buffer += unit;
}
- spin_unlock_irq(&tu->qlock);
_error:
+ spin_unlock_irq(&tu->qlock);
return result > 0 ? result : err;
}
diff --git a/sound/drivers/dummy.c b/sound/drivers/dummy.c
index bde3330..c0f8f61 100644
--- a/sound/drivers/dummy.c
+++ b/sound/drivers/dummy.c
@@ -87,7 +87,7 @@
module_param(fake_buffer, bool, 0444);
MODULE_PARM_DESC(fake_buffer, "Fake buffer allocations.");
#ifdef CONFIG_HIGH_RES_TIMERS
-module_param(hrtimer, bool, 0444);
+module_param(hrtimer, bool, 0644);
MODULE_PARM_DESC(hrtimer, "Use hrtimer as the timer source.");
#endif
@@ -109,6 +109,9 @@
snd_pcm_uframes_t (*pointer)(struct snd_pcm_substream *);
};
+#define get_dummy_ops(substream) \
+ (*(const struct dummy_timer_ops **)(substream)->runtime->private_data)
+
struct dummy_model {
const char *name;
int (*playback_constraints)(struct snd_pcm_runtime *runtime);
@@ -137,7 +140,6 @@
int iobox;
struct snd_kcontrol *cd_volume_ctl;
struct snd_kcontrol *cd_switch_ctl;
- const struct dummy_timer_ops *timer_ops;
};
/*
@@ -231,6 +233,8 @@
*/
struct dummy_systimer_pcm {
+ /* ops must be the first item */
+ const struct dummy_timer_ops *timer_ops;
spinlock_t lock;
struct timer_list timer;
unsigned long base_time;
@@ -366,6 +370,8 @@
*/
struct dummy_hrtimer_pcm {
+ /* ops must be the first item */
+ const struct dummy_timer_ops *timer_ops;
ktime_t base_time;
ktime_t period_time;
atomic_t running;
@@ -492,31 +498,25 @@
static int dummy_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
{
- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
switch (cmd) {
case SNDRV_PCM_TRIGGER_START:
case SNDRV_PCM_TRIGGER_RESUME:
- return dummy->timer_ops->start(substream);
+ return get_dummy_ops(substream)->start(substream);
case SNDRV_PCM_TRIGGER_STOP:
case SNDRV_PCM_TRIGGER_SUSPEND:
- return dummy->timer_ops->stop(substream);
+ return get_dummy_ops(substream)->stop(substream);
}
return -EINVAL;
}
static int dummy_pcm_prepare(struct snd_pcm_substream *substream)
{
- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
- return dummy->timer_ops->prepare(substream);
+ return get_dummy_ops(substream)->prepare(substream);
}
static snd_pcm_uframes_t dummy_pcm_pointer(struct snd_pcm_substream *substream)
{
- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
-
- return dummy->timer_ops->pointer(substream);
+ return get_dummy_ops(substream)->pointer(substream);
}
static struct snd_pcm_hardware dummy_pcm_hardware = {
@@ -562,17 +562,19 @@
struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
struct dummy_model *model = dummy->model;
struct snd_pcm_runtime *runtime = substream->runtime;
+ const struct dummy_timer_ops *ops;
int err;
- dummy->timer_ops = &dummy_systimer_ops;
+ ops = &dummy_systimer_ops;
#ifdef CONFIG_HIGH_RES_TIMERS
if (hrtimer)
- dummy->timer_ops = &dummy_hrtimer_ops;
+ ops = &dummy_hrtimer_ops;
#endif
- err = dummy->timer_ops->create(substream);
+ err = ops->create(substream);
if (err < 0)
return err;
+ get_dummy_ops(substream) = ops;
runtime->hw = dummy->pcm_hw;
if (substream->pcm->device & 1) {
@@ -594,7 +596,7 @@
err = model->capture_constraints(substream->runtime);
}
if (err < 0) {
- dummy->timer_ops->free(substream);
+ get_dummy_ops(substream)->free(substream);
return err;
}
return 0;
@@ -602,8 +604,7 @@
static int dummy_pcm_close(struct snd_pcm_substream *substream)
{
- struct snd_dummy *dummy = snd_pcm_substream_chip(substream);
- dummy->timer_ops->free(substream);
+ get_dummy_ops(substream)->free(substream);
return 0;
}
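
The dummy-driver cleanup drops the per-card timer_ops field in favor of storing the ops pointer as the first member of each backend's private data; get_dummy_ops() then recovers it from the opaque runtime->private_data with a single cast, which is why both backend structs carry the "ops must be the first item" comment. A compact sketch of that first-member dispatch trick:

#include <stdio.h>
#include <stdlib.h>

struct timer_ops { const char *name; };

struct systimer_pcm {
	const struct timer_ops *ops;	/* must stay the first member */
	int state;
};

/* Recover the ops from an opaque private pointer: because the ops
 * pointer is the first member of every backend struct, a single cast
 * works for all of them.
 */
#define get_ops(priv) (*(const struct timer_ops **)(priv))

static const struct timer_ops sys_ops = { "system-timer" };

int main(void)
{
	struct systimer_pcm *p = calloc(1, sizeof(*p));

	if (!p)
		return 1;
	p->ops = &sys_ops;
	printf("%s\n", get_ops(p)->name);	/* generic dispatch */
	free(p);
	return 0;
}
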
diff --git a/sound/firewire/digi00x/amdtp-dot.c b/sound/firewire/digi00x/amdtp-dot.c
index b02a5e8c..0ac92ab 100644
--- a/sound/firewire/digi00x/amdtp-dot.c
+++ b/sound/firewire/digi00x/amdtp-dot.c
@@ -63,7 +63,7 @@
#define BYTE_PER_SAMPLE (4)
#define MAGIC_DOT_BYTE (2)
#define MAGIC_BYTE_OFF(x) (((x) * BYTE_PER_SAMPLE) + MAGIC_DOT_BYTE)
-static const u8 dot_scrt(const u8 idx, const unsigned int off)
+static u8 dot_scrt(const u8 idx, const unsigned int off)
{
/*
* the length of the added pattern only depends on the lower nibble
diff --git a/sound/firewire/tascam/tascam-transaction.c b/sound/firewire/tascam/tascam-transaction.c
index 904ce03..040a96d 100644
--- a/sound/firewire/tascam/tascam-transaction.c
+++ b/sound/firewire/tascam/tascam-transaction.c
@@ -230,6 +230,7 @@
return err;
error:
fw_core_remove_address_handler(&tscm->async_handler);
+ tscm->async_handler.callback_data = NULL;
return err;
}
@@ -276,6 +277,9 @@
__be32 reg;
unsigned int i;
+ if (tscm->async_handler.callback_data == NULL)
+ return;
+
/* Turn off FireWire LED. */
reg = cpu_to_be32(0x0000008e);
snd_fw_transaction(tscm->unit, TCODE_WRITE_QUADLET_REQUEST,
@@ -297,6 +301,8 @@
®, sizeof(reg), 0);
fw_core_remove_address_handler(&tscm->async_handler);
+ tscm->async_handler.callback_data = NULL;
+
for (i = 0; i < TSCM_MIDI_OUT_PORT_MAX; i++)
snd_fw_async_midi_port_destroy(&tscm->out_ports[i]);
}
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index ee0bc18..e281c33 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -21,7 +21,6 @@
.pcm_playback_analog_channels = 8,
.midi_capture_ports = 4,
.midi_playback_ports = 4,
- .is_controller = true,
},
{
.name = "FW-1082",
@@ -31,9 +30,16 @@
.pcm_playback_analog_channels = 2,
.midi_capture_ports = 2,
.midi_playback_ports = 2,
- .is_controller = true,
},
- /* FW-1804 may be supported. */
+ {
+ .name = "FW-1804",
+ .has_adat = true,
+ .has_spdif = true,
+ .pcm_capture_analog_channels = 8,
+ .pcm_playback_analog_channels = 2,
+ .midi_capture_ports = 2,
+ .midi_playback_ports = 4,
+ },
};
static int identify_model(struct snd_tscm *tscm)
diff --git a/sound/firewire/tascam/tascam.h b/sound/firewire/tascam/tascam.h
index 2d028d2..30ab77e 100644
--- a/sound/firewire/tascam/tascam.h
+++ b/sound/firewire/tascam/tascam.h
@@ -39,7 +39,6 @@
unsigned int pcm_playback_analog_channels;
unsigned int midi_capture_ports;
unsigned int midi_playback_ports;
- bool is_controller;
};
#define TSCM_MIDI_IN_PORT_MAX 4
@@ -72,9 +71,6 @@
struct snd_fw_async_midi_port out_ports[TSCM_MIDI_OUT_PORT_MAX];
u8 running_status[TSCM_MIDI_OUT_PORT_MAX];
bool on_sysex[TSCM_MIDI_OUT_PORT_MAX];
-
- /* For control messages. */
- struct snd_firewire_tascam_status *status;
};
#define TSCM_ADDR_BASE 0xffff00000000ull
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index b5a17cb..8c48623 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -426,18 +426,22 @@
* @bus: HD-audio core bus
* @status: INTSTS register value
* @ack: callback to be called for woken streams
+ *
+ * Returns the bits of handled streams, or zero if no stream is handled.
*/
-void snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
+int snd_hdac_bus_handle_stream_irq(struct hdac_bus *bus, unsigned int status,
void (*ack)(struct hdac_bus *,
struct hdac_stream *))
{
struct hdac_stream *azx_dev;
u8 sd_status;
+ int handled = 0;
list_for_each_entry(azx_dev, &bus->stream_list, list) {
if (status & azx_dev->sd_int_sta_mask) {
sd_status = snd_hdac_stream_readb(azx_dev, SD_STS);
snd_hdac_stream_writeb(azx_dev, SD_STS, SD_INT_MASK);
+ handled |= 1 << azx_dev->index;
if (!azx_dev->substream || !azx_dev->running ||
!(sd_status & SD_INT_COMPLETE))
continue;
@@ -445,6 +449,7 @@
ack(bus, azx_dev);
}
}
+ return handled;
}
EXPORT_SYMBOL_GPL(snd_hdac_bus_handle_stream_irq);
diff --git a/sound/pci/hda/hda_controller.c b/sound/pci/hda/hda_controller.c
index 37cf9ce..27de801 100644
--- a/sound/pci/hda/hda_controller.c
+++ b/sound/pci/hda/hda_controller.c
@@ -930,6 +930,8 @@
struct azx *chip = dev_id;
struct hdac_bus *bus = azx_bus(chip);
u32 status;
+ bool active, handled = false;
+ int repeat = 0; /* count for avoiding endless loop */
#ifdef CONFIG_PM
if (azx_has_pm_runtime(chip))
@@ -939,33 +941,36 @@
spin_lock(&bus->reg_lock);
- if (chip->disabled) {
- spin_unlock(&bus->reg_lock);
- return IRQ_NONE;
- }
+ if (chip->disabled)
+ goto unlock;
- status = azx_readl(chip, INTSTS);
- if (status == 0 || status == 0xffffffff) {
- spin_unlock(&bus->reg_lock);
- return IRQ_NONE;
- }
+ do {
+ status = azx_readl(chip, INTSTS);
+ if (status == 0 || status == 0xffffffff)
+ break;
- snd_hdac_bus_handle_stream_irq(bus, status, stream_update);
+ handled = true;
+ active = false;
+ if (snd_hdac_bus_handle_stream_irq(bus, status, stream_update))
+ active = true;
- /* clear rirb int */
- status = azx_readb(chip, RIRBSTS);
- if (status & RIRB_INT_MASK) {
- if (status & RIRB_INT_RESPONSE) {
- if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
- udelay(80);
- snd_hdac_bus_update_rirb(bus);
+ /* clear rirb int */
+ status = azx_readb(chip, RIRBSTS);
+ if (status & RIRB_INT_MASK) {
+ active = true;
+ if (status & RIRB_INT_RESPONSE) {
+ if (chip->driver_caps & AZX_DCAPS_CTX_WORKAROUND)
+ udelay(80);
+ snd_hdac_bus_update_rirb(bus);
+ }
+ azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
}
- azx_writeb(chip, RIRBSTS, RIRB_INT_MASK);
- }
+ } while (active && ++repeat < 10);
+ unlock:
spin_unlock(&bus->reg_lock);
- return IRQ_HANDLED;
+ return IRQ_RETVAL(handled);
}
EXPORT_SYMBOL_GPL(azx_interrupt);
diff --git a/sound/pci/hda/hda_generic.c b/sound/pci/hda/hda_generic.c
index 30c8efe..7ca5b89 100644
--- a/sound/pci/hda/hda_generic.c
+++ b/sound/pci/hda/hda_generic.c
@@ -4028,9 +4028,9 @@
struct hda_jack_callback *jack,
bool on)
{
- if (jack && jack->tbl->nid)
+ if (jack && jack->nid)
sync_power_state_change(codec,
- set_pin_power_jack(codec, jack->tbl->nid, on));
+ set_pin_power_jack(codec, jack->nid, on));
}
/* callback only doing power up -- called at first */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4045dca..e5240cb 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -363,7 +363,10 @@
((pci)->device == 0x0d0c) || \
((pci)->device == 0x160c))
-#define IS_BROXTON(pci) ((pci)->device == 0x5a98)
+#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
+#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
+#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
+#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
static char *driver_short_names[] = {
[AZX_DRIVER_ICH] = "HDA Intel",
@@ -540,13 +543,13 @@
if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL)
snd_hdac_set_codec_wakeup(bus, true);
- if (IS_BROXTON(pci)) {
+ if (IS_SKL_PLUS(pci)) {
pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
val = val & ~INTEL_HDA_CGCTL_MISCBDCGE;
pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
}
azx_init_chip(chip, full_reset);
- if (IS_BROXTON(pci)) {
+ if (IS_SKL_PLUS(pci)) {
pci_read_config_dword(pci, INTEL_HDA_CGCTL, &val);
val = val | INTEL_HDA_CGCTL_MISCBDCGE;
pci_write_config_dword(pci, INTEL_HDA_CGCTL, val);
@@ -555,7 +558,7 @@
snd_hdac_set_codec_wakeup(bus, false);
/* reduce dma latency to avoid noise */
- if (IS_BROXTON(pci))
+ if (IS_BXT(pci))
bxt_reduce_dma_latency(chip);
}
@@ -977,11 +980,6 @@
/* put codec down to D3 at hibernation for Intel SKL+;
* otherwise BIOS may still access the codec and screw up the driver
*/
-#define IS_SKL(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0xa170)
-#define IS_SKL_LP(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x9d70)
-#define IS_BXT(pci) ((pci)->vendor == 0x8086 && (pci)->device == 0x5a98)
-#define IS_SKL_PLUS(pci) (IS_SKL(pci) || IS_SKL_LP(pci) || IS_BXT(pci))
-
static int azx_freeze_noirq(struct device *dev)
{
struct pci_dev *pci = to_pci_dev(dev);
@@ -2168,10 +2166,10 @@
struct hda_intel *hda;
if (card) {
- /* flush the pending probing work */
+ /* cancel the pending probing work */
chip = card->private_data;
hda = container_of(chip, struct hda_intel, chip);
- flush_work(&hda->probe_work);
+ cancel_work_sync(&hda->probe_work);
snd_card_free(card);
}
diff --git a/sound/pci/hda/hda_jack.c b/sound/pci/hda/hda_jack.c
index c945e25..a33234e 100644
--- a/sound/pci/hda/hda_jack.c
+++ b/sound/pci/hda/hda_jack.c
@@ -259,7 +259,7 @@
if (!callback)
return ERR_PTR(-ENOMEM);
callback->func = func;
- callback->tbl = jack;
+ callback->nid = jack->nid;
callback->next = jack->callback;
jack->callback = callback;
}
diff --git a/sound/pci/hda/hda_jack.h b/sound/pci/hda/hda_jack.h
index 858708a..e9814c0 100644
--- a/sound/pci/hda/hda_jack.h
+++ b/sound/pci/hda/hda_jack.h
@@ -21,7 +21,7 @@
typedef void (*hda_jack_callback_fn) (struct hda_codec *, struct hda_jack_callback *);
struct hda_jack_callback {
- struct hda_jack_tbl *tbl;
+ hda_nid_t nid;
hda_jack_callback_fn func;
unsigned int private_data; /* arbitrary data */
struct hda_jack_callback *next;
diff --git a/sound/pci/hda/patch_ca0132.c b/sound/pci/hda/patch_ca0132.c
index 4ef2259..9ceb2bc 100644
--- a/sound/pci/hda/patch_ca0132.c
+++ b/sound/pci/hda/patch_ca0132.c
@@ -4427,13 +4427,16 @@
static void hp_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
{
struct ca0132_spec *spec = codec->spec;
+ struct hda_jack_tbl *tbl;
/* Delay enabling the HP amp, to let the mic-detection
* state machine run.
*/
cancel_delayed_work_sync(&spec->unsol_hp_work);
schedule_delayed_work(&spec->unsol_hp_work, msecs_to_jiffies(500));
- cb->tbl->block_report = 1;
+ tbl = snd_hda_jack_tbl_get(codec, cb->nid);
+ if (tbl)
+ tbl->block_report = 1;
}
static void amic_callback(struct hda_codec *codec, struct hda_jack_callback *cb)
diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
index 1f52b55..8ee78db 100644
--- a/sound/pci/hda/patch_hdmi.c
+++ b/sound/pci/hda/patch_hdmi.c
@@ -448,7 +448,8 @@
eld = &per_pin->sink_eld;
mutex_lock(&per_pin->lock);
- if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data)) {
+ if (eld->eld_size > ARRAY_SIZE(ucontrol->value.bytes.data) ||
+ eld->eld_size > ELD_MAX_SIZE) {
mutex_unlock(&per_pin->lock);
snd_BUG();
return -EINVAL;
@@ -1193,7 +1194,7 @@
static void jack_callback(struct hda_codec *codec,
struct hda_jack_callback *jack)
{
- check_presence_and_report(codec, jack->tbl->nid);
+ check_presence_and_report(codec, jack->nid);
}
static void hdmi_intrinsic_event(struct hda_codec *codec, unsigned int res)
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index 21992fb..1f357cd 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -282,7 +282,7 @@
uctl = kzalloc(sizeof(*uctl), GFP_KERNEL);
if (!uctl)
return;
- val = snd_hda_codec_read(codec, jack->tbl->nid, 0,
+ val = snd_hda_codec_read(codec, jack->nid, 0,
AC_VERB_GET_VOLUME_KNOB_CONTROL, 0);
val &= HDA_AMP_VOLMASK;
uctl->value.integer.value[0] = val;
@@ -1787,7 +1787,6 @@
ALC882_FIXUP_NO_PRIMARY_HP,
ALC887_FIXUP_ASUS_BASS,
ALC887_FIXUP_BASS_CHMAP,
- ALC882_FIXUP_DISABLE_AAMIX,
};
static void alc889_fixup_coef(struct hda_codec *codec,
@@ -1949,8 +1948,6 @@
static void alc_fixup_bass_chmap(struct hda_codec *codec,
const struct hda_fixup *fix, int action);
-static void alc_fixup_disable_aamix(struct hda_codec *codec,
- const struct hda_fixup *fix, int action);
static const struct hda_fixup alc882_fixups[] = {
[ALC882_FIXUP_ABIT_AW9D_MAX] = {
@@ -2188,10 +2185,6 @@
.type = HDA_FIXUP_FUNC,
.v.func = alc_fixup_bass_chmap,
},
- [ALC882_FIXUP_DISABLE_AAMIX] = {
- .type = HDA_FIXUP_FUNC,
- .v.func = alc_fixup_disable_aamix,
- },
};
static const struct snd_pci_quirk alc882_fixup_tbl[] = {
@@ -2230,6 +2223,7 @@
SND_PCI_QUIRK(0x104d, 0x9047, "Sony Vaio TT", ALC889_FIXUP_VAIO_TT),
SND_PCI_QUIRK(0x104d, 0x905a, "Sony Vaio Z", ALC882_FIXUP_NO_PRIMARY_HP),
SND_PCI_QUIRK(0x104d, 0x9043, "Sony Vaio VGC-LN51JGB", ALC882_FIXUP_NO_PRIMARY_HP),
+ SND_PCI_QUIRK(0x104d, 0x9044, "Sony VAIO AiO", ALC882_FIXUP_NO_PRIMARY_HP),
/* All Apple entries are in codec SSIDs */
SND_PCI_QUIRK(0x106b, 0x00a0, "MacBookPro 3,1", ALC889_FIXUP_MBP_VREF),
@@ -2259,7 +2253,6 @@
SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD),
SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3),
SND_PCI_QUIRK(0x1458, 0xa002, "Gigabyte EP45-DS3/Z87X-UD3H", ALC889_FIXUP_FRONT_HP_NO_PRESENCE),
- SND_PCI_QUIRK(0x1458, 0xa182, "Gigabyte Z170X-UD3", ALC882_FIXUP_DISABLE_AAMIX),
SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX),
SND_PCI_QUIRK_VENDOR(0x1558, "Clevo laptop", ALC882_FIXUP_EAPD),
SND_PCI_QUIRK(0x161f, 0x2054, "Medion laptop", ALC883_FIXUP_EAPD),
@@ -3808,6 +3801,10 @@
static void alc_headset_mode_default(struct hda_codec *codec)
{
+ static struct coef_fw coef0225[] = {
+ UPDATE_COEF(0x45, 0x3f<<10, 0x34<<10),
+ {}
+ };
static struct coef_fw coef0255[] = {
WRITE_COEF(0x45, 0xc089),
WRITE_COEF(0x45, 0xc489),
@@ -3849,6 +3846,9 @@
};
switch (codec->core.vendor_id) {
+ case 0x10ec0225:
+ alc_process_coef_fw(codec, coef0225);
+ break;
case 0x10ec0255:
case 0x10ec0256:
alc_process_coef_fw(codec, coef0255);
@@ -4756,6 +4756,9 @@
ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE,
ALC293_FIXUP_LENOVO_SPK_NOISE,
ALC233_FIXUP_LENOVO_LINE2_MIC_HOTKEY,
+ ALC255_FIXUP_DELL_SPK_NOISE,
+ ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
+ ALC280_FIXUP_HP_HEADSET_MIC,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -5375,6 +5378,29 @@
.type = HDA_FIXUP_FUNC,
.v.func = alc233_fixup_lenovo_line2_mic_hotkey,
},
+ [ALC255_FIXUP_DELL_SPK_NOISE] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_disable_aamix,
+ .chained = true,
+ .chain_id = ALC255_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
+ [ALC225_FIXUP_DELL1_MIC_NO_PRESENCE] = {
+ .type = HDA_FIXUP_VERBS,
+ .v.verbs = (const struct hda_verb[]) {
+ /* Disable pass-through path for FRONT 14h */
+ { 0x20, AC_VERB_SET_COEF_INDEX, 0x36 },
+ { 0x20, AC_VERB_SET_PROC_COEF, 0x57d7 },
+ {}
+ },
+ .chained = true,
+ .chain_id = ALC269_FIXUP_DELL1_MIC_NO_PRESENCE
+ },
+ [ALC280_FIXUP_HP_HEADSET_MIC] = {
+ .type = HDA_FIXUP_FUNC,
+ .v.func = alc_fixup_disable_aamix,
+ .chained = true,
+ .chain_id = ALC269_FIXUP_HEADSET_MIC,
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5417,6 +5443,7 @@
SND_PCI_QUIRK(0x1028, 0x06df, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x06e0, "Dell", ALC293_FIXUP_DISABLE_AAMIX_MULTIJACK),
SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x164a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1028, 0x164b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x1586, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC2),
@@ -5477,6 +5504,7 @@
SND_PCI_QUIRK(0x103c, 0x2335, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
+ SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
SND_PCI_QUIRK(0x1043, 0x106d, "Asus K53BE", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
SND_PCI_QUIRK(0x1043, 0x115d, "Asus 1015E", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
@@ -5645,10 +5673,10 @@
{0x21, 0x03211020}
static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
- SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC225_STANDARD_PINS,
{0x14, 0x901701a0}),
- SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
+ SND_HDA_PIN_QUIRK(0x10ec0225, 0x1028, "Dell", ALC225_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC225_STANDARD_PINS,
{0x14, 0x901701b0}),
SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell", ALC255_FIXUP_DELL2_MIC_NO_PRESENCE,
diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
index 2c7c5eb..37b70f8 100644
--- a/sound/pci/hda/patch_sigmatel.c
+++ b/sound/pci/hda/patch_sigmatel.c
@@ -493,9 +493,9 @@
if (!spec->num_pwrs)
return;
- if (jack && jack->tbl->nid) {
- stac_toggle_power_map(codec, jack->tbl->nid,
- snd_hda_jack_detect(codec, jack->tbl->nid),
+ if (jack && jack->nid) {
+ stac_toggle_power_map(codec, jack->nid,
+ snd_hda_jack_detect(codec, jack->nid),
true);
return;
}
diff --git a/sound/soc/amd/acp-pcm-dma.c b/sound/soc/amd/acp-pcm-dma.c
index 3191e0a..d1fb035 100644
--- a/sound/soc/amd/acp-pcm-dma.c
+++ b/sound/soc/amd/acp-pcm-dma.c
@@ -635,6 +635,7 @@
SNDRV_PCM_HW_PARAM_PERIODS);
if (ret < 0) {
dev_err(prtd->platform->dev, "set integer constraint failed\n");
+ kfree(adata);
return ret;
}
diff --git a/sound/soc/codecs/arizona.c b/sound/soc/codecs/arizona.c
index 33143fe..9178531 100644
--- a/sound/soc/codecs/arizona.c
+++ b/sound/soc/codecs/arizona.c
@@ -1929,6 +1929,25 @@
{ 1000000, 13500000, 0, 1 },
};
+static const unsigned int pseudo_fref_max[ARIZONA_FLL_MAX_FRATIO] = {
+ 13500000,
+ 6144000,
+ 6144000,
+ 3072000,
+ 3072000,
+ 2822400,
+ 2822400,
+ 1536000,
+ 1536000,
+ 1536000,
+ 1536000,
+ 1536000,
+ 1536000,
+ 1536000,
+ 1536000,
+ 768000,
+};
+
static struct {
unsigned int min;
unsigned int max;
@@ -2042,16 +2061,32 @@
/* Adjust FRATIO/refdiv to avoid integer mode if possible */
refdiv = cfg->refdiv;
+ arizona_fll_dbg(fll, "pseudo: initial ratio=%u fref=%u refdiv=%u\n",
+ init_ratio, Fref, refdiv);
+
while (div <= ARIZONA_FLL_MAX_REFDIV) {
for (ratio = init_ratio; ratio <= ARIZONA_FLL_MAX_FRATIO;
ratio++) {
if ((ARIZONA_FLL_VCO_CORNER / 2) /
- (fll->vco_mult * ratio) < Fref)
+ (fll->vco_mult * ratio) < Fref) {
+ arizona_fll_dbg(fll, "pseudo: hit VCO corner\n");
break;
+ }
+
+ if (Fref > pseudo_fref_max[ratio - 1]) {
+ arizona_fll_dbg(fll,
+ "pseudo: exceeded max fref(%u) for ratio=%u\n",
+ pseudo_fref_max[ratio - 1],
+ ratio);
+ break;
+ }
if (target % (ratio * Fref)) {
cfg->refdiv = refdiv;
cfg->fratio = ratio - 1;
+ arizona_fll_dbg(fll,
+ "pseudo: found fref=%u refdiv=%d(%d) ratio=%d\n",
+ Fref, refdiv, div, ratio);
return ratio;
}
}
@@ -2060,6 +2095,9 @@
if (target % (ratio * Fref)) {
cfg->refdiv = refdiv;
cfg->fratio = ratio - 1;
+ arizona_fll_dbg(fll,
+ "pseudo: found fref=%u refdiv=%d(%d) ratio=%d\n",
+ Fref, refdiv, div, ratio);
return ratio;
}
}
@@ -2068,6 +2106,9 @@
Fref /= 2;
refdiv++;
init_ratio = arizona_find_fratio(Fref, NULL);
+ arizona_fll_dbg(fll,
+ "pseudo: change fref=%u refdiv=%d(%d) ratio=%u\n",
+ Fref, refdiv, div, init_ratio);
}
arizona_fll_warn(fll, "Falling back to integer mode operation\n");
diff --git a/sound/soc/codecs/rt286.c b/sound/soc/codecs/rt286.c
index bc08f0c..1bd3164 100644
--- a/sound/soc/codecs/rt286.c
+++ b/sound/soc/codecs/rt286.c
@@ -266,6 +266,8 @@
} else {
*mic = false;
regmap_write(rt286->regmap, RT286_SET_MIC1, 0x20);
+ regmap_update_bits(rt286->regmap,
+ RT286_CBJ_CTRL1, 0x0400, 0x0000);
}
} else {
regmap_read(rt286->regmap, RT286_GET_HP_SENSE, &buf);
@@ -470,24 +472,6 @@
return 0;
}
-static int rt286_vref_event(struct snd_soc_dapm_widget *w,
- struct snd_kcontrol *kcontrol, int event)
-{
- struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);
-
- switch (event) {
- case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec,
- RT286_CBJ_CTRL1, 0x0400, 0x0000);
- mdelay(50);
- break;
- default:
- return 0;
- }
-
- return 0;
-}
-
static int rt286_ldo2_event(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -536,7 +520,7 @@
SND_SOC_DAPM_SUPPLY_S("HV", 1, RT286_POWER_CTRL1,
12, 1, NULL, 0),
SND_SOC_DAPM_SUPPLY("VREF", RT286_POWER_CTRL1,
- 0, 1, rt286_vref_event, SND_SOC_DAPM_PRE_PMU),
+ 0, 1, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("LDO1", 1, RT286_POWER_CTRL2,
2, 0, NULL, 0),
SND_SOC_DAPM_SUPPLY_S("LDO2", 2, RT286_POWER_CTRL1,
@@ -911,8 +895,6 @@
case SND_SOC_BIAS_ON:
mdelay(10);
snd_soc_update_bits(codec,
- RT286_CBJ_CTRL1, 0x0400, 0x0400);
- snd_soc_update_bits(codec,
RT286_DC_GAIN, 0x200, 0x0);
break;
@@ -920,8 +902,6 @@
case SND_SOC_BIAS_STANDBY:
snd_soc_write(codec,
RT286_SET_AUDIO_POWER, AC_PWRST_D3);
- snd_soc_update_bits(codec,
- RT286_CBJ_CTRL1, 0x0400, 0x0000);
break;
default:
diff --git a/sound/soc/codecs/rt5645.c b/sound/soc/codecs/rt5645.c
index c61d38b..93e8c90 100644
--- a/sound/soc/codecs/rt5645.c
+++ b/sound/soc/codecs/rt5645.c
@@ -776,7 +776,7 @@
/* IN1/IN2 Control */
SOC_SINGLE_TLV("IN1 Boost", RT5645_IN1_CTRL1,
- RT5645_BST_SFT1, 8, 0, bst_tlv),
+ RT5645_BST_SFT1, 12, 0, bst_tlv),
SOC_SINGLE_TLV("IN2 Boost", RT5645_IN2_CTRL,
RT5645_BST_SFT2, 8, 0, bst_tlv),
diff --git a/sound/soc/codecs/rt5659.c b/sound/soc/codecs/rt5659.c
index 820d8fa..fb8ea05 100644
--- a/sound/soc/codecs/rt5659.c
+++ b/sound/soc/codecs/rt5659.c
@@ -3985,7 +3985,6 @@
if (rt5659 == NULL)
return -ENOMEM;
- rt5659->i2c = i2c;
i2c_set_clientdata(i2c, rt5659);
if (pdata)
@@ -4157,24 +4156,17 @@
INIT_DELAYED_WORK(&rt5659->jack_detect_work, rt5659_jack_detect_work);
- if (rt5659->i2c->irq) {
- ret = request_threaded_irq(rt5659->i2c->irq, NULL, rt5659_irq,
- IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
+ if (i2c->irq) {
+ ret = devm_request_threaded_irq(&i2c->dev, i2c->irq, NULL,
+ rt5659_irq, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING
| IRQF_ONESHOT, "rt5659", rt5659);
if (ret)
dev_err(&i2c->dev, "Failed to reguest IRQ: %d\n", ret);
}
- ret = snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5659,
+ return snd_soc_register_codec(&i2c->dev, &soc_codec_dev_rt5659,
rt5659_dai, ARRAY_SIZE(rt5659_dai));
-
- if (ret) {
- if (rt5659->i2c->irq)
- free_irq(rt5659->i2c->irq, rt5659);
- }
-
- return 0;
}
static int rt5659_i2c_remove(struct i2c_client *i2c)
@@ -4191,24 +4183,29 @@
regmap_write(rt5659->regmap, RT5659_RESET, 0);
}
+#ifdef CONFIG_OF
static const struct of_device_id rt5659_of_match[] = {
{ .compatible = "realtek,rt5658", },
{ .compatible = "realtek,rt5659", },
- {},
+ { },
};
+MODULE_DEVICE_TABLE(of, rt5659_of_match);
+#endif
+#ifdef CONFIG_ACPI
static struct acpi_device_id rt5659_acpi_match[] = {
- { "10EC5658", 0},
- { "10EC5659", 0},
- { },
+ { "10EC5658", 0, },
+ { "10EC5659", 0, },
+ { },
};
MODULE_DEVICE_TABLE(acpi, rt5659_acpi_match);
+#endif
struct i2c_driver rt5659_i2c_driver = {
.driver = {
.name = "rt5659",
.owner = THIS_MODULE,
- .of_match_table = rt5659_of_match,
+ .of_match_table = of_match_ptr(rt5659_of_match),
.acpi_match_table = ACPI_PTR(rt5659_acpi_match),
},
.probe = rt5659_i2c_probe,
diff --git a/sound/soc/codecs/rt5659.h b/sound/soc/codecs/rt5659.h
index 8f07ee9..d31c9e5 100644
--- a/sound/soc/codecs/rt5659.h
+++ b/sound/soc/codecs/rt5659.h
@@ -1792,7 +1792,6 @@
struct snd_soc_codec *codec;
struct rt5659_platform_data pdata;
struct regmap *regmap;
- struct i2c_client *i2c;
struct gpio_desc *gpiod_ldo1_en;
struct gpio_desc *gpiod_reset;
struct snd_soc_jack *hs_jack;
diff --git a/sound/soc/codecs/sigmadsp-i2c.c b/sound/soc/codecs/sigmadsp-i2c.c
index 21ca3a5..d374c18 100644
--- a/sound/soc/codecs/sigmadsp-i2c.c
+++ b/sound/soc/codecs/sigmadsp-i2c.c
@@ -31,7 +31,10 @@
kfree(buf);
- return ret;
+ if (ret < 0)
+ return ret;
+
+ return 0;
}
static int sigmadsp_read_i2c(void *control_data,
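The hunk above exists because i2c_master_send() returns the number of bytes written on success, not zero, while regmap-style callers expect 0/-errno. A minimal sketch of the convention (function name illustrative):

    static int sigma_i2c_write(struct i2c_client *client,
                               const char *buf, int len)
    {
        int ret = i2c_master_send(client, buf, len);

        if (ret < 0)
            return ret;   /* -errno from the adapter */
        return 0;         /* success: discard the byte count */
    }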
diff --git a/sound/soc/codecs/wm5110.c b/sound/soc/codecs/wm5110.c
index 6088d30..97c0f1e 100644
--- a/sound/soc/codecs/wm5110.c
+++ b/sound/soc/codecs/wm5110.c
@@ -2382,6 +2382,7 @@
static int wm5110_remove(struct platform_device *pdev)
{
+ snd_soc_unregister_platform(&pdev->dev);
snd_soc_unregister_codec(&pdev->dev);
pm_runtime_disable(&pdev->dev);
diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c
index ff23772..d7f444f 100644
--- a/sound/soc/codecs/wm8960.c
+++ b/sound/soc/codecs/wm8960.c
@@ -240,13 +240,13 @@
SOC_DOUBLE_R("Capture Switch", WM8960_LINVOL, WM8960_RINVOL,
7, 1, 1),
-SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
- WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
-SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume",
- WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv),
SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT3 Volume",
- WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv),
+ WM8960_INBMIX1, 4, 7, 0, lineinboost_tlv),
SOC_SINGLE_TLV("Left Input Boost Mixer LINPUT2 Volume",
+ WM8960_INBMIX1, 1, 7, 0, lineinboost_tlv),
+SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT3 Volume",
+ WM8960_INBMIX2, 4, 7, 0, lineinboost_tlv),
+SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT2 Volume",
WM8960_INBMIX2, 1, 7, 0, lineinboost_tlv),
SOC_SINGLE_TLV("Right Input Boost Mixer RINPUT1 Volume",
WM8960_RINPATH, 4, 3, 0, micboost_tlv),
@@ -643,29 +643,31 @@
return -EINVAL;
}
- /* check if the sysclk frequency is available. */
- for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
- if (sysclk_divs[i] == -1)
- continue;
- sysclk = freq_out / sysclk_divs[i];
- for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
- if (sysclk == dac_divs[j] * lrclk) {
+ if (wm8960->clk_id != WM8960_SYSCLK_PLL) {
+ /* check if the sysclk frequency is available. */
+ for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
+ if (sysclk_divs[i] == -1)
+ continue;
+ sysclk = freq_out / sysclk_divs[i];
+ for (j = 0; j < ARRAY_SIZE(dac_divs); ++j) {
+ if (sysclk != dac_divs[j] * lrclk)
+ continue;
for (k = 0; k < ARRAY_SIZE(bclk_divs); ++k)
if (sysclk == bclk * bclk_divs[k] / 10)
break;
if (k != ARRAY_SIZE(bclk_divs))
break;
}
+ if (j != ARRAY_SIZE(dac_divs))
+ break;
}
- if (j != ARRAY_SIZE(dac_divs))
- break;
- }
- if (i != ARRAY_SIZE(sysclk_divs)) {
- goto configure_clock;
- } else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) {
- dev_err(codec->dev, "failed to configure clock\n");
- return -EINVAL;
+ if (i != ARRAY_SIZE(sysclk_divs)) {
+ goto configure_clock;
+ } else if (wm8960->clk_id != WM8960_SYSCLK_AUTO) {
+ dev_err(codec->dev, "failed to configure clock\n");
+ return -EINVAL;
+ }
}
/* get a available pll out frequency and set pll */
for (i = 0; i < ARRAY_SIZE(sysclk_divs); ++i) {
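For intuition, a worked pass of the direct-divider search above (numbers illustrative; the real candidates come from the sysclk_divs/dac_divs/bclk_divs tables):

    /* freq_out = 12288000, lrclk = 48000, bclk = 3072000:
     *   sysclk = 12288000 / 1               -> 12288000
     *   dac:    12288000 == 256 * 48000     -> match
     *   bclk:   12288000 == 3072000 * 40/10 -> match (4.0 divider)
     * Only when no such triple exists -- or clk_id selects the PLL
     * outright -- does control reach the PLL search below.
     */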
diff --git a/sound/soc/dwc/designware_i2s.c b/sound/soc/dwc/designware_i2s.c
index ce664c2..bff258d 100644
--- a/sound/soc/dwc/designware_i2s.c
+++ b/sound/soc/dwc/designware_i2s.c
@@ -645,6 +645,8 @@
dev->dev = &pdev->dev;
+ dev->i2s_reg_comp1 = I2S_COMP_PARAM_1;
+ dev->i2s_reg_comp2 = I2S_COMP_PARAM_2;
if (pdata) {
dev->capability = pdata->cap;
clk_id = NULL;
@@ -652,9 +654,6 @@
if (dev->quirks & DW_I2S_QUIRK_COMP_REG_OFFSET) {
dev->i2s_reg_comp1 = pdata->i2s_reg_comp1;
dev->i2s_reg_comp2 = pdata->i2s_reg_comp2;
- } else {
- dev->i2s_reg_comp1 = I2S_COMP_PARAM_1;
- dev->i2s_reg_comp2 = I2S_COMP_PARAM_2;
}
ret = dw_configure_dai_by_pd(dev, dw_i2s_dai, res, pdata);
} else {
diff --git a/sound/soc/fsl/fsl_ssi.c b/sound/soc/fsl/fsl_ssi.c
index 40dfd8a..ed8de10 100644
--- a/sound/soc/fsl/fsl_ssi.c
+++ b/sound/soc/fsl/fsl_ssi.c
@@ -112,20 +112,6 @@
struct fsl_ssi_reg_val tx;
};
-static const struct reg_default fsl_ssi_reg_defaults[] = {
- {CCSR_SSI_SCR, 0x00000000},
- {CCSR_SSI_SIER, 0x00003003},
- {CCSR_SSI_STCR, 0x00000200},
- {CCSR_SSI_SRCR, 0x00000200},
- {CCSR_SSI_STCCR, 0x00040000},
- {CCSR_SSI_SRCCR, 0x00040000},
- {CCSR_SSI_SACNT, 0x00000000},
- {CCSR_SSI_STMSK, 0x00000000},
- {CCSR_SSI_SRMSK, 0x00000000},
- {CCSR_SSI_SACCEN, 0x00000000},
- {CCSR_SSI_SACCDIS, 0x00000000},
-};
-
static bool fsl_ssi_readable_reg(struct device *dev, unsigned int reg)
{
switch (reg) {
@@ -190,8 +176,7 @@
.val_bits = 32,
.reg_stride = 4,
.val_format_endian = REGMAP_ENDIAN_NATIVE,
- .reg_defaults = fsl_ssi_reg_defaults,
- .num_reg_defaults = ARRAY_SIZE(fsl_ssi_reg_defaults),
+ .num_reg_defaults_raw = CCSR_SSI_SACCDIS / sizeof(uint32_t) + 1,
.readable_reg = fsl_ssi_readable_reg,
.volatile_reg = fsl_ssi_volatile_reg,
.precious_reg = fsl_ssi_precious_reg,
@@ -201,6 +186,7 @@
struct fsl_ssi_soc_data {
bool imx;
+ bool imx21regs; /* imx21-class SSI - no SACC{ST,EN,DIS} regs */
bool offline_config;
u32 sisr_write_mask;
};
@@ -303,6 +289,7 @@
static struct fsl_ssi_soc_data fsl_ssi_imx21 = {
.imx = true,
+ .imx21regs = true,
.offline_config = true,
.sisr_write_mask = 0,
};
@@ -586,8 +573,12 @@
*/
regmap_write(regs, CCSR_SSI_SACNT,
CCSR_SSI_SACNT_AC97EN | CCSR_SSI_SACNT_FV);
- regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
- regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
+
+ /* no SACC{ST,EN,DIS} regs on imx21-class SSI */
+ if (!ssi_private->soc->imx21regs) {
+ regmap_write(regs, CCSR_SSI_SACCDIS, 0xff);
+ regmap_write(regs, CCSR_SSI_SACCEN, 0x300);
+ }
/*
* Enable SSI, Transmit and Receive. AC97 has to communicate with the
@@ -1397,6 +1388,7 @@
struct resource *res;
void __iomem *iomem;
char name[64];
+ struct regmap_config regconfig = fsl_ssi_regconfig;
of_id = of_match_device(fsl_ssi_ids, &pdev->dev);
if (!of_id || !of_id->data)
@@ -1444,15 +1436,25 @@
return PTR_ERR(iomem);
ssi_private->ssi_phys = res->start;
+ if (ssi_private->soc->imx21regs) {
+ /*
+ * According to the datasheet, imx21-class SSIs
+ * don't have the SACC{ST,EN,DIS} regs.
+ */
+ regconfig.max_register = CCSR_SSI_SRMSK;
+ regconfig.num_reg_defaults_raw =
+ CCSR_SSI_SRMSK / sizeof(uint32_t) + 1;
+ }
+
ret = of_property_match_string(np, "clock-names", "ipg");
if (ret < 0) {
ssi_private->has_ipg_clk_name = false;
ssi_private->regs = devm_regmap_init_mmio(&pdev->dev, iomem,
- &fsl_ssi_regconfig);
+ &regconfig);
} else {
ssi_private->has_ipg_clk_name = true;
ssi_private->regs = devm_regmap_init_mmio_clk(&pdev->dev,
- "ipg", iomem, &fsl_ssi_regconfig);
+ "ipg", iomem, ®config);
}
if (IS_ERR(ssi_private->regs)) {
dev_err(&pdev->dev, "Failed to init register map\n");
diff --git a/sound/soc/fsl/imx-spdif.c b/sound/soc/fsl/imx-spdif.c
index a407e83..fb896b2 100644
--- a/sound/soc/fsl/imx-spdif.c
+++ b/sound/soc/fsl/imx-spdif.c
@@ -72,8 +72,6 @@
goto end;
}
- platform_set_drvdata(pdev, data);
-
end:
of_node_put(spdif_np);
diff --git a/sound/soc/generic/simple-card.c b/sound/soc/generic/simple-card.c
index 1ded881..2389ab4 100644
--- a/sound/soc/generic/simple-card.c
+++ b/sound/soc/generic/simple-card.c
@@ -99,7 +99,7 @@
if (ret && ret != -ENOTSUPP)
goto err;
}
-
+ return 0;
err:
return ret;
}
diff --git a/sound/soc/intel/Kconfig b/sound/soc/intel/Kconfig
index 803f95e..7d7c872 100644
--- a/sound/soc/intel/Kconfig
+++ b/sound/soc/intel/Kconfig
@@ -30,11 +30,15 @@
config SND_SOC_INTEL_SST
tristate
select SND_SOC_INTEL_SST_ACPI if ACPI
+ select SND_SOC_INTEL_SST_MATCH if ACPI
depends on (X86 || COMPILE_TEST)
config SND_SOC_INTEL_SST_ACPI
tristate
+config SND_SOC_INTEL_SST_MATCH
+ tristate
+
config SND_SOC_INTEL_HASWELL
tristate
@@ -57,7 +61,7 @@
config SND_SOC_INTEL_BYT_RT5640_MACH
tristate "ASoC Audio driver for Intel Baytrail with RT5640 codec"
depends on X86_INTEL_LPSS && I2C
- depends on DW_DMAC_CORE=y && (SND_SOC_INTEL_BYTCR_RT5640_MACH = n)
+ depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n)
select SND_SOC_INTEL_SST
select SND_SOC_INTEL_BAYTRAIL
select SND_SOC_RT5640
@@ -69,7 +73,7 @@
config SND_SOC_INTEL_BYT_MAX98090_MACH
tristate "ASoC Audio driver for Intel Baytrail with MAX98090 codec"
depends on X86_INTEL_LPSS && I2C
- depends on DW_DMAC_CORE=y
+ depends on DW_DMAC_CORE=y && (SND_SST_IPC_ACPI = n)
select SND_SOC_INTEL_SST
select SND_SOC_INTEL_BAYTRAIL
select SND_SOC_MAX98090
@@ -97,6 +101,7 @@
select SND_SOC_RT5640
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
+ select SND_SOC_INTEL_SST_MATCH if ACPI
help
This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
platforms with RT5640 audio codec.
@@ -109,6 +114,7 @@
select SND_SOC_RT5651
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
+ select SND_SOC_INTEL_SST_MATCH if ACPI
help
This adds support for ASoC machine driver for Intel(R) Baytrail and Baytrail-CR
platforms with RT5651 audio codec.
@@ -121,6 +127,7 @@
select SND_SOC_RT5670
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
+ select SND_SOC_INTEL_SST_MATCH if ACPI
help
This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
platforms with RT5672 audio codec.
@@ -133,6 +140,7 @@
select SND_SOC_RT5645
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
+ select SND_SOC_INTEL_SST_MATCH if ACPI
help
This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
platforms with RT5645/5650 audio codec.
@@ -145,6 +153,7 @@
select SND_SOC_TS3A227E
select SND_SST_MFLD_PLATFORM
select SND_SST_IPC_ACPI
+ select SND_SOC_INTEL_SST_MATCH if ACPI
help
This adds support for ASoC machine driver for Intel(R) Cherrytrail & Braswell
platforms with MAX98090 audio codec it also can support TI jack chip as aux device.
diff --git a/sound/soc/intel/atom/sst-mfld-platform-pcm.c b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
index 55c33dc7..52ed434 100644
--- a/sound/soc/intel/atom/sst-mfld-platform-pcm.c
+++ b/sound/soc/intel/atom/sst-mfld-platform-pcm.c
@@ -528,6 +528,7 @@
.ops = &sst_compr_dai_ops,
.playback = {
.stream_name = "Compress Playback",
+ .channels_min = 1,
},
},
/* BE CPU Dais */
diff --git a/sound/soc/intel/boards/skl_rt286.c b/sound/soc/intel/boards/skl_rt286.c
index 7396ddb..2cbcbe4 100644
--- a/sound/soc/intel/boards/skl_rt286.c
+++ b/sound/soc/intel/boards/skl_rt286.c
@@ -212,7 +212,10 @@
{
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
- channels->min = channels->max = 4;
+ if (params_channels(params) == 2)
+ channels->min = channels->max = 2;
+ else
+ channels->min = channels->max = 4;
return 0;
}
diff --git a/sound/soc/intel/common/Makefile b/sound/soc/intel/common/Makefile
index 668fdee..fbbb25c 100644
--- a/sound/soc/intel/common/Makefile
+++ b/sound/soc/intel/common/Makefile
@@ -1,13 +1,10 @@
snd-soc-sst-dsp-objs := sst-dsp.o
-ifneq ($(CONFIG_SND_SST_IPC_ACPI),)
-snd-soc-sst-acpi-objs := sst-match-acpi.o
-else
-snd-soc-sst-acpi-objs := sst-acpi.o sst-match-acpi.o
-endif
-
+snd-soc-sst-acpi-objs := sst-acpi.o
+snd-soc-sst-match-objs := sst-match-acpi.o
snd-soc-sst-ipc-objs := sst-ipc.o
snd-soc-sst-dsp-$(CONFIG_DW_DMAC_CORE) += sst-firmware.o
obj-$(CONFIG_SND_SOC_INTEL_SST) += snd-soc-sst-dsp.o snd-soc-sst-ipc.o
obj-$(CONFIG_SND_SOC_INTEL_SST_ACPI) += snd-soc-sst-acpi.o
+obj-$(CONFIG_SND_SOC_INTEL_SST_MATCH) += snd-soc-sst-match.o
diff --git a/sound/soc/intel/common/sst-acpi.c b/sound/soc/intel/common/sst-acpi.c
index 7a85c57..2c5eda1 100644
--- a/sound/soc/intel/common/sst-acpi.c
+++ b/sound/soc/intel/common/sst-acpi.c
@@ -215,6 +215,7 @@
.dma_size = SST_LPT_DSP_DMA_SIZE,
};
+#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI)
static struct sst_acpi_mach baytrail_machines[] = {
{ "10EC5640", "byt-rt5640", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL },
{ "193C9890", "byt-max98090", "intel/fw_sst_0f28.bin-48kHz_i2s_master", NULL, NULL, NULL },
@@ -231,11 +232,14 @@
.sst_id = SST_DEV_ID_BYT,
.resindex_dma_base = -1,
};
+#endif
static const struct acpi_device_id sst_acpi_match[] = {
{ "INT33C8", (unsigned long)&sst_acpi_haswell_desc },
{ "INT3438", (unsigned long)&sst_acpi_broadwell_desc },
+#if !IS_ENABLED(CONFIG_SND_SST_IPC_ACPI)
{ "80860F28", (unsigned long)&sst_acpi_baytrail_desc },
+#endif
{ }
};
MODULE_DEVICE_TABLE(acpi, sst_acpi_match);
diff --git a/sound/soc/intel/common/sst-match-acpi.c b/sound/soc/intel/common/sst-match-acpi.c
index dd077e1..3b4539d 100644
--- a/sound/soc/intel/common/sst-match-acpi.c
+++ b/sound/soc/intel/common/sst-match-acpi.c
@@ -41,3 +41,6 @@
return NULL;
}
EXPORT_SYMBOL_GPL(sst_acpi_find_machine);
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("Intel Common ACPI Match module");
diff --git a/sound/soc/intel/skylake/skl-messages.c b/sound/soc/intel/skylake/skl-messages.c
index de6dac4..4629372 100644
--- a/sound/soc/intel/skylake/skl-messages.c
+++ b/sound/soc/intel/skylake/skl-messages.c
@@ -688,14 +688,14 @@
/* get src queue index */
src_index = skl_get_queue_index(src_mcfg->m_out_pin, dst_id, out_max);
if (src_index < 0)
- return -EINVAL;
+ return 0;
msg.src_queue = src_index;
/* get dst queue index */
dst_index = skl_get_queue_index(dst_mcfg->m_in_pin, src_id, in_max);
if (dst_index < 0)
- return -EINVAL;
+ return 0;
msg.dst_queue = dst_index;
@@ -747,7 +747,7 @@
skl_dump_bind_info(ctx, src_mcfg, dst_mcfg);
- if (src_mcfg->m_state < SKL_MODULE_INIT_DONE &&
+ if (src_mcfg->m_state < SKL_MODULE_INIT_DONE ||
dst_mcfg->m_state < SKL_MODULE_INIT_DONE)
return 0;
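A quick truth check on the operator change above:

    /* Old: src < INIT_DONE && dst < INIT_DONE -> skip bind;
     *      a pair with only one uninitialized side fell through
     *      to the bind IPC.
     * New: src < INIT_DONE || dst < INIT_DONE -> return 0;
     *      bind is deferred until both modules have reached
     *      SKL_MODULE_INIT_DONE.
     */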
diff --git a/sound/soc/intel/skylake/skl-pcm.c b/sound/soc/intel/skylake/skl-pcm.c
index f355325..b6e6b61 100644
--- a/sound/soc/intel/skylake/skl-pcm.c
+++ b/sound/soc/intel/skylake/skl-pcm.c
@@ -863,6 +863,7 @@
else
delay += hstream->bufsize;
}
+ delay = (hstream->bufsize == delay) ? 0 : delay;
if (delay >= hstream->period_bytes) {
dev_info(bus->dev,
diff --git a/sound/soc/intel/skylake/skl-topology.c b/sound/soc/intel/skylake/skl-topology.c
index 4624556..a294fee 100644
--- a/sound/soc/intel/skylake/skl-topology.c
+++ b/sound/soc/intel/skylake/skl-topology.c
@@ -54,12 +54,9 @@
/*
* Each pipelines needs memory to be allocated. Check if we have free memory
- * from available pool. Then only add this to pool
- * This is freed when pipe is deleted
- * Note: DSP does actual memory management we only keep track for complete
- * pool
+ * from the available pool.
*/
-static bool skl_tplg_alloc_pipe_mem(struct skl *skl,
+static bool skl_is_pipe_mem_avail(struct skl *skl,
struct skl_module_cfg *mconfig)
{
struct skl_sst *ctx = skl->skl_sst;
@@ -74,10 +71,20 @@
"exceeds ppl memory available %d mem %d\n",
skl->resource.max_mem, skl->resource.mem);
return false;
+ } else {
+ return true;
}
+}
+/*
+ * Add the mem to the mem pool. This is freed when the pipe is deleted.
+ * Note: the DSP does the actual memory management; we only keep track
+ * of the complete pool.
+ */
+static void skl_tplg_alloc_pipe_mem(struct skl *skl,
+ struct skl_module_cfg *mconfig)
+{
skl->resource.mem += mconfig->pipe->memory_pages;
- return true;
}
/*
@@ -85,10 +92,10 @@
* quantified in MCPS (Million Clocks Per Second) required for module/pipe
*
* Each pipelines needs mcps to be allocated. Check if we have mcps for this
- * pipe. This adds the mcps to driver counter
- * This is removed on pipeline delete
+ * pipe.
*/
-static bool skl_tplg_alloc_pipe_mcps(struct skl *skl,
+
+static bool skl_is_pipe_mcps_avail(struct skl *skl,
struct skl_module_cfg *mconfig)
{
struct skl_sst *ctx = skl->skl_sst;
@@ -98,13 +105,18 @@
"%s: module_id %d instance %d\n", __func__,
mconfig->id.module_id, mconfig->id.instance_id);
dev_err(ctx->dev,
- "exceeds ppl memory available %d > mem %d\n",
+ "exceeds ppl mcps available %d > mem %d\n",
skl->resource.max_mcps, skl->resource.mcps);
return false;
+ } else {
+ return true;
}
+}
+static void skl_tplg_alloc_pipe_mcps(struct skl *skl,
+ struct skl_module_cfg *mconfig)
+{
skl->resource.mcps += mconfig->mcps;
- return true;
}
/*
@@ -411,7 +423,7 @@
mconfig = w->priv;
/* check resource available */
- if (!skl_tplg_alloc_pipe_mcps(skl, mconfig))
+ if (!skl_is_pipe_mcps_avail(skl, mconfig))
return -ENOMEM;
if (mconfig->is_loadable && ctx->dsp->fw_ops.load_mod) {
@@ -435,6 +447,7 @@
ret = skl_tplg_set_module_params(w, ctx);
if (ret < 0)
return ret;
+ skl_tplg_alloc_pipe_mcps(skl, mconfig);
}
return 0;
@@ -477,10 +490,10 @@
struct skl_sst *ctx = skl->skl_sst;
/* check resource available */
- if (!skl_tplg_alloc_pipe_mcps(skl, mconfig))
+ if (!skl_is_pipe_mcps_avail(skl, mconfig))
return -EBUSY;
- if (!skl_tplg_alloc_pipe_mem(skl, mconfig))
+ if (!skl_is_pipe_mem_avail(skl, mconfig))
return -ENOMEM;
/*
@@ -526,11 +539,15 @@
src_module = dst_module;
}
+ skl_tplg_alloc_pipe_mem(skl, mconfig);
+ skl_tplg_alloc_pipe_mcps(skl, mconfig);
+
return 0;
}
static int skl_tplg_bind_sinks(struct snd_soc_dapm_widget *w,
struct skl *skl,
+ struct snd_soc_dapm_widget *src_w,
struct skl_module_cfg *src_mconfig)
{
struct snd_soc_dapm_path *p;
@@ -547,6 +564,10 @@
dev_dbg(ctx->dev, "%s: sink widget=%s\n", __func__, p->sink->name);
next_sink = p->sink;
+
+ if (!is_skl_dsp_widget_type(p->sink))
+ return skl_tplg_bind_sinks(p->sink, skl, src_w, src_mconfig);
+
/*
* here we will check widgets in sink pipelines, so that
* can be any widgets type and we are only interested if
@@ -576,7 +597,7 @@
}
if (!sink)
- return skl_tplg_bind_sinks(next_sink, skl, src_mconfig);
+ return skl_tplg_bind_sinks(next_sink, skl, src_w, src_mconfig);
return 0;
}
@@ -605,7 +626,7 @@
* if sink is not started, start sink pipe first, then start
* this pipe
*/
- ret = skl_tplg_bind_sinks(w, skl, src_mconfig);
+ ret = skl_tplg_bind_sinks(w, skl, w, src_mconfig);
if (ret)
return ret;
@@ -773,10 +794,7 @@
continue;
}
- ret = skl_unbind_modules(ctx, src_module, dst_module);
- if (ret < 0)
- return ret;
-
+ skl_unbind_modules(ctx, src_module, dst_module);
src_module = dst_module;
}
@@ -814,9 +832,6 @@
* This is a connecter and if path is found that means
* unbind between source and sink has not happened yet
*/
- ret = skl_stop_pipe(ctx, sink_mconfig->pipe);
- if (ret < 0)
- return ret;
ret = skl_unbind_modules(ctx, src_mconfig,
sink_mconfig);
}
@@ -842,6 +857,12 @@
case SND_SOC_DAPM_PRE_PMU:
return skl_tplg_mixer_dapm_pre_pmu_event(w, skl);
+ case SND_SOC_DAPM_POST_PMU:
+ return skl_tplg_mixer_dapm_post_pmu_event(w, skl);
+
+ case SND_SOC_DAPM_PRE_PMD:
+ return skl_tplg_mixer_dapm_pre_pmd_event(w, skl);
+
case SND_SOC_DAPM_POST_PMD:
return skl_tplg_mixer_dapm_post_pmd_event(w, skl);
}
@@ -916,6 +937,13 @@
skl_get_module_params(skl->skl_sst, (u32 *)bc->params,
bc->max, bc->param_id, mconfig);
+ /* decrement size for TLV header */
+ size -= 2 * sizeof(u32);
+
+ /* check size as we don't want to send kernel data */
+ if (size > bc->max)
+ size = bc->max;
+
if (bc->params) {
if (copy_to_user(data, &bc->param_id, sizeof(u32)))
return -EFAULT;
@@ -1510,6 +1538,7 @@
&skl_tplg_ops, fw, 0);
if (ret < 0) {
dev_err(bus->dev, "tplg component load failed%d\n", ret);
+ release_firmware(fw);
return -EINVAL;
}
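The net effect of the renames is a check/commit split: skl_is_pipe_*_avail() only tests headroom, and the pool counters are charged by the new skl_tplg_alloc_pipe_*() helpers once creation has succeeded. In outline (create_pipe_and_modules() is a stand-in for the real sequence):

    if (!skl_is_pipe_mcps_avail(skl, mconfig))
        return -EBUSY;
    if (!skl_is_pipe_mem_avail(skl, mconfig))
        return -ENOMEM;

    ret = create_pipe_and_modules();        /* hypothetical stand-in */
    if (ret < 0)
        return ret;                         /* nothing charged on failure */

    skl_tplg_alloc_pipe_mem(skl, mconfig);  /* commit only on success */
    skl_tplg_alloc_pipe_mcps(skl, mconfig);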
diff --git a/sound/soc/intel/skylake/skl.c b/sound/soc/intel/skylake/skl.c
index 443a15d..092705e 100644
--- a/sound/soc/intel/skylake/skl.c
+++ b/sound/soc/intel/skylake/skl.c
@@ -614,8 +614,6 @@
goto out_unregister;
/*configure PM */
- pm_runtime_set_autosuspend_delay(bus->dev, SKL_SUSPEND_DELAY);
- pm_runtime_use_autosuspend(bus->dev);
pm_runtime_put_noidle(bus->dev);
pm_runtime_allow(bus->dev);
diff --git a/sound/soc/mediatek/Kconfig b/sound/soc/mediatek/Kconfig
index 15c04e2..9769676 100644
--- a/sound/soc/mediatek/Kconfig
+++ b/sound/soc/mediatek/Kconfig
@@ -9,7 +9,7 @@
config SND_SOC_MT8173_MAX98090
tristate "ASoC Audio driver for MT8173 with MAX98090 codec"
- depends on SND_SOC_MEDIATEK
+ depends on SND_SOC_MEDIATEK && I2C
select SND_SOC_MAX98090
help
This adds ASoC driver for Mediatek MT8173 boards
@@ -19,7 +19,7 @@
config SND_SOC_MT8173_RT5650_RT5676
tristate "ASoC Audio driver for MT8173 with RT5650 RT5676 codecs"
- depends on SND_SOC_MEDIATEK
+ depends on SND_SOC_MEDIATEK && I2C
select SND_SOC_RT5645
select SND_SOC_RT5677
help
diff --git a/sound/soc/mxs/mxs-saif.c b/sound/soc/mxs/mxs-saif.c
index c866ade..a6c7b8d 100644
--- a/sound/soc/mxs/mxs-saif.c
+++ b/sound/soc/mxs/mxs-saif.c
@@ -381,9 +381,19 @@
__raw_writel(BM_SAIF_CTRL_CLKGATE,
saif->base + SAIF_CTRL + MXS_CLR_ADDR);
+ clk_prepare(saif->clk);
+
return 0;
}
+static void mxs_saif_shutdown(struct snd_pcm_substream *substream,
+ struct snd_soc_dai *cpu_dai)
+{
+ struct mxs_saif *saif = snd_soc_dai_get_drvdata(cpu_dai);
+
+ clk_unprepare(saif->clk);
+}
+
/*
* Should only be called when port is inactive.
* although can be called multiple times by upper layers.
@@ -424,8 +434,6 @@
return ret;
}
- /* prepare clk in hw_param, enable in trigger */
- clk_prepare(saif->clk);
if (saif != master_saif) {
/*
* Set an initial clock rate for the saif internal logic to work
@@ -611,6 +619,7 @@
static const struct snd_soc_dai_ops mxs_saif_dai_ops = {
.startup = mxs_saif_startup,
+ .shutdown = mxs_saif_shutdown,
.trigger = mxs_saif_trigger,
.prepare = mxs_saif_prepare,
.hw_params = mxs_saif_hw_params,
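Moving clk_prepare() to startup pairs the prepare count with the stream lifetime rather than with hw_params(), which may run several times for one open stream. The resulting pairing:

    /* startup()   -> clk_prepare(saif->clk)    once per open
     * hw_params() -> no refcount change, may be called repeatedly
     * trigger()   -> clk_enable()/clk_disable()
     * shutdown()  -> clk_unprepare(saif->clk)   balances startup()
     */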
diff --git a/sound/soc/qcom/lpass-platform.c b/sound/soc/qcom/lpass-platform.c
index 79688aa..4aeb8e1 100644
--- a/sound/soc/qcom/lpass-platform.c
+++ b/sound/soc/qcom/lpass-platform.c
@@ -440,18 +440,18 @@
}
static int lpass_platform_alloc_buffer(struct snd_pcm_substream *substream,
- struct snd_soc_pcm_runtime *soc_runtime)
+ struct snd_soc_pcm_runtime *rt)
{
struct snd_dma_buffer *buf = &substream->dma_buffer;
size_t size = lpass_platform_pcm_hardware.buffer_bytes_max;
buf->dev.type = SNDRV_DMA_TYPE_DEV;
- buf->dev.dev = soc_runtime->dev;
+ buf->dev.dev = rt->platform->dev;
buf->private_data = NULL;
- buf->area = dma_alloc_coherent(soc_runtime->dev, size, &buf->addr,
+ buf->area = dma_alloc_coherent(rt->platform->dev, size, &buf->addr,
GFP_KERNEL);
if (!buf->area) {
- dev_err(soc_runtime->dev, "%s: Could not allocate DMA buffer\n",
+ dev_err(rt->platform->dev, "%s: Could not allocate DMA buffer\n",
__func__);
return -ENOMEM;
}
@@ -461,12 +461,12 @@
}
static void lpass_platform_free_buffer(struct snd_pcm_substream *substream,
- struct snd_soc_pcm_runtime *soc_runtime)
+ struct snd_soc_pcm_runtime *rt)
{
struct snd_dma_buffer *buf = &substream->dma_buffer;
if (buf->area) {
- dma_free_coherent(soc_runtime->dev, buf->bytes, buf->area,
+ dma_free_coherent(rt->platform->dev, buf->bytes, buf->area,
buf->addr);
}
buf->area = NULL;
@@ -499,9 +499,6 @@
snd_soc_pcm_set_drvdata(soc_runtime, data);
- soc_runtime->dev->coherent_dma_mask = DMA_BIT_MASK(32);
- soc_runtime->dev->dma_mask = &soc_runtime->dev->coherent_dma_mask;
-
ret = lpass_platform_alloc_buffer(substream, soc_runtime);
if (ret)
return ret;
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 5a2812f..0d37079 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -310,7 +310,7 @@
};
static int dapm_kcontrol_data_alloc(struct snd_soc_dapm_widget *widget,
- struct snd_kcontrol *kcontrol)
+ struct snd_kcontrol *kcontrol, const char *ctrl_name)
{
struct dapm_kcontrol_data *data;
struct soc_mixer_control *mc;
@@ -333,7 +333,7 @@
if (mc->autodisable) {
struct snd_soc_dapm_widget template;
- name = kasprintf(GFP_KERNEL, "%s %s", kcontrol->id.name,
+ name = kasprintf(GFP_KERNEL, "%s %s", ctrl_name,
"Autodisable");
if (!name) {
ret = -ENOMEM;
@@ -371,7 +371,7 @@
if (e->autodisable) {
struct snd_soc_dapm_widget template;
- name = kasprintf(GFP_KERNEL, "%s %s", kcontrol->id.name,
+ name = kasprintf(GFP_KERNEL, "%s %s", ctrl_name,
"Autodisable");
if (!name) {
ret = -ENOMEM;
@@ -871,7 +871,7 @@
kcontrol->private_free = dapm_kcontrol_free;
- ret = dapm_kcontrol_data_alloc(w, kcontrol);
+ ret = dapm_kcontrol_data_alloc(w, kcontrol, name);
if (ret) {
snd_ctl_free_one(kcontrol);
goto exit_free;
diff --git a/sound/soc/soc-pcm.c b/sound/soc/soc-pcm.c
index e898b42..1af4f23 100644
--- a/sound/soc/soc-pcm.c
+++ b/sound/soc/soc-pcm.c
@@ -1810,7 +1810,8 @@
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PREPARE) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_HW_FREE) &&
(be->dpcm[stream].state != SND_SOC_DPCM_STATE_PAUSED) &&
- (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP))
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_STOP) &&
+ (be->dpcm[stream].state != SND_SOC_DPCM_STATE_SUSPEND))
continue;
dev_dbg(be->dev, "ASoC: hw_free BE %s\n",
diff --git a/sound/usb/midi.c b/sound/usb/midi.c
index cc39f63..007cf58 100644
--- a/sound/usb/midi.c
+++ b/sound/usb/midi.c
@@ -2455,7 +2455,6 @@
else
err = snd_usbmidi_create_endpoints(umidi, endpoints);
if (err < 0) {
- snd_usbmidi_free(umidi);
return err;
}
diff --git a/tools/perf/util/intel-pt.c b/tools/perf/util/intel-pt.c
index 81a2eb7..05d8158 100644
--- a/tools/perf/util/intel-pt.c
+++ b/tools/perf/util/intel-pt.c
@@ -2068,6 +2068,15 @@
err = -ENOMEM;
goto err_free_queues;
}
+
+ /*
+ * Since this thread will not be kept in any rbtree nor in a
+ * list, initialize its list node so that at thread__put() the
+ * current thread lifetime assumption is kept and we don't segfault
+ * at list_del_init().
+ */
+ INIT_LIST_HEAD(&pt->unknown_thread->node);
+
err = thread__set_comm(pt->unknown_thread, "unknown", 0);
if (err)
goto err_delete_thread;
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index 4f7b0ef..813d9b2 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -399,6 +399,9 @@
{
char help[BUFSIZ];
+ if (!e)
+ return;
+
/*
* We get error directly from syscall errno ( > 0),
* or from encoded pointer's error ( < 0).
diff --git a/tools/perf/util/probe-finder.c b/tools/perf/util/probe-finder.c
index 2be10fb..4ce5c5e 100644
--- a/tools/perf/util/probe-finder.c
+++ b/tools/perf/util/probe-finder.c
@@ -686,8 +686,9 @@
pf->fb_ops = NULL;
#if _ELFUTILS_PREREQ(0, 142)
} else if (nops == 1 && pf->fb_ops[0].atom == DW_OP_call_frame_cfa &&
- pf->cfi != NULL) {
- if (dwarf_cfi_addrframe(pf->cfi, pf->addr, &frame) != 0 ||
+ (pf->cfi_eh != NULL || pf->cfi_dbg != NULL)) {
+ if ((dwarf_cfi_addrframe(pf->cfi_eh, pf->addr, &frame) != 0 &&
+ (dwarf_cfi_addrframe(pf->cfi_dbg, pf->addr, &frame) != 0)) ||
dwarf_frame_cfa(frame, &pf->fb_ops, &nops) != 0) {
pr_warning("Failed to get call frame on 0x%jx\n",
(uintmax_t)pf->addr);
@@ -1015,8 +1016,7 @@
return DWARF_CB_OK;
}
-/* Find probe points from debuginfo */
-static int debuginfo__find_probes(struct debuginfo *dbg,
+static int debuginfo__find_probe_location(struct debuginfo *dbg,
struct probe_finder *pf)
{
struct perf_probe_point *pp = &pf->pev->point;
@@ -1025,27 +1025,6 @@
Dwarf_Die *diep;
int ret = 0;
-#if _ELFUTILS_PREREQ(0, 142)
- Elf *elf;
- GElf_Ehdr ehdr;
- GElf_Shdr shdr;
-
- /* Get the call frame information from this dwarf */
- elf = dwarf_getelf(dbg->dbg);
- if (elf == NULL)
- return -EINVAL;
-
- if (gelf_getehdr(elf, &ehdr) == NULL)
- return -EINVAL;
-
- if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
- shdr.sh_type == SHT_PROGBITS) {
- pf->cfi = dwarf_getcfi_elf(elf);
- } else {
- pf->cfi = dwarf_getcfi(dbg->dbg);
- }
-#endif
-
off = 0;
pf->lcache = intlist__new(NULL);
if (!pf->lcache)
@@ -1108,6 +1087,39 @@
return ret;
}
+/* Find probe points from debuginfo */
+static int debuginfo__find_probes(struct debuginfo *dbg,
+ struct probe_finder *pf)
+{
+ int ret = 0;
+
+#if _ELFUTILS_PREREQ(0, 142)
+ Elf *elf;
+ GElf_Ehdr ehdr;
+ GElf_Shdr shdr;
+
+ if (pf->cfi_eh || pf->cfi_dbg)
+ return debuginfo__find_probe_location(dbg, pf);
+
+ /* Get the call frame information from this dwarf */
+ elf = dwarf_getelf(dbg->dbg);
+ if (elf == NULL)
+ return -EINVAL;
+
+ if (gelf_getehdr(elf, &ehdr) == NULL)
+ return -EINVAL;
+
+ if (elf_section_by_name(elf, &ehdr, &shdr, ".eh_frame", NULL) &&
+ shdr.sh_type == SHT_PROGBITS)
+ pf->cfi_eh = dwarf_getcfi_elf(elf);
+
+ pf->cfi_dbg = dwarf_getcfi(dbg->dbg);
+#endif
+
+ ret = debuginfo__find_probe_location(dbg, pf);
+ return ret;
+}
+
struct local_vars_finder {
struct probe_finder *pf;
struct perf_probe_arg *args;
diff --git a/tools/perf/util/probe-finder.h b/tools/perf/util/probe-finder.h
index bed8271..0aec770 100644
--- a/tools/perf/util/probe-finder.h
+++ b/tools/perf/util/probe-finder.h
@@ -76,7 +76,10 @@
/* For variable searching */
#if _ELFUTILS_PREREQ(0, 142)
- Dwarf_CFI *cfi; /* Call Frame Information */
+ /* Call Frame Information from .eh_frame */
+ Dwarf_CFI *cfi_eh;
+ /* Call Frame Information from .debug_frame */
+ Dwarf_CFI *cfi_dbg;
#endif
Dwarf_Op *fb_ops; /* Frame base attribute */
struct perf_probe_arg *pvar; /* Current target variable */
diff --git a/tools/perf/util/stat.c b/tools/perf/util/stat.c
index 2b58edc..afb0c45 100644
--- a/tools/perf/util/stat.c
+++ b/tools/perf/util/stat.c
@@ -311,6 +311,16 @@
aggr->val = aggr->ena = aggr->run = 0;
+ /*
+ * We calculate the counter's data every interval,
+ * and the display code shows the ps->res_stats
+ * avg value. We need to zero the stats for
+ * interval mode, otherwise the running average
+ * over all intervals would be shown for each one.
+ */
+ if (config->interval)
+ init_stats(ps->res_stats);
+
if (counter->per_pkg)
zero_per_pkg(counter);
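Illustrative numbers for the reset above:

    /* counts per interval:  100, 200, 300
     * without init_stats(): printed avg = 100, 150, 200 (cumulative)
     * with the reset:       printed avg = 100, 200, 300 (per interval)
     */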
diff --git a/tools/testing/nvdimm/test/nfit.c b/tools/testing/nvdimm/test/nfit.c
index 90bd2ea..b3281dc 100644
--- a/tools/testing/nvdimm/test/nfit.c
+++ b/tools/testing/nvdimm/test/nfit.c
@@ -217,13 +217,16 @@
return rc;
}
+#define NFIT_TEST_ARS_RECORDS 4
+
static int nfit_test_cmd_ars_cap(struct nd_cmd_ars_cap *nd_cmd,
unsigned int buf_len)
{
if (buf_len < sizeof(*nd_cmd))
return -EINVAL;
- nd_cmd->max_ars_out = 256;
+ nd_cmd->max_ars_out = sizeof(struct nd_cmd_ars_status)
+ + NFIT_TEST_ARS_RECORDS * sizeof(struct nd_ars_record);
nd_cmd->status = (ND_ARS_PERSISTENT | ND_ARS_VOLATILE) << 16;
return 0;
@@ -246,7 +249,8 @@
if (buf_len < sizeof(*nd_cmd))
return -EINVAL;
- nd_cmd->out_length = 256;
+ nd_cmd->out_length = sizeof(struct nd_cmd_ars_status);
+ /* TODO: emit error records */
nd_cmd->num_records = 0;
nd_cmd->address = 0;
nd_cmd->length = -1ULL;
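With the record count factored out, the advertised buffer is derived rather than hard-coded. For illustration only (the real sizes depend on the UAPI structures):

    /* If sizeof(struct nd_cmd_ars_status) were 40 and
     * sizeof(struct nd_ars_record) were 24, max_ars_out would
     * advertise 40 + 4 * 24 = 136 bytes -- sized to hold exactly
     * NFIT_TEST_ARS_RECORDS records instead of an arbitrary 256.
     */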
diff --git a/tools/testing/selftests/efivarfs/efivarfs.sh b/tools/testing/selftests/efivarfs/efivarfs.sh
index 77edcdc..0572784 100755
--- a/tools/testing/selftests/efivarfs/efivarfs.sh
+++ b/tools/testing/selftests/efivarfs/efivarfs.sh
@@ -88,7 +88,11 @@
exit 1
fi
- rm $file
+ rm $file 2>/dev/null
+ if [ $? -ne 0 ]; then
+ chattr -i $file
+ rm $file
+ fi
if [ -e $file ]; then
echo "$file couldn't be deleted" >&2
@@ -111,6 +115,7 @@
exit 1
fi
+ chattr -i $file
printf "$attrs" > $file
if [ -e $file ]; then
@@ -141,7 +146,11 @@
echo "$file could not be created" >&2
ret=1
else
- rm $file
+ rm $file 2>/dev/null
+ if [ $? -ne 0 ]; then
+ chattr -i $file
+ rm $file
+ fi
fi
done
@@ -174,7 +183,11 @@
if [ -e $file ]; then
echo "Creating $file should have failed" >&2
- rm $file
+ rm $file 2>/dev/null
+ if [ $? -ne 0 ]; then
+ chattr -i $file
+ rm $file
+ fi
ret=1
fi
done
diff --git a/tools/testing/selftests/efivarfs/open-unlink.c b/tools/testing/selftests/efivarfs/open-unlink.c
index 8c07644..4af74f7 100644
--- a/tools/testing/selftests/efivarfs/open-unlink.c
+++ b/tools/testing/selftests/efivarfs/open-unlink.c
@@ -1,10 +1,68 @@
+#include <errno.h>
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <unistd.h>
+#include <sys/ioctl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
+#include <linux/fs.h>
+
+static int set_immutable(const char *path, int immutable)
+{
+ unsigned int flags;
+ int fd;
+ int rc;
+ int error;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
+ if (rc < 0) {
+ error = errno;
+ close(fd);
+ errno = error;
+ return rc;
+ }
+
+ if (immutable)
+ flags |= FS_IMMUTABLE_FL;
+ else
+ flags &= ~FS_IMMUTABLE_FL;
+
+ rc = ioctl(fd, FS_IOC_SETFLAGS, &flags);
+ error = errno;
+ close(fd);
+ errno = error;
+ return rc;
+}
+
+static int get_immutable(const char *path)
+{
+ unsigned int flags;
+ int fd;
+ int rc;
+ int error;
+
+ fd = open(path, O_RDONLY);
+ if (fd < 0)
+ return fd;
+
+ rc = ioctl(fd, FS_IOC_GETFLAGS, &flags);
+ if (rc < 0) {
+ error = errno;
+ close(fd);
+ errno = error;
+ return rc;
+ }
+ close(fd);
+ if (flags & FS_IMMUTABLE_FL)
+ return 1;
+ return 0;
+}
int main(int argc, char **argv)
{
@@ -27,7 +85,7 @@
buf[4] = 0;
/* create a test variable */
- fd = open(path, O_WRONLY | O_CREAT);
+ fd = open(path, O_WRONLY | O_CREAT, 0600);
if (fd < 0) {
perror("open(O_WRONLY)");
return EXIT_FAILURE;
@@ -41,6 +99,18 @@
close(fd);
+ rc = get_immutable(path);
+ if (rc < 0) {
+ perror("ioctl(FS_IOC_GETFLAGS)");
+ return EXIT_FAILURE;
+ } else if (rc) {
+ rc = set_immutable(path, 0);
+ if (rc < 0) {
+ perror("ioctl(FS_IOC_SETFLAGS)");
+ return EXIT_FAILURE;
+ }
+ }
+
fd = open(path, O_RDONLY);
if (fd < 0) {
perror("open");
diff --git a/tools/testing/selftests/ftrace/test.d/instances/instance.tc b/tools/testing/selftests/ftrace/test.d/instances/instance.tc
index 773e276..1e1abe0 100644
--- a/tools/testing/selftests/ftrace/test.d/instances/instance.tc
+++ b/tools/testing/selftests/ftrace/test.d/instances/instance.tc
@@ -39,28 +39,23 @@
}
instance_slam &
-x=`jobs -l`
-p1=`echo $x | cut -d' ' -f2`
+p1=$!
echo $p1
instance_slam &
-x=`jobs -l | tail -1`
-p2=`echo $x | cut -d' ' -f2`
+p2=$!
echo $p2
instance_slam &
-x=`jobs -l | tail -1`
-p3=`echo $x | cut -d' ' -f2`
+p3=$!
echo $p3
instance_slam &
-x=`jobs -l | tail -1`
-p4=`echo $x | cut -d' ' -f2`
+p4=$!
echo $p4
instance_slam &
-x=`jobs -l | tail -1`
-p5=`echo $x | cut -d' ' -f2`
+p5=$!
echo $p5
ls -lR >/dev/null
diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c
index 69bca18..a9ad4fe 100644
--- a/virt/kvm/arm/arch_timer.c
+++ b/virt/kvm/arm/arch_timer.c
@@ -34,6 +34,11 @@
static struct workqueue_struct *wqueue;
static unsigned int host_vtimer_irq;
+void kvm_timer_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.timer_cpu.active_cleared_last = false;
+}
+
static cycle_t kvm_phys_timer_read(void)
{
return timecounter->cc->read(timecounter->cc);
@@ -130,6 +135,7 @@
BUG_ON(!vgic_initialized(vcpu->kvm));
+ timer->active_cleared_last = false;
timer->irq.level = new_level;
trace_kvm_timer_update_irq(vcpu->vcpu_id, timer->map->virt_irq,
timer->irq.level);
@@ -143,7 +149,7 @@
* Check if there was a change in the timer state (should we raise or lower
* the line level to the GIC).
*/
-static void kvm_timer_update_state(struct kvm_vcpu *vcpu)
+static int kvm_timer_update_state(struct kvm_vcpu *vcpu)
{
struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
@@ -154,10 +160,12 @@
* until we call this function from kvm_timer_flush_hwstate.
*/
if (!vgic_initialized(vcpu->kvm))
- return;
+ return -ENODEV;
if (kvm_timer_should_fire(vcpu) != timer->irq.level)
kvm_timer_update_irq(vcpu, !timer->irq.level);
+
+ return 0;
}
/*
@@ -218,7 +226,8 @@
bool phys_active;
int ret;
- kvm_timer_update_state(vcpu);
+ if (kvm_timer_update_state(vcpu))
+ return;
/*
* If we enter the guest with the virtual input level to the VGIC
@@ -242,10 +251,35 @@
else
phys_active = false;
+ /*
+ * We want to avoid hitting the (re)distributor as much as
+ * possible, as this is a potentially expensive MMIO access
+ * (not to mention locks in the irq layer), and a solution for
+ * this is to cache the "active" state in memory.
+ *
+ * Things to consider: we cannot cache an "active set" state,
+ * because the HW can change this behind our back (it becomes
+ * "clear" in the HW). We must then restrict the caching to
+ * the "clear" state.
+ *
+ * The cache is invalidated on:
+ * - vcpu put, indicating that the HW cannot be trusted to be
+ * in a sane state on the next vcpu load,
+ * - any change in the interrupt state
+ *
+ * Usage conditions:
+ * - cached value is "active clear"
+ * - value to be programmed is "active clear"
+ */
+ if (timer->active_cleared_last && !phys_active)
+ return;
+
ret = irq_set_irqchip_state(timer->map->irq,
IRQCHIP_STATE_ACTIVE,
phys_active);
WARN_ON(ret);
+
+ timer->active_cleared_last = !phys_active;
}
/**
diff --git a/arch/arm64/kvm/hyp/timer-sr.c b/virt/kvm/arm/hyp/timer-sr.c
similarity index 86%
rename from arch/arm64/kvm/hyp/timer-sr.c
rename to virt/kvm/arm/hyp/timer-sr.c
index 1051e5d..ea00d69 100644
--- a/arch/arm64/kvm/hyp/timer-sr.c
+++ b/virt/kvm/arm/hyp/timer-sr.c
@@ -19,9 +19,7 @@
#include <linux/compiler.h>
#include <linux/kvm_host.h>
-#include <asm/kvm_mmu.h>
-
-#include "hyp.h"
+#include <asm/kvm_hyp.h>
/* vcpu is already in the HYP VA space */
void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
@@ -31,12 +29,12 @@
u64 val;
if (kvm->arch.timer.enabled) {
- timer->cntv_ctl = read_sysreg(cntv_ctl_el0);
- timer->cntv_cval = read_sysreg(cntv_cval_el0);
+ timer->cntv_ctl = read_sysreg_el0(cntv_ctl);
+ timer->cntv_cval = read_sysreg_el0(cntv_cval);
}
/* Disable the virtual timer */
- write_sysreg(0, cntv_ctl_el0);
+ write_sysreg_el0(0, cntv_ctl);
/* Allow physical timer/counter access for the host */
val = read_sysreg(cnthctl_el2);
@@ -64,8 +62,8 @@
if (kvm->arch.timer.enabled) {
write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
- write_sysreg(timer->cntv_cval, cntv_cval_el0);
+ write_sysreg_el0(timer->cntv_cval, cntv_cval);
isb();
- write_sysreg(timer->cntv_ctl, cntv_ctl_el0);
+ write_sysreg_el0(timer->cntv_ctl, cntv_ctl);
}
}
diff --git a/virt/kvm/arm/hyp/vgic-v2-sr.c b/virt/kvm/arm/hyp/vgic-v2-sr.c
new file mode 100644
index 0000000..674bdf8
--- /dev/null
+++ b/virt/kvm/arm/hyp/vgic-v2-sr.c
@@ -0,0 +1,170 @@
+/*
+ * Copyright (C) 2012-2015 - ARM Ltd
+ * Author: Marc Zyngier <marc.zyngier@arm.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/compiler.h>
+#include <linux/irqchip/arm-gic.h>
+#include <linux/kvm_host.h>
+
+#include <asm/kvm_hyp.h>
+
+static void __hyp_text save_maint_int_state(struct kvm_vcpu *vcpu,
+ void __iomem *base)
+{
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+ int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+ u32 eisr0, eisr1;
+ int i;
+ bool expect_mi;
+
+ expect_mi = !!(cpu_if->vgic_hcr & GICH_HCR_UIE);
+
+ for (i = 0; i < nr_lr; i++) {
+ if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
+ continue;
+
+ expect_mi |= (!(cpu_if->vgic_lr[i] & GICH_LR_HW) &&
+ (cpu_if->vgic_lr[i] & GICH_LR_EOI));
+ }
+
+ if (expect_mi) {
+ cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
+
+ if (cpu_if->vgic_misr & GICH_MISR_EOI) {
+ eisr0 = readl_relaxed(base + GICH_EISR0);
+ if (unlikely(nr_lr > 32))
+ eisr1 = readl_relaxed(base + GICH_EISR1);
+ else
+ eisr1 = 0;
+ } else {
+ eisr0 = eisr1 = 0;
+ }
+ } else {
+ cpu_if->vgic_misr = 0;
+ eisr0 = eisr1 = 0;
+ }
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ cpu_if->vgic_eisr = ((u64)eisr0 << 32) | eisr1;
+#else
+ cpu_if->vgic_eisr = ((u64)eisr1 << 32) | eisr0;
+#endif
+}
+
+static void __hyp_text save_elrsr(struct kvm_vcpu *vcpu, void __iomem *base)
+{
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+ int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+ u32 elrsr0, elrsr1;
+
+ elrsr0 = readl_relaxed(base + GICH_ELRSR0);
+ if (unlikely(nr_lr > 32))
+ elrsr1 = readl_relaxed(base + GICH_ELRSR1);
+ else
+ elrsr1 = 0;
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
+#else
+ cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
+#endif
+}
+
+static void __hyp_text save_lrs(struct kvm_vcpu *vcpu, void __iomem *base)
+{
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+ int nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+ int i;
+
+ for (i = 0; i < nr_lr; i++) {
+ if (!(vcpu->arch.vgic_cpu.live_lrs & (1UL << i)))
+ continue;
+
+ if (cpu_if->vgic_elrsr & (1UL << i)) {
+ cpu_if->vgic_lr[i] &= ~GICH_LR_STATE;
+ continue;
+ }
+
+ cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
+ writel_relaxed(0, base + GICH_LR0 + (i * 4));
+ }
+}
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+ struct vgic_dist *vgic = &kvm->arch.vgic;
+ void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+
+ if (!base)
+ return;
+
+ cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
+
+ if (vcpu->arch.vgic_cpu.live_lrs) {
+ cpu_if->vgic_apr = readl_relaxed(base + GICH_APR);
+
+ save_maint_int_state(vcpu, base);
+ save_elrsr(vcpu, base);
+ save_lrs(vcpu, base);
+
+ writel_relaxed(0, base + GICH_HCR);
+
+ vcpu->arch.vgic_cpu.live_lrs = 0;
+ } else {
+ cpu_if->vgic_eisr = 0;
+ cpu_if->vgic_elrsr = ~0UL;
+ cpu_if->vgic_misr = 0;
+ cpu_if->vgic_apr = 0;
+ }
+}
+
+/* vcpu is already in the HYP VA space */
+void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = kern_hyp_va(vcpu->kvm);
+ struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
+ struct vgic_dist *vgic = &kvm->arch.vgic;
+ void __iomem *base = kern_hyp_va(vgic->vctrl_base);
+ int i, nr_lr;
+ u64 live_lrs = 0;
+
+ if (!base)
+ return;
+
+ nr_lr = vcpu->arch.vgic_cpu.nr_lr;
+
+ for (i = 0; i < nr_lr; i++)
+ if (cpu_if->vgic_lr[i] & GICH_LR_STATE)
+ live_lrs |= 1UL << i;
+
+ if (live_lrs) {
+ writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
+ writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);
+ for (i = 0; i < nr_lr; i++) {
+ if (!(live_lrs & (1UL << i)))
+ continue;
+
+ writel_relaxed(cpu_if->vgic_lr[i],
+ base + GICH_LR0 + (i * 4));
+ }
+ }
+
+ writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
+ vcpu->arch.vgic_cpu.live_lrs = live_lrs;
+}
diff --git a/virt/kvm/arm/pmu.c b/virt/kvm/arm/pmu.c
new file mode 100644
index 0000000..b5754c6
--- /dev/null
+++ b/virt/kvm/arm/pmu.c
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2015 Linaro Ltd.
+ * Author: Shannon Zhao <shannon.zhao@linaro.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/cpu.h>
+#include <linux/kvm.h>
+#include <linux/kvm_host.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+#include <asm/kvm_emulate.h>
+#include <kvm/arm_pmu.h>
+#include <kvm/arm_vgic.h>
+
+/**
+ * kvm_pmu_get_counter_value - get PMU counter value
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ */
+u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
+{
+ u64 counter, reg, enabled, running;
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+
+ reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
+ ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
+ counter = vcpu_sys_reg(vcpu, reg);
+
+ /* The real counter value is equal to the value of counter register plus
+ * the value perf event counts.
+ */
+ if (pmc->perf_event)
+ counter += perf_event_read_value(pmc->perf_event, &enabled,
+ &running);
+
+ return counter & pmc->bitmask;
+}
+
+/**
+ * kvm_pmu_set_counter_value - set PMU counter value
+ * @vcpu: The vcpu pointer
+ * @select_idx: The counter index
+ * @val: The counter value
+ */
+void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
+{
+ u64 reg;
+
+ reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
+ ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
+ vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);
+}
+
+/**
+ * kvm_pmu_stop_counter - stop PMU counter
+ * @pmc: The PMU counter pointer
+ *
+ * If this counter has been configured to monitor some event, release it here.
+ */
+static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
+{
+ u64 counter, reg;
+
+ if (pmc->perf_event) {
+ counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
+ reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
+ ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
+ vcpu_sys_reg(vcpu, reg) = counter;
+ perf_event_disable(pmc->perf_event);
+ perf_event_release_kernel(pmc->perf_event);
+ pmc->perf_event = NULL;
+ }
+}
+
+/**
+ * kvm_pmu_vcpu_reset - reset pmu state for cpu
+ * @vcpu: The vcpu pointer
+ *
+ */
+void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+ for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+ kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
+ pmu->pmc[i].idx = i;
+ pmu->pmc[i].bitmask = 0xffffffffUL;
+ }
+}
+
+/**
+ * kvm_pmu_vcpu_destroy - free perf event of PMU for cpu
+ * @vcpu: The vcpu pointer
+ *
+ */
+void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ int i;
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+
+ for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+ struct kvm_pmc *pmc = &pmu->pmc[i];
+
+ if (pmc->perf_event) {
+ perf_event_disable(pmc->perf_event);
+ perf_event_release_kernel(pmc->perf_event);
+ pmc->perf_event = NULL;
+ }
+ }
+}
+
+u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
+{
+ u64 val = vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;
+
+ val &= ARMV8_PMU_PMCR_N_MASK;
+ if (val == 0)
+ return BIT(ARMV8_PMU_CYCLE_IDX);
+ else
+ return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
+}
+
+/**
+ * kvm_pmu_enable_counter - enable selected PMU counter
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCNTENSET register
+ *
+ * Call perf_event_enable to start counting the perf event
+ */
+void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val)
+{
+ int i;
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc;
+
+ if (!(vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
+ return;
+
+ for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+ if (!(val & BIT(i)))
+ continue;
+
+ pmc = &pmu->pmc[i];
+ if (pmc->perf_event) {
+ perf_event_enable(pmc->perf_event);
+ if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
+ kvm_debug("fail to enable perf event\n");
+ }
+ }
+}
+
+/**
+ * kvm_pmu_disable_counter - disable selected PMU counter
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCNTENCLR register
+ *
+ * Call perf_event_disable to stop counting the perf event
+ */
+void kvm_pmu_disable_counter(struct kvm_vcpu *vcpu, u64 val)
+{
+ int i;
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc;
+
+ if (!val)
+ return;
+
+ for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
+ if (!(val & BIT(i)))
+ continue;
+
+ pmc = &pmu->pmc[i];
+ if (pmc->perf_event)
+ perf_event_disable(pmc->perf_event);
+ }
+}
+
+static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
+{
+ u64 reg = 0;
+
+ if ((vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
+ reg = vcpu_sys_reg(vcpu, PMOVSSET_EL0);
+ reg &= vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+ reg &= vcpu_sys_reg(vcpu, PMINTENSET_EL1);
+ reg &= kvm_pmu_valid_counter_mask(vcpu);
+
+ return reg;
+}
+
+/**
+ * kvm_pmu_overflow_set - set PMU overflow interrupt
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMOVSSET register
+ */
+void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val)
+{
+ u64 reg;
+
+ if (val == 0)
+ return;
+
+ vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= val;
+ reg = kvm_pmu_overflow_status(vcpu);
+ if (reg != 0)
+ kvm_vcpu_kick(vcpu);
+}
+
+static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ bool overflow;
+
+ if (!kvm_arm_pmu_v3_ready(vcpu))
+ return;
+
+ overflow = !!kvm_pmu_overflow_status(vcpu);
+ if (pmu->irq_level != overflow) {
+ pmu->irq_level = overflow;
+ kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
+ pmu->irq_num, overflow);
+ }
+}
+
+/**
+ * kvm_pmu_flush_hwstate - flush pmu state to cpu
+ * @vcpu: The vcpu pointer
+ *
+ * Check if the PMU has overflowed while we were running in the host, and inject
+ * an interrupt if that was the case.
+ */
+void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
+{
+ kvm_pmu_update_state(vcpu);
+}
+
+/**
+ * kvm_pmu_sync_hwstate - sync pmu state from cpu
+ * @vcpu: The vcpu pointer
+ *
+ * Check if the PMU has overflowed while we were running in the guest, and
+ * inject an interrupt if that was the case.
+ */
+void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
+{
+ kvm_pmu_update_state(vcpu);
+}
+
+static inline struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
+{
+ struct kvm_pmu *pmu;
+ struct kvm_vcpu_arch *vcpu_arch;
+
+ pmc -= pmc->idx;
+ pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
+ vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
+ return container_of(vcpu_arch, struct kvm_vcpu, arch);
+}
+
+/**
+ * When perf event overflows, call kvm_pmu_overflow_set to set overflow status.
+ */
+static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct kvm_pmc *pmc = perf_event->overflow_handler_context;
+ struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
+ int idx = pmc->idx;
+
+ kvm_pmu_overflow_set(vcpu, BIT(idx));
+}
+
+/**
+ * kvm_pmu_software_increment - do software increment
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMSWINC register
+ */
+void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
+{
+ int i;
+ u64 type, enable, reg;
+
+ if (val == 0)
+ return;
+
+ enable = vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
+ for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++) {
+ if (!(val & BIT(i)))
+ continue;
+ type = vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i)
+ & ARMV8_PMU_EVTYPE_EVENT;
+ if ((type == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
+ && (enable & BIT(i))) {
+ reg = vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
+ reg = lower_32_bits(reg);
+ vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;
+ if (!reg)
+ kvm_pmu_overflow_set(vcpu, BIT(i));
+ }
+ }
+}
+
+/**
+ * kvm_pmu_handle_pmcr - handle PMCR register
+ * @vcpu: The vcpu pointer
+ * @val: the value guest writes to PMCR register
+ */
+void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc;
+ u64 mask;
+ int i;
+
+ mask = kvm_pmu_valid_counter_mask(vcpu);
+ if (val & ARMV8_PMU_PMCR_E) {
+ kvm_pmu_enable_counter(vcpu,
+ vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & mask);
+ } else {
+ kvm_pmu_disable_counter(vcpu, mask);
+ }
+
+ if (val & ARMV8_PMU_PMCR_C)
+ kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);
+
+ if (val & ARMV8_PMU_PMCR_P) {
+ for (i = 0; i < ARMV8_PMU_CYCLE_IDX; i++)
+ kvm_pmu_set_counter_value(vcpu, i, 0);
+ }
+
+ if (val & ARMV8_PMU_PMCR_LC) {
+ pmc = &pmu->pmc[ARMV8_PMU_CYCLE_IDX];
+ pmc->bitmask = 0xffffffffffffffffUL;
+ }
+}
+
+static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
+{
+ return (vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
+ (vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
+}
+
+/**
+ * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
+ * @vcpu: The vcpu pointer
+ * @data: The data guest writes to PMXEVTYPER_EL0
+ * @select_idx: The number of selected counter
+ *
+ * When OS accesses PMXEVTYPER_EL0, that means it wants to set a PMC to count an
+ * event with given hardware event number. Here we call perf_event API to
+ * emulate this action and create a kernel perf event for it.
+ */
+void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
+ u64 select_idx)
+{
+ struct kvm_pmu *pmu = &vcpu->arch.pmu;
+ struct kvm_pmc *pmc = &pmu->pmc[select_idx];
+ struct perf_event *event;
+ struct perf_event_attr attr;
+ u64 eventsel, counter;
+
+ kvm_pmu_stop_counter(vcpu, pmc);
+ eventsel = data & ARMV8_PMU_EVTYPE_EVENT;
+
+ /* Software increment events don't need to be backed by a perf event */
+ if (eventsel == ARMV8_PMU_EVTYPE_EVENT_SW_INCR)
+ return;
+
+ memset(&attr, 0, sizeof(struct perf_event_attr));
+ attr.type = PERF_TYPE_RAW;
+ attr.size = sizeof(attr);
+ attr.pinned = 1;
+ attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, select_idx);
+ attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
+ attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
+ attr.exclude_hv = 1; /* Don't count EL2 events */
+ attr.exclude_host = 1; /* Don't count host events */
+ attr.config = eventsel;
+
+ counter = kvm_pmu_get_counter_value(vcpu, select_idx);
+ /* The initial sample period (overflow count) of an event. */
+ attr.sample_period = (-counter) & pmc->bitmask;
+
+ event = perf_event_create_kernel_counter(&attr, -1, current,
+ kvm_pmu_perf_overflow, pmc);
+ if (IS_ERR(event)) {
+ pr_err_once("kvm: pmu event creation failed %ld\n",
+ PTR_ERR(event));
+ return;
+ }
+
+ pmc->perf_event = event;
+}
+
+bool kvm_arm_support_pmu_v3(void)
+{
+ /*
+ * Check if HW_PERF_EVENTS is supported by checking the number of
+ * hardware performance counters. This ensures the presence of
+ * a physical PMU and that CONFIG_PERF_EVENTS is selected.
+ */
+ return (perf_num_counters() > 0);
+}
+
+static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
+{
+ if (!kvm_arm_support_pmu_v3())
+ return -ENODEV;
+
+ if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features) ||
+ !kvm_arm_pmu_irq_initialized(vcpu))
+ return -ENXIO;
+
+ if (kvm_arm_pmu_v3_ready(vcpu))
+ return -EBUSY;
+
+ kvm_pmu_vcpu_reset(vcpu);
+ vcpu->arch.pmu.ready = true;
+
+ return 0;
+}
+
+static bool irq_is_valid(struct kvm *kvm, int irq, bool is_ppi)
+{
+ int i;
+ struct kvm_vcpu *vcpu;
+
+ kvm_for_each_vcpu(i, vcpu, kvm) {
+ if (!kvm_arm_pmu_irq_initialized(vcpu))
+ continue;
+
+ if (is_ppi) {
+ if (vcpu->arch.pmu.irq_num != irq)
+ return false;
+ } else {
+ if (vcpu->arch.pmu.irq_num == irq)
+ return false;
+ }
+ }
+
+ return true;
+}
+
+int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+ switch (attr->attr) {
+ case KVM_ARM_VCPU_PMU_V3_IRQ: {
+ int __user *uaddr = (int __user *)(long)attr->addr;
+ int irq;
+
+ if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+ return -ENODEV;
+
+ if (get_user(irq, uaddr))
+ return -EFAULT;
+
+ /*
+ * The PMU overflow interrupt could be a PPI or an SPI, but within
+ * one VM the interrupt type must be the same for every vcpu. As a
+ * PPI, the interrupt number is the same for all vcpus, while as an
+ * SPI it must be a separate number per vcpu.
+ */
+ if (irq < VGIC_NR_SGIS || irq >= vcpu->kvm->arch.vgic.nr_irqs ||
+ !irq_is_valid(vcpu->kvm, irq, irq < VGIC_NR_PRIVATE_IRQS))
+ return -EINVAL;
+
+ if (kvm_arm_pmu_irq_initialized(vcpu))
+ return -EBUSY;
+
+ kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
+ vcpu->arch.pmu.irq_num = irq;
+ return 0;
+ }
+ case KVM_ARM_VCPU_PMU_V3_INIT:
+ return kvm_arm_pmu_v3_init(vcpu);
+ }
+
+ return -ENXIO;
+}
+
+int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+ switch (attr->attr) {
+ case KVM_ARM_VCPU_PMU_V3_IRQ: {
+ int __user *uaddr = (int __user *)(long)attr->addr;
+ int irq;
+
+ if (!test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+ return -ENODEV;
+
+ if (!kvm_arm_pmu_irq_initialized(vcpu))
+ return -ENXIO;
+
+ irq = vcpu->arch.pmu.irq_num;
+ return put_user(irq, uaddr);
+ }
+ }
+
+ return -ENXIO;
+}
+
+int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
+{
+ switch (attr->attr) {
+ case KVM_ARM_VCPU_PMU_V3_IRQ:
+ case KVM_ARM_VCPU_PMU_V3_INIT:
+ if (kvm_arm_support_pmu_v3() &&
+ test_bit(KVM_ARM_VCPU_PMU_V3, vcpu->arch.features))
+ return 0;
+ }
+
+ return -ENXIO;
+}
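
One detail of the PMU code above that rewards a closer look is the sample-period arithmetic: programming perf with (-counter) & pmc->bitmask arms the host event to fire exactly when the emulated guest counter would overflow. A minimal user-space sketch of that arithmetic, where sample_period() is a hypothetical stand-in and 0xffffffff models a 32-bit event counter:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper mirroring the patch: distance from the current
 * counter value to its overflow point, truncated to the counter width. */
static uint64_t sample_period(uint64_t counter, uint64_t bitmask)
{
	return (-counter) & bitmask;
}

int main(void)
{
	/* A 32-bit counter at 0xfffffff0 wraps after 16 more events. */
	printf("%llu\n",
	       (unsigned long long)sample_period(0xfffffff0, 0xffffffff));
	return 0;
}
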
diff --git a/virt/kvm/arm/vgic-v2-emul.c b/virt/kvm/arm/vgic-v2-emul.c
index 1390797..1b0bee0 100644
--- a/virt/kvm/arm/vgic-v2-emul.c
+++ b/virt/kvm/arm/vgic-v2-emul.c
@@ -321,6 +321,11 @@
static const struct vgic_io_range vgic_dist_ranges[] = {
{
+ .base = GIC_DIST_SOFTINT,
+ .len = 4,
+ .handle_mmio = handle_mmio_sgi_reg,
+ },
+ {
.base = GIC_DIST_CTRL,
.len = 12,
.bits_per_irq = 0,
@@ -387,11 +392,6 @@
.handle_mmio = handle_mmio_cfg_reg,
},
{
- .base = GIC_DIST_SOFTINT,
- .len = 4,
- .handle_mmio = handle_mmio_sgi_reg,
- },
- {
.base = GIC_DIST_SGI_PENDING_CLEAR,
.len = VGIC_NR_SGIS,
.handle_mmio = handle_mmio_sgi_clear,
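
Hoisting GIC_DIST_SOFTINT (GICD_SGIR, the register a guest writes for every IPI) to the top of vgic_dist_ranges only helps if the table is scanned linearly per trapped access, which is the assumption behind this sketch of such a first-match lookup:

#include <stdio.h>

struct io_range {
	unsigned long base;
	unsigned long len;
};

/* Linear first-match scan: every trapped access walks from the top,
 * so the most frequently written register belongs at index 0. */
static int find_range(const struct io_range *r, int n, unsigned long off)
{
	int i;

	for (i = 0; i < n; i++)
		if (off >= r[i].base && off - r[i].base < r[i].len)
			return i;
	return -1;
}

int main(void)
{
	const struct io_range ranges[] = {
		{ 0xf00, 4 },	/* SGIR first: hit on every guest IPI */
		{ 0x000, 12 },	/* CTRL */
	};

	printf("%d\n", find_range(ranges, 2, 0xf00)); /* matches slot 0 */
	return 0;
}
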
diff --git a/virt/kvm/arm/vgic-v2.c b/virt/kvm/arm/vgic-v2.c
index ff02f08..67ec334 100644
--- a/virt/kvm/arm/vgic-v2.c
+++ b/virt/kvm/arm/vgic-v2.c
@@ -176,6 +176,15 @@
static struct vgic_params vgic_v2_params;
+static void vgic_cpu_init_lrs(void *params)
+{
+ struct vgic_params *vgic = params;
+ int i;
+
+ for (i = 0; i < vgic->nr_lr; i++)
+ writel_relaxed(0, vgic->vctrl_base + GICH_LR0 + (i * 4));
+}
+
/**
* vgic_v2_probe - probe for a GICv2 compatible interrupt controller in DT
* @node: pointer to the DT node
@@ -257,6 +266,9 @@
vgic->type = VGIC_V2;
vgic->max_gic_vcpus = VGIC_V2_MAX_CPUS;
+
+ on_each_cpu(vgic_cpu_init_lrs, vgic, 1);
+
*ops = &vgic_v2_ops;
*params = vgic;
goto out;
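
The new vgic_cpu_init_lrs() cross-call gives every CPU a known-clear set of list registers before the first guest runs, so later save/restore code can rely on their initial state. The GICH_LRn registers are 32 bits wide and laid out back to back, hence the stride-4 addressing; a small sketch of that layout arithmetic (the 0x100 base offset matches the GICv2 hyp interface, but take it as illustrative):

#include <stdio.h>

#define GICH_LR0 0x100	/* first list register in the GICH frame */

int main(void)
{
	int i, nr_lr = 4;	/* nr_lr comes from GICH_VTR in practice */

	for (i = 0; i < nr_lr; i++)
		printf("LR%d at GICH + 0x%x\n", i, GICH_LR0 + i * 4);
	return 0;
}
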
diff --git a/virt/kvm/arm/vgic-v3.c b/virt/kvm/arm/vgic-v3.c
index 453eafd..999bdc6 100644
--- a/virt/kvm/arm/vgic-v3.c
+++ b/virt/kvm/arm/vgic-v3.c
@@ -42,7 +42,7 @@
static struct vgic_lr vgic_v3_get_lr(const struct kvm_vcpu *vcpu, int lr)
{
struct vgic_lr lr_desc;
- u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)];
+ u64 val = vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr];
if (vcpu->kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)
lr_desc.irq = val & ICH_LR_VIRTUALID_MASK;
@@ -106,7 +106,7 @@
lr_val |= ((u64)lr_desc.hwirq) << ICH_LR_PHYS_ID_SHIFT;
}
- vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[VGIC_V3_LR_INDEX(lr)] = lr_val;
+ vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = lr_val;
if (!(lr_desc.state & LR_STATE_MASK))
vcpu->arch.vgic_cpu.vgic_v3.vgic_elrsr |= (1U << lr);
@@ -216,6 +216,11 @@
static struct vgic_params vgic_v3_params;
+static void vgic_cpu_init_lrs(void *params)
+{
+ kvm_call_hyp(__vgic_v3_init_lrs);
+}
+
/**
* vgic_v3_probe - probe for a GICv3 compatible interrupt controller in DT
* @node: pointer to the DT node
@@ -284,6 +289,8 @@
kvm_info("%s@%llx IRQ%d\n", vgic_node->name,
vcpu_res.start, vgic->maint_irq);
+ on_each_cpu(vgic_cpu_init_lrs, vgic, 1);
+
*ops = &vgic_v3_ops;
*params = vgic;
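
Dropping VGIC_V3_LR_INDEX() in favor of direct vgic_lr[lr] indexing presumes the hyp-side save/restore now stores list registers in natural order; the removed macro, reconstructed here from its apparent use (so treat the exact definition as an assumption), mapped indices back to front:

#define VGIC_V3_MAX_LRS		16
#define VGIC_V3_LR_INDEX(lr)	(VGIC_V3_MAX_LRS - 1 - (lr))	/* old, reversed */

/* old: vgic_lr[VGIC_V3_LR_INDEX(0)] -> slot 15 held LR0
 * new: vgic_lr[0]                   -> slot 0  holds LR0 */
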
diff --git a/virt/kvm/arm/vgic.c b/virt/kvm/arm/vgic.c
index 043032c..00429b3 100644
--- a/virt/kvm/arm/vgic.c
+++ b/virt/kvm/arm/vgic.c
@@ -1875,8 +1875,8 @@
static int vgic_vcpu_init_maps(struct kvm_vcpu *vcpu, int nr_irqs)
{
struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
-
- int sz = (nr_irqs - VGIC_NR_PRIVATE_IRQS) / 8;
+ int nr_longs = BITS_TO_LONGS(nr_irqs - VGIC_NR_PRIVATE_IRQS);
+ int sz = nr_longs * sizeof(unsigned long);
vgic_cpu->pending_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->active_shared = kzalloc(sz, GFP_KERNEL);
vgic_cpu->pend_act_shared = kzalloc(sz, GFP_KERNEL);
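
The new sizing matters because these three buffers back bitmaps, and the kernel's bitmap helpers read and write whole unsigned longs: byte-granular division can produce an allocation that is not long-aligned, so the helpers would touch memory past its end. A worked example, assuming 64-bit longs and VGIC_NR_PRIVATE_IRQS == 32:

#include <stdio.h>

#define BITS_PER_LONG	64
#define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	int nr_irqs = 64, nr_private = 32;	/* 32 shared interrupts */

	/* Old math: 4 bytes, but bitmap ops read/write 8-byte longs,
	 * so they would run past the end of the allocation. */
	printf("old sz = %d bytes\n", (nr_irqs - nr_private) / 8);

	/* New math: rounds up to one whole long, i.e. 8 bytes. */
	printf("new sz = %zu bytes\n",
	       BITS_TO_LONGS(nr_irqs - nr_private) * sizeof(unsigned long));
	return 0;
}
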
diff --git a/virt/kvm/async_pf.c b/virt/kvm/async_pf.c
index 3531599..b866374 100644
--- a/virt/kvm/async_pf.c
+++ b/virt/kvm/async_pf.c
@@ -109,8 +109,8 @@
/* cancel outstanding work queue item */
while (!list_empty(&vcpu->async_pf.queue)) {
struct kvm_async_pf *work =
- list_entry(vcpu->async_pf.queue.next,
- typeof(*work), queue);
+ list_first_entry(&vcpu->async_pf.queue,
+ typeof(*work), queue);
list_del(&work->queue);
#ifdef CONFIG_KVM_ASYNC_PF_SYNC
@@ -127,8 +127,8 @@
spin_lock(&vcpu->async_pf.lock);
while (!list_empty(&vcpu->async_pf.done)) {
struct kvm_async_pf *work =
- list_entry(vcpu->async_pf.done.next,
- typeof(*work), link);
+ list_first_entry(&vcpu->async_pf.done,
+ typeof(*work), link);
list_del(&work->link);
kmem_cache_free(async_pf_cache, work);
}
@@ -172,7 +172,7 @@
 * allocate without sleeping: if we are going to sleep anyway, we
 * may as well sleep faulting the page in
*/
- work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT);
+ work = kmem_cache_zalloc(async_pf_cache, GFP_NOWAIT | __GFP_NOWARN);
if (!work)
return 0;
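
The two list changes above are purely cosmetic, since list_first_entry(head, type, member) expands to list_entry((head)->next, type, member); the __GFP_NOWARN addition is the behavioral one, silencing the page allocator's failure splat because a failed allocation here is already handled gracefully (the fault is simply taken synchronously). A self-contained illustration of the list equivalence, with the kernel macros re-created for the sketch:

#include <stddef.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))
#define list_entry(ptr, type, member)  container_of(ptr, type, member)
#define list_first_entry(head, type, member) \
	list_entry((head)->next, type, member)

struct work { int id; struct list_head queue; };

int main(void)
{
	struct work w = { .id = 42 };
	struct list_head head = { &w.queue, &w.queue };

	w.queue.next = w.queue.prev = &head;

	/* Both expressions name the same element. */
	printf("%d %d\n",
	       list_entry(head.next, struct work, queue)->id,
	       list_first_entry(&head, struct work, queue)->id);
	return 0;
}
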
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index a11cfd2..1eae052 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -72,11 +72,11 @@
/* Default doubles per-vcpu halt_poll_ns. */
static unsigned int halt_poll_ns_grow = 2;
-module_param(halt_poll_ns_grow, int, S_IRUGO);
+module_param(halt_poll_ns_grow, uint, S_IRUGO | S_IWUSR);
/* Default resets per-vcpu halt_poll_ns. */
static unsigned int halt_poll_ns_shrink;
-module_param(halt_poll_ns_shrink, int, S_IRUGO);
+module_param(halt_poll_ns_shrink, uint, S_IRUGO | S_IWUSR);
/*
* Ordering of locks:
@@ -620,13 +620,10 @@
static void kvm_destroy_devices(struct kvm *kvm)
{
- struct list_head *node, *tmp;
+ struct kvm_device *dev, *tmp;
- list_for_each_safe(node, tmp, &kvm->devices) {
- struct kvm_device *dev =
- list_entry(node, struct kvm_device, vm_node);
-
- list_del(node);
+ list_for_each_entry_safe(dev, tmp, &kvm->devices, vm_node) {
+ list_del(&dev->vm_node);
dev->ops->destroy(dev);
}
}
@@ -1437,11 +1434,17 @@
{
unsigned long addr = __gfn_to_hva_many(slot, gfn, NULL, write_fault);
- if (addr == KVM_HVA_ERR_RO_BAD)
+ if (addr == KVM_HVA_ERR_RO_BAD) {
+ if (writable)
+ *writable = false;
return KVM_PFN_ERR_RO_FAULT;
+ }
- if (kvm_is_error_hva(addr))
+ if (kvm_is_error_hva(addr)) {
+ if (writable)
+ *writable = false;
return KVM_PFN_NOSLOT;
+ }
/* Do not map writable pfn in the readonly memslot. */
if (writable && memslot_is_readonly(slot)) {
@@ -1943,14 +1946,15 @@
static void grow_halt_poll_ns(struct kvm_vcpu *vcpu)
{
- int old, val;
+ unsigned int old, val, grow;
old = val = vcpu->halt_poll_ns;
+ grow = READ_ONCE(halt_poll_ns_grow);
/* 10us base */
- if (val == 0 && halt_poll_ns_grow)
+ if (val == 0 && grow)
val = 10000;
else
- val *= halt_poll_ns_grow;
+ val *= grow;
vcpu->halt_poll_ns = val;
trace_kvm_halt_poll_ns_grow(vcpu->vcpu_id, val, old);
@@ -1958,13 +1962,14 @@
static void shrink_halt_poll_ns(struct kvm_vcpu *vcpu)
{
- int old, val;
+ unsigned int old, val, shrink;
old = val = vcpu->halt_poll_ns;
- if (halt_poll_ns_shrink == 0)
+ shrink = READ_ONCE(halt_poll_ns_shrink);
+ if (shrink == 0)
val = 0;
else
- val /= halt_poll_ns_shrink;
+ val /= shrink;
vcpu->halt_poll_ns = val;
trace_kvm_halt_poll_ns_shrink(vcpu->vcpu_id, val, old);
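
With the parameters now writable at runtime (note the added S_IWUSR), each path snapshots its value once via READ_ONCE() so the decision and the arithmetic cannot observe two different settings. The grow/shrink arithmetic itself, traced through in a sketch with the default grow factor of 2 and a hypothetical shrink divisor of 2:

#include <stdio.h>

static unsigned int grow(unsigned int val, unsigned int factor)
{
	return (val == 0 && factor) ? 10000 : val * factor; /* 10us base */
}

static unsigned int shrink(unsigned int val, unsigned int divisor)
{
	return divisor ? val / divisor : 0; /* divisor 0 means reset */
}

int main(void)
{
	unsigned int ns = 0;

	ns = grow(ns, 2);	/* 0     -> 10000 */
	ns = grow(ns, 2);	/* 10000 -> 20000 */
	ns = shrink(ns, 2);	/* 20000 -> 10000 */
	ns = shrink(ns, 0);	/* reset -> 0     */
	printf("%u\n", ns);
	return 0;
}
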