Merge "ARM: dts: msm: Add modem cooling devices and sensors for kona"
diff --git a/Documentation/ABI/testing/procfs-concurrent_time b/Documentation/ABI/testing/procfs-concurrent_time
new file mode 100644
index 0000000..55b4142
--- /dev/null
+++ b/Documentation/ABI/testing/procfs-concurrent_time
@@ -0,0 +1,16 @@
+What: /proc/uid_concurrent_active_time
+Date: December 2018
+Contact: Connor O'Brien <connoro@google.com>
+Description:
+ The /proc/uid_concurrent_active_time file displays aggregated cputime
+ numbers for each uid, broken down by the total number of cores that were
+ active while the uid's task was running.
+
+What: /proc/uid_concurrent_policy_time
+Date: December 2018
+Contact: Connor O'Brien <connoro@google.com>
+Description:
+ The /proc/uid_concurrent_policy_time file displays aggregated cputime
+ numbers for each uid, broken down based on the cpufreq policy
+ of the core used by the uid's task and the number of cores associated
+ with that policy that were active while the uid's task was running.
diff --git a/Documentation/accounting/psi.txt b/Documentation/accounting/psi.txt
index b8ca28b..4fb40fe 100644
--- a/Documentation/accounting/psi.txt
+++ b/Documentation/accounting/psi.txt
@@ -63,6 +63,110 @@
which wouldn't necessarily make a dent in the time averages, or to
average trends over custom time frames.
+Monitoring for pressure thresholds
+==================================
+
+Users can register triggers and use poll() to be woken up when resource
+pressure exceeds certain thresholds.
+
+A trigger describes the maximum cumulative stall time over a specific
+time window, e.g. 100ms of total stall time within any 500ms window to
+generate a wakeup event.
+
+To register a trigger, the user has to open the psi interface file under
+/proc/pressure/ representing the resource to be monitored and write the
+desired threshold and time window. The open file descriptor should be
+used to wait for trigger events using select(), poll() or epoll().
+The following format is used:
+
+<some|full> <stall amount in us> <time window in us>
+
+For example, writing "some 150000 1000000" into /proc/pressure/memory
+would add a 150ms threshold for partial memory stall measured within
+a 1sec time window. Writing "full 50000 1000000" into /proc/pressure/io
+would add a 50ms threshold for full io stall measured within a 1sec time
+window.
+
+Triggers can be set on more than one psi metric, and more than one trigger
+for the same psi metric can be specified. However, each trigger requires a
+separate file descriptor in order to be polled independently of the others,
+so a separate open() syscall should be made for each trigger, even when
+opening the same psi interface file.
+
+A monitor activates only when the system enters a stall state for the
+monitored psi metric and deactivates upon exit from the stall state. While
+the system is in the stall state, psi signal growth is monitored at a rate
+of 10 times per tracking window.
+
+The kernel accepts window sizes ranging from 500ms to 10s, therefore the
+minimum monitoring update interval is 50ms and the maximum is 1s. The lower
+limit is set to prevent overly frequent polling. The upper limit is chosen
+as a number high enough that monitors are most likely not needed beyond it
+and psi averages can be used instead.
+
+When activated, a psi monitor stays active for at least the duration of one
+tracking window to avoid repeated activations/deactivations when the system
+is bouncing in and out of the stall state.
+
+Notifications to userspace are rate-limited to one per tracking window.
+
+The trigger will de-register when the file descriptor used to define the
+trigger is closed.
+
+Userspace monitor usage example
+===============================
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <poll.h>
+#include <string.h>
+#include <unistd.h>
+
+/*
+ * Monitor memory partial stall with 1s tracking window size
+ * and 150ms threshold.
+ */
+int main() {
+ const char trig[] = "some 150000 1000000";
+ struct pollfd fds;
+ int n;
+
+ fds.fd = open("/proc/pressure/memory", O_RDWR | O_NONBLOCK);
+ if (fds.fd < 0) {
+ printf("/proc/pressure/memory open error: %s\n",
+ strerror(errno));
+ return 1;
+ }
+ fds.events = POLLPRI;
+
+ if (write(fds.fd, trig, strlen(trig) + 1) < 0) {
+ printf("/proc/pressure/memory write error: %s\n",
+ strerror(errno));
+ return 1;
+ }
+
+ printf("waiting for events...\n");
+ while (1) {
+ n = poll(&fds, 1, -1);
+ if (n < 0) {
+ printf("poll error: %s\n", strerror(errno));
+ return 1;
+ }
+ if (fds.revents & POLLERR) {
+ printf("got POLLERR, event source is gone\n");
+ return 0;
+ }
+ if (fds.revents & POLLPRI) {
+ printf("event triggered!\n");
+ } else {
+ printf("unknown event received: 0x%x\n", fds.revents);
+ return 1;
+ }
+ }
+
+ return 0;
+}
+
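+The example above can be built and run as follows (the file name is
+illustrative; writing a trigger into the /proc/pressure/ files typically
+requires root):
+
+  gcc -o psi-monitor psi-monitor.c
+  sudo ./psi-monitor
+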
Cgroup2 interface
=================
@@ -71,3 +175,6 @@
into cgroups. Each subdirectory in the cgroupfs mountpoint contains
cpu.pressure, memory.pressure, and io.pressure files; the format is
the same as the /proc/pressure/ files.
+
+Per-cgroup psi monitors can be specified and used the same way as
+system-wide ones.
diff --git a/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt b/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt
new file mode 100644
index 0000000..d82d521
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/msm/sleepstate-smp2p.txt
@@ -0,0 +1,19 @@
+Qualcomm Technologies, Inc. SMSM Point-to-Point (SMP2P) Sleepstate driver
+
+Required properties:
+- compatible: should be one of the following:
+    - "qcom,smp2p-sleepstate"
+- qcom,smem-states: the relevant outgoing smp2p entry
+- interrupt-parent: specifies the phandle to the parent interrupt controller
+  this one is cascaded from
+- interrupts: specifies the interrupt number, the irq line to be used
+- interrupt-names: interrupt name string, must be "smp2p-sleepstate-in"
+
+Example:
+qcom,smp2p_sleepstate {
+ compatible = "qcom,smp2p-sleepstate";
+ qcom,smem-states = <&sleepstate_smp2p_out 0>;
+ interrupt-parent = <&sleepstate_smp2p_in>;
+ interrupts = <0 0>;
+ interrupt-names = "smp2p-sleepstate-in";
+};
diff --git a/Documentation/devicetree/bindings/arm/msm/wil6210.txt b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
index 23e4bd5..acc1915 100644
--- a/Documentation/devicetree/bindings/arm/msm/wil6210.txt
+++ b/Documentation/devicetree/bindings/arm/msm/wil6210.txt
@@ -9,11 +9,6 @@
Required properties:
- compatible: "qcom,wil6210"
-- qcom,smmu-support: Boolean flag indicating whether PCIe has SMMU support
-- qcom,smmu-s1-en: Boolean flag indicating whether SMMU stage1 should be enabled
-- qcom,smmu-fast-map: Boolean flag indicating whether SMMU fast mapping should be enabled
-- qcom,smmu-coherent: Boolean flag indicating SMMU dma and page table coherency
-- qcom,smmu-mapping: specifies the base address and size of SMMU space
- qcom,pcie-parent: phandle for the PCIe root complex to which 11ad card is connected
- Refer to "Documentation/devicetree/bindings/arm/msm/msm_bus.txt" for
the below optional properties:
@@ -29,6 +24,7 @@
- qcom,use-ext-supply: Boolean flag to indicate if 11ad SIP uses external power supply
- vdd-supply: phandle to 11ad VDD regulator node
- vddio-supply: phandle to 11ad VDDIO regulator node
+- vdd-ldo-supply: phandle to 11ad VDD LDO regulator node
- qcom,use-ext-clocks: Boolean flag to indicate if 11ad SIP uses external clocks
- clocks : List of phandle and clock specifier pairs
- clock-names : List of clock input name strings sorted in the same
@@ -39,11 +35,6 @@
Example:
wil6210: qcom,wil6210 {
compatible = "qcom,wil6210";
- qcom,smmu-support;
- qcom,smmu-s1-en;
- qcom,smmu-fast-map;
- qcom,smmu-coherent;
- qcom,smmu-mapping = <0x20000000 0xe0000000>;
qcom,pcie-parent = <&pcie1>;
qcom,wigig-en = <&tlmm 94 0>;
qcom,wigig-dc = <&tlmm 81 0>;
@@ -56,6 +47,7 @@
qcom,use-ext-supply;
vdd-supply= <&pm8998_s7>;
vddio-supply= <&pm8998_s5>;
+ vdd-ldo-supply = <&pm8150_l15>;
qcom,use-ext-clocks;
clocks = <&clock_gcc clk_rf_clk3>,
<&clock_gcc clk_rf_clk3_pin>;
@@ -63,3 +55,32 @@
qcom,keep-radio-on-during-sleep;
};
+A wil6210 client node under the PCIe RP node is needed for SMMU
+initialization by the PCI framework when devices are discovered.
+
+Required properties:
+
+- qcom,iommu-dma-addr-pool: specifies the base address and size of the SMMU space
+- qcom,iommu-dma: defines the SMMU mode - bypass/fastmap/disabled
+- qcom,iommu-pagetable: indicates SMMU DMA and page table coherency
+
+Example:
+&pcie1_rp {
+ #address-cells = <5>;
+ #size-cells = <0>;
+
+ wil6210_pci: wil6210_pci {
+ reg = <0 0 0 0 0>;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ qcom,iommu-group = <&wil6210_pci_iommu_group>;
+
+ wil6210_pci_iommu_group: wil6210_pci_iommu_group {
+ qcom,iommu-dma-addr-pool = <0x20000000 0xe0000000>;
+ qcom,iommu-dma = "fastmap";
+ qcom,iommu-pagetable = "coherent";
+ };
+ };
+};
diff --git a/Documentation/devicetree/bindings/display/msm/sde-dp.txt b/Documentation/devicetree/bindings/display/msm/sde-dp.txt
index a17b738..7881230 100644
--- a/Documentation/devicetree/bindings/display/msm/sde-dp.txt
+++ b/Documentation/devicetree/bindings/display/msm/sde-dp.txt
@@ -100,11 +100,14 @@
- compatible: Must be "qcom,msm-ext-disp"
- qcom,dp-low-power-hw-hpd: Low power hardware HPD feature enable control node
- qcom,phy-version: Phy version
+- qcom,pn-swap-lane-map: P/N swap configuration of each lane
- pinctrl-names: List of names to assign mdss pin states defined in pinctrl device node
Refer to pinctrl-bindings.txt
- pinctrl-<0..n>: Lists phandles each pointing to the pin configuration node within a pin
controller. These pin configurations are installed in the pinctrl
device node. Refer to pinctrl-bindings.txt
+- qcom,max-lclk-frequency-khz: An integer specifying the maximum link clock frequency in kHz supported by Display Port.
+- qcom,mst-fixed-topology-ports: u32 values of the MST output ports to reserve, starting from one
[Optional child nodes]: These nodes are for devices which are
dependent on msm_ext_disp. If msm_ext_disp is disabled then
diff --git a/Documentation/devicetree/bindings/media/video/msm-vidc.txt b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
index f31ced7..2f948e8 100644
--- a/Documentation/devicetree/bindings/media/video/msm-vidc.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-vidc.txt
@@ -120,17 +120,15 @@
boot/dts/include/dt-bindings/msm/msm-bus-ids.h for list of acceptable slaves
Optional properties:
-- qcom,bus-governor : governor to use when scaling bus, generally any commonly
- found devfreq governor might be used. In addition to those governors, the
- custom Venus governors, "msm-vidc-ddr" or "msm-vidc-llcc" are also
- acceptable values.
- In the absence of this property the "performance" governor is used.
-- qcom,bus-rage-kbps : an array of two items (<min max>) that indicate the
+- qcom,bus-range-kbps : an array of two items (<min max>) that indicate the
minimum and maximum acceptable votes for the bus.
In the absence of this property <0 INT_MAX> is used.
- qcom,ubwc-10bit : UBWC 10 bit content has different bus requirements,
this tag will be used to pick the appropriate bus as per the session profile
as shown below in example.
+- qcom,mode : Type of BW calculations to use.
+ "performance" - Use highest valid BW vote.
+ "venus-ddr", "venus-llcc" - Calculate for DDR, LLCC path.
Memory Heaps
============
diff --git a/Documentation/devicetree/bindings/net/qrtr-fifo-xprt.txt b/Documentation/devicetree/bindings/net/qrtr-fifo-xprt.txt
new file mode 100644
index 0000000..69debce
--- /dev/null
+++ b/Documentation/devicetree/bindings/net/qrtr-fifo-xprt.txt
@@ -0,0 +1,25 @@
+Qualcomm Technologies, Inc. IPC Router FIFO Transport
+
+Required properties:
+- compatible: should be "qcom,ipcr-fifo-xprt"
+- reg: the irq register to raise an interrupt
+- interrupts: the receiving interrupt line
+- qcom,ipc-shm: reference to shared memory phandle
+
+Example:
+
+ fifo_vipc_irq@176 {
+ compatible = "qcom,ipcr-fifo-xprt";
+ reg = <0x176>;
+ interrupts = <0x0 0x142 0x1>;
+ qcom,ipc-shm = <&ipc-shm>;
+ };
+
+ ipc-shm: shared-buffer@85af7000 {
+ compatible = "qcom,hypervisor-shared-memory";
+ phandle = <0x1e4>;
+ reg = <0x0 0x85af7000 0x0 0x9000>;
+ label = "ipc_shm";
+ qcom,tx-is-first;
+ };
+
diff --git a/Documentation/devicetree/bindings/pci/pci-msm.txt b/Documentation/devicetree/bindings/pci/pci-msm.txt
index e9d411a..362b19e 100644
--- a/Documentation/devicetree/bindings/pci/pci-msm.txt
+++ b/Documentation/devicetree/bindings/pci/pci-msm.txt
@@ -310,6 +310,11 @@
Value type: <u32>
Definition: Offset from PCIe PHY base to check if PCIe PHY status
+- qcom,phy-status-bit:
+ Usage: required
+ Value type: <u32>
+  Definition: Bit position used to check the PCIe PHY status
+
- qcom,phy-power-down-offset:
Usage: required
Value type: <u32>
@@ -468,6 +473,7 @@
qcom,pcie-phy-ver = <0x2101>; /* v2 version 1.01 */
qcom,phy-status-offset = <0x814>;
+ qcom,phy-status-bit = <6>;
qcom,phy-power-down-offset = <0x840>;
qcom,phy-sequence = <0x0840 0x03 0x0
0x0094 0x08 0x0
diff --git a/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt b/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
index 7da95f8..a42f491 100644
--- a/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
+++ b/Documentation/devicetree/bindings/platform/msm/qcom-geni-se.txt
@@ -13,6 +13,9 @@
Optional properties:
- qcom,iommu-s1-bypass: Boolean flag to bypass IOMMU stage 1 translation.
+- qcom,msm-bus,num-paths: Number of paths to vote for.
+- qcom,msm-bus,vectors-bus-ids: Master and slave Endpoint IDs for DDR
+ and Corex/2x paths.
Optional subnodes:
qcom,iommu_qupv3_geni_se_cb: Child node representing the QUPV3 context
diff --git a/Documentation/filesystems/overlayfs.txt b/Documentation/filesystems/overlayfs.txt
index eef7d9d..f205898 100644
--- a/Documentation/filesystems/overlayfs.txt
+++ b/Documentation/filesystems/overlayfs.txt
@@ -102,6 +102,29 @@
such as metadata and extended attributes are reported for the upper
directory only. These attributes of the lower directory are hidden.
+credentials
+-----------
+
+By default, all access to the upper, lower and work directories is
+performed with the mounter's MAC and DAC credentials, as recorded at
+mount time. Incoming accesses are still checked against the caller's
+credentials.
+
+In the case where the caller's MAC or DAC credentials do not overlap
+with the mounter's, a use case available in older versions of the
+driver, the override_creds mount flag can be turned off. This helps
+when the use pattern has a caller with legitimate credentials that the
+mounter lacks. Several unintended side effects will occur, though. A
+caller without certain key capabilities, or with lower privilege, will
+not always be able to delete files or directories, create nodes, or
+search some restricted directories. The ability to search and read a
+directory entry is spotty, because the cache mechanism does not retest
+credentials; it assumes that a privileged caller can fill the cache,
+after which a lower-privileged caller can read the directory cache.
+This uneven security model, where the cache, upperdir and workdir are
+opened with privilege but accessed without it (a form of privilege
+escalation), should only be used with a strict understanding of the
+side effects and of the security policies.
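+
+As a sketch under the assumptions above (the override_creds=off option
+name is taken from the preceding paragraph; all paths are illustrative):
+
+  # Let callers access the overlay with their own credentials instead
+  # of the mounter's:
+  mount -t overlay overlay -olowerdir=/lower,upperdir=/upper,workdir=/work,override_creds=off /merged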
+
whiteouts and opaque directories
--------------------------------
diff --git a/Documentation/power/energy-model.txt b/Documentation/power/energy-model.txt
new file mode 100644
index 0000000..5a23c6f
--- /dev/null
+++ b/Documentation/power/energy-model.txt
@@ -0,0 +1,169 @@
+ ====================
+ Energy Model of CPUs
+ ====================
+
+1. Overview
+-----------
+
+The Energy Model (EM) framework serves as an interface between drivers knowing
+the power consumed by CPUs at various performance levels, and the kernel
+subsystems willing to use that information to make energy-aware decisions.
+
+The source of the information about the power consumed by CPUs can vary greatly
+from one platform to another. These power costs can be estimated using
+devicetree data in some cases. In others, the firmware will know better.
+Alternatively, userspace might be best positioned. And so on. In order to avoid
+each and every client subsystem having to re-implement support for each and
+every possible source of information on its own, the EM framework intervenes as
+an abstraction layer which standardizes the format of power cost tables in the
+kernel, hence avoiding redundant work.
+
+The figure below depicts an example of drivers (Arm-specific here, but the
+approach is applicable to any architecture) providing power costs to the EM
+framework, and interested clients reading the data from it.
+
+ +---------------+ +-----------------+ +---------------+
+ | Thermal (IPA) | | Scheduler (EAS) | | Other |
+ +---------------+ +-----------------+ +---------------+
+ | | em_pd_energy() |
+ | | em_cpu_get() |
+ +---------+ | +---------+
+ | | |
+ v v v
+ +---------------------+
+ | Energy Model |
+ | Framework |
+ +---------------------+
+ ^ ^ ^
+ | | | em_register_perf_domain()
+ +----------+ | +---------+
+ | | |
+ +---------------+ +---------------+ +--------------+
+ | cpufreq-dt | | arm_scmi | | Other |
+ +---------------+ +---------------+ +--------------+
+ ^ ^ ^
+ | | |
+ +--------------+ +---------------+ +--------------+
+ | Device Tree | | Firmware | | ? |
+ +--------------+ +---------------+ +--------------+
+
+The EM framework manages power cost tables per 'performance domain' in the
+system. A performance domain is a group of CPUs whose performance is scaled
+together. Performance domains generally have a 1-to-1 mapping with CPUFreq
+policies. All CPUs in a performance domain are required to have the same
+micro-architecture. CPUs in different performance domains can have different
+micro-architectures.
+
+
+2. Core APIs
+------------
+
+ 2.1 Config options
+
+CONFIG_ENERGY_MODEL must be enabled to use the EM framework.
+
+
+ 2.2 Registration of performance domains
+
+Drivers are expected to register performance domains into the EM framework by
+calling the following API:
+
+ int em_register_perf_domain(cpumask_t *span, unsigned int nr_states,
+ struct em_data_callback *cb);
+
+Drivers must specify the CPUs of the performance domains using the cpumask
+argument, and provide a callback function returning <frequency, power> tuples
+for each capacity state. The callback function provided by the driver is free
+to fetch data from any relevant location (DT, firmware, ...), by any means
+deemed necessary. See Section 3. for an example of a driver implementing this
+callback, and kernel/power/energy_model.c for further documentation on this
+API.
+
+
+ 2.3 Accessing performance domains
+
+Subsystems interested in the energy model of a CPU can retrieve it using the
+em_cpu_get() API. The energy model tables are allocated once upon creation of
+the performance domains, and kept in memory untouched.
+
+The energy consumed by a performance domain can be estimated using the
+em_pd_energy() API. The estimation is performed assuming that the schedutil
+CPUfreq governor is in use.
+
+More details about the above APIs can be found in include/linux/energy_model.h.
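+
+For illustration, here is a minimal sketch of a (hypothetical) client using
+these two APIs. It assumes #include <linux/energy_model.h>, and max_util and
+sum_util describe the utilization landscape of the domain; see
+include/linux/energy_model.h for the exact semantics of the em_pd_energy()
+parameters:
+
+	static int foo_show_domain_energy(int cpu, unsigned long max_util,
+					  unsigned long sum_util)
+	{
+		struct em_perf_domain *pd;
+		unsigned long energy;
+
+		/* Retrieve the performance domain containing 'cpu' */
+		pd = em_cpu_get(cpu);
+		if (!pd)
+			return -ENODEV;
+
+		/*
+		 * Estimate the energy consumed by the whole domain for
+		 * the given utilization landscape.
+		 */
+		energy = em_pd_energy(pd, max_util, sum_util);
+		pr_info("domain energy estimate: %lu\n", energy);
+
+		return 0;
+	}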
+
+
+3. Example driver
+-----------------
+
+This section provides a simple example of a CPUFreq driver registering a
+performance domain in the Energy Model framework using the (fake) 'foo'
+protocol. The driver implements an est_power() function to be provided to the
+EM framework.
+
+ -> drivers/cpufreq/foo_cpufreq.c
+
+  static int est_power(unsigned long *mW, unsigned long *KHz, int cpu)
+  {
+  	long freq, power;
+
+  	/* Use the 'foo' protocol to ceil the frequency */
+  	freq = foo_get_freq_ceil(cpu, *KHz);
+  	if (freq < 0)
+  		return freq;
+
+  	/* Estimate the power cost for the CPU at the relevant freq. */
+  	power = foo_estimate_power(cpu, freq);
+  	if (power < 0)
+  		return power;
+
+  	/* Return the values to the EM framework */
+  	*mW = power;
+  	*KHz = freq;
+
+  	return 0;
+  }
+
+  static int foo_cpufreq_init(struct cpufreq_policy *policy)
+  {
+  	struct em_data_callback em_cb = EM_DATA_CB(est_power);
+  	int nr_opp, ret;
+
+  	/* Do the actual CPUFreq init work ... */
+  	ret = do_foo_cpufreq_init(policy);
+  	if (ret)
+  		return ret;
+
+  	/* Find the number of OPPs for this policy */
+  	nr_opp = foo_get_nr_opp(policy);
+
+  	/* And register the new performance domain */
+  	em_register_perf_domain(policy->cpus, nr_opp, &em_cb);
+
+  	return 0;
+  }
+
+
+4. Support for legacy Energy Models (DEPRECATED)
+------------------------------------------------
+
+The Android kernel version 4.14 and before used a different type of EM for EAS,
+referred to as the 'legacy' EM. The legacy EM relies on the out-of-tree
+'sched-energy-costs' devicetree bindings to provide the kernel with power costs.
+The usage of such bindings in Android has now been DEPRECATED in favour of the
+mainline equivalents.
+
+The currently supported alternatives to populate the EM include:
+ - using a firmware-based solution such as Arm SCMI (supported in
+ drivers/cpufreq/scmi-cpufreq.c);
+ - using the 'dynamic-power-coefficient' devicetree binding together with
+ PM_OPP. See the of_dev_pm_opp_get_cpu_power() helper in PM_OPP, and the
+ reference implementation in drivers/cpufreq/cpufreq-dt.c.
+
+In order to ease the transition to the new EM format, Android 4.19 also provides
+a compatibility driver able to load a legacy EM from DT into the EM framework.
+*** Please note that THIS FEATURE WILL NOT BE AVAILABLE in future Android
+kernels, and as such it must be considered only as a temporary workaround. ***
+
+If you know what you're doing and still want to use this driver, you need to set
+CONFIG_LEGACY_ENERGY_MODEL_DT=y in your kernel configuration to enable it.
diff --git a/Documentation/scheduler/sched-energy.txt b/Documentation/scheduler/sched-energy.txt
new file mode 100644
index 0000000..197d81f
--- /dev/null
+++ b/Documentation/scheduler/sched-energy.txt
@@ -0,0 +1,425 @@
+ =======================
+ Energy Aware Scheduling
+ =======================
+
+1. Introduction
+---------------
+
+Energy Aware Scheduling (or EAS) gives the scheduler the ability to predict
+the impact of its decisions on the energy consumed by CPUs. EAS relies on an
+Energy Model (EM) of the CPUs to select an energy efficient CPU for each task,
+with a minimal impact on throughput. This document aims at providing an
+introduction to how EAS works, what the main design decisions behind it are,
+and what is needed to get it to run.
+
+Before going any further, please note that at the time of writing:
+
+ /!\ EAS does not support platforms with symmetric CPU topologies /!\
+
+EAS operates only on heterogeneous CPU topologies (such as Arm big.LITTLE)
+because this is where the potential for saving energy through scheduling is
+the highest.
+
+The actual EM used by EAS is _not_ maintained by the scheduler, but by a
+dedicated framework. For details about this framework and what it provides,
+please refer to its documentation (see Documentation/power/energy-model.txt).
+
+
+2. Background and Terminology
+-----------------------------
+
+To make it clear from the start:
+ - energy = [joule] (resource like a battery on powered devices)
+ - power = energy/time = [joule/second] = [watt]
+
+The goal of EAS is to minimize energy, while still getting the job done. That
+is, we want to maximize:
+
+ performance [inst/s]
+ --------------------
+ power [W]
+
+which is equivalent to minimizing:
+
+ energy [J]
+ -----------
+ instruction
+
+while still getting 'good' performance. It is essentially an alternative
+optimization objective to the current performance-only objective for the
+scheduler. This alternative considers two objectives: energy-efficiency and
+performance.
+
+The idea behind introducing an EM is to allow the scheduler to evaluate the
+implications of its decisions rather than blindly applying energy-saving
+techniques that may have positive effects only on some platforms. At the same
+time, the EM must be as simple as possible to minimize the scheduler latency
+impact.
+
+In short, EAS changes the way CFS tasks are assigned to CPUs. When it is time
+for the scheduler to decide where a task should run (during wake-up), the EM
+is used to break the tie between several good CPU candidates and pick the one
+that is predicted to yield the best energy consumption without harming the
+system's throughput. The predictions made by EAS rely on specific elements of
+knowledge about the platform's topology, which include the 'capacity' of CPUs,
+and their respective energy costs.
+
+
+3. Topology information
+-----------------------
+
+EAS (as well as the rest of the scheduler) uses the notion of 'capacity' to
+differentiate CPUs with different computing throughput. The 'capacity' of a CPU
+represents the amount of work it can absorb when running at its highest
+frequency compared to the most capable CPU of the system. Capacity values are
+normalized in a 1024 range, and are comparable with the utilization signals of
+tasks and CPUs computed by the Per-Entity Load Tracking (PELT) mechanism. Thanks
+to capacity and utilization values, EAS is able to estimate how big/busy a
+task/CPU is, and to take this into consideration when evaluating performance vs
+energy trade-offs. The capacity of CPUs is provided via arch-specific code
+through the arch_scale_cpu_capacity() callback.
+
+The rest of platform knowledge used by EAS is directly read from the Energy
+Model (EM) framework. The EM of a platform is composed of a power cost table
+per 'performance domain' in the system (see Documentation/power/energy-model.txt
+for further details about performance domains).
+
+The scheduler manages references to the EM objects in the topology code when the
+scheduling domains are built, or re-built. For each root domain (rd), the
+scheduler maintains a singly linked list of all performance domains intersecting
+the current rd->span. Each node in the list contains a pointer to a struct
+em_perf_domain as provided by the EM framework.
+
+The lists are attached to the root domains in order to cope with exclusive
+cpuset configurations. Since the boundaries of exclusive cpusets do not
+necessarily match those of performance domains, the lists of different root
+domains can contain duplicate elements.
+
+Example 1.
+ Let us consider a platform with 12 CPUs, split in 3 performance domains
+ (pd0, pd4 and pd8), organized as follows:
+
+ CPUs: 0 1 2 3 4 5 6 7 8 9 10 11
+ PDs: |--pd0--|--pd4--|---pd8---|
+ RDs: |----rd1----|-----rd2-----|
+
+ Now, consider that userspace decided to split the system with two
+ exclusive cpusets, hence creating two independent root domains, each
+ containing 6 CPUs. The two root domains are denoted rd1 and rd2 in the
+ above figure. Since pd4 intersects with both rd1 and rd2, it will be
+ present in the linked list '->pd' attached to each of them:
+ * rd1->pd: pd0 -> pd4
+ * rd2->pd: pd4 -> pd8
+
+ Please note that the scheduler will create two duplicate list nodes for
+ pd4 (one for each list). However, both just hold a pointer to the same
+ shared data structure of the EM framework.
+
+Since the access to these lists can happen concurrently with hotplug and other
+things, they are protected by RCU, like the rest of topology structures
+manipulated by the scheduler.
+
+EAS also maintains a static key (sched_energy_present) which is enabled when at
+least one root domain meets all conditions for EAS to start. Those conditions
+are summarized in Section 6.
+
+
+4. Energy-Aware task placement
+------------------------------
+
+EAS overrides the CFS task wake-up balancing code. It uses the EM of the
+platform and the PELT signals to choose an energy-efficient target CPU during
+wake-up balance. When EAS is enabled, select_task_rq_fair() calls
+find_energy_efficient_cpu() to do the placement decision. This function looks
+for the CPU with the highest spare capacity (CPU capacity - CPU utilization) in
+each performance domain since it is the one which will allow us to keep the
+frequency the lowest. Then, the function checks if placing the task there could
+save energy compared to leaving it on prev_cpu, i.e. the CPU where the task ran
+in its previous activation.
+
+find_energy_efficient_cpu() uses compute_energy() to estimate what will be the
+energy consumed by the system if the waking task was migrated. compute_energy()
+looks at the current utilization landscape of the CPUs and adjusts it to
+'simulate' the task migration. The EM framework provides the em_pd_energy() API
+which computes the expected energy consumption of each performance domain for
+the given utilization landscape.
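+
+The per-CPU term of this estimate, as used in the worked example below, boils
+down to scaling the power cost of the current OPP by the CPU's utilization:
+
+	cpu_energy = cpu_util / opp_capacity * opp_power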
+
+An example of energy-optimized task placement decision is detailed below.
+
+Example 2.
+ Let us consider a (fake) platform with 2 independent performance domains
+ composed of two CPUs each. CPU0 and CPU1 are little CPUs; CPU2 and CPU3
+ are big.
+
+ The scheduler must decide where to place a task P whose util_avg = 200
+ and prev_cpu = 0.
+
+ The current utilization landscape of the CPUs is depicted on the graph
+ below. CPUs 0-3 have a util_avg of 400, 100, 600 and 500, respectively.
+ Each performance domain has three Operating Performance Points (OPPs).
+ The CPU capacity and power cost associated with each OPP is listed in
+ the Energy Model table. The util_avg of P is shown on the figures
+ below as 'PP'.
+
+ CPU util.
+ 1024 - - - - - - - Energy Model
+ +-----------+-------------+
+ | Little | Big |
+ 768 ============= +-----+-----+------+------+
+ | Cap | Pwr | Cap | Pwr |
+ +-----+-----+------+------+
+ 512 =========== - ##- - - - - | 170 | 50 | 512 | 400 |
+ ## ## | 341 | 150 | 768 | 800 |
+ 341 -PP - - - - ## ## | 512 | 300 | 1024 | 1700 |
+ PP ## ## +-----+-----+------+------+
+ 170 -## - - - - ## ##
+ ## ## ## ##
+ ------------ -------------
+ CPU0 CPU1 CPU2 CPU3
+
+ Current OPP: ===== Other OPP: - - - util_avg (100 each): ##
+
+
+ find_energy_efficient_cpu() will first look for the CPUs with the
+ maximum spare capacity in the two performance domains. In this example,
+ CPU1 and CPU3. Then it will estimate the energy of the system if P was
+ placed on either of them, and check if that would save some energy
+ compared to leaving P on CPU0. EAS assumes that OPPs follow utilization
+ (which is coherent with the behaviour of the schedutil CPUFreq
+ governor, see Section 6. for more details on this topic).
+
+ Case 1. P is migrated to CPU1
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ 1024 - - - - - - -
+
+ Energy calculation:
+ 768 ============= * CPU0: 200 / 341 * 150 = 88
+ * CPU1: 300 / 341 * 150 = 131
+ * CPU2: 600 / 768 * 800 = 625
+ 512 - - - - - - - ##- - - - - * CPU3: 500 / 768 * 800 = 520
+ ## ## => total_energy = 1364
+ 341 =========== ## ##
+ PP ## ##
+ 170 -## - - PP- ## ##
+ ## ## ## ##
+ ------------ -------------
+ CPU0 CPU1 CPU2 CPU3
+
+
+ Case 2. P is migrated to CPU3
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ 1024 - - - - - - -
+
+ Energy calculation:
+ 768 ============= * CPU0: 200 / 341 * 150 = 88
+ * CPU1: 100 / 341 * 150 = 43
+ PP * CPU2: 600 / 768 * 800 = 625
+ 512 - - - - - - - ##- - -PP - * CPU3: 700 / 768 * 800 = 729
+ ## ## => total_energy = 1485
+ 341 =========== ## ##
+ ## ##
+ 170 -## - - - - ## ##
+ ## ## ## ##
+ ------------ -------------
+ CPU0 CPU1 CPU2 CPU3
+
+
+ Case 3. P stays on prev_cpu / CPU 0
+ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+ 1024 - - - - - - -
+
+ Energy calculation:
+ 768 ============= * CPU0: 400 / 512 * 300 = 234
+ * CPU1: 100 / 512 * 300 = 58
+ * CPU2: 600 / 768 * 800 = 625
+ 512 =========== - ##- - - - - * CPU3: 500 / 768 * 800 = 520
+ ## ## => total_energy = 1437
+ 341 -PP - - - - ## ##
+ PP ## ##
+ 170 -## - - - - ## ##
+ ## ## ## ##
+ ------------ -------------
+ CPU0 CPU1 CPU2 CPU3
+
+
+ From these calculations, Case 1 has the lowest total energy. So CPU 1
+ is the best candidate from an energy-efficiency standpoint.
+
+Big CPUs are generally more power hungry than the little ones and are thus used
+mainly when a task doesn't fit the littles. However, little CPUs aren't always
+necessarily more energy-efficient than big CPUs. For some systems, the high OPPs
+of the little CPUs can be less energy-efficient than the lowest OPPs of the
+bigs, for example. So, if the little CPUs happen to have enough utilization at
+a specific point in time, a small task waking up at that moment could be better
+of executing on the big side in order to save energy, even though it would fit
+on the little side.
+
+And even in the case where all OPPs of the big CPUs are less energy-efficient
+than those of the little, using the big CPUs for a small task might still, under
+specific conditions, save energy. Indeed, placing a task on a little CPU can
+result in raising the OPP of the entire performance domain, and that will
+increase the cost of the tasks already running there. If the waking task is
+placed on a big CPU, its own execution cost might be higher than if it was
+running on a little, but it won't impact the other tasks of the little CPUs
+which will keep running at a lower OPP. So, when considering the total energy
+consumed by CPUs, the extra cost of running that one task on a big core can be
+smaller than the cost of raising the OPP on the little CPUs for all the other
+tasks.
+
+The examples above would be nearly impossible to get right in a generic way, and
+for all platforms, without knowing the cost of running at different OPPs on all
+CPUs of the system. Thanks to its EM-based design, EAS should cope with them
+correctly without too much trouble. However, in order to ensure a minimal
+impact on throughput for high-utilization scenarios, EAS also implements another
+mechanism called 'over-utilization'.
+
+
+5. Over-utilization
+-------------------
+
+From a general standpoint, the use-cases where EAS can help the most are those
+involving a light/medium CPU utilization. Whenever long CPU-bound tasks are
+being run, they will require all of the available CPU capacity, and there isn't
+much that can be done by the scheduler to save energy without severely harming
+throughput. In order to avoid hurting performance with EAS, CPUs are flagged as
+'over-utilized' as soon as they are used at more than 80% of their compute
+capacity. As long as no CPUs are over-utilized in a root domain, load balancing
+is disabled and EAS overrides the wake-up balancing code. EAS is likely to load
+the most energy efficient CPUs of the system more than the others if that can be
+done without harming throughput. So, the load-balancer is disabled to prevent
+it from breaking the energy-efficient task placement found by EAS. It is safe to
+do so when the system isn't overutilized since being below the 80% tipping point
+implies that:
+
+ a. there is some idle time on all CPUs, so the utilization signals used by
+ EAS are likely to accurately represent the 'size' of the various tasks
+ in the system;
+ b. all tasks should already be provided with enough CPU capacity,
+ regardless of their nice values;
+ c. since there is spare capacity all tasks must be blocking/sleeping
+ regularly and balancing at wake-up is sufficient.
+
+As soon as one CPU goes above the 80% tipping point, at least one of the three
+assumptions above becomes incorrect. In this scenario, the 'overutilized' flag
+is raised for the entire root domain, EAS is disabled, and the load-balancer is
+re-enabled. By doing so, the scheduler falls back onto load-based algorithms for
+wake-up and load balance under CPU-bound conditions. This provides a better
+respect of the nice values of tasks.
+
+Since the notion of overutilization largely relies on detecting whether or not
+there is some idle time in the system, the CPU capacity 'stolen' by higher
+(than CFS) scheduling classes (as well as IRQ) must be taken into account. As
+such, the detection of overutilization accounts for the capacity used not only
+by CFS tasks, but also by the other scheduling classes and IRQ.
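+
+As a sketch, the per-CPU check boils down to comparing utilization against
+~80% of capacity. The code below is an assumption based on the
+capacity_margin value (1280/1024) used by the scheduler around this kernel
+version, not a verbatim copy of kernel/sched/fair.c:
+
+	static bool cpu_overutilized(int cpu)
+	{
+		/* true when less than ~20% of the capacity is left idle */
+		return (capacity_of(cpu) * 1024) < (cpu_util(cpu) * 1280);
+	}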
+
+
+6. Dependencies and requirements for EAS
+----------------------------------------
+
+Energy Aware Scheduling depends on the CPUs of the system having specific
+hardware properties and on other features of the kernel being enabled. This
+section lists these dependencies and provides hints as to how they can be met.
+
+
+ 6.1 - Asymmetric CPU topology
+
+As mentioned in the introduction, EAS is only supported on platforms with
+asymmetric CPU topologies for now. This requirement is checked at run-time by
+looking for the presence of the SD_ASYM_CPUCAPACITY flag when the scheduling
+domains are built.
+
+The flag is set/cleared automatically by the scheduler topology code whenever
+there are CPUs with different capacities in a root domain. The capacities of
+CPUs are provided by arch-specific code through the arch_scale_cpu_capacity()
+callback. As an example, arm and arm64 share an implementation of this callback
+which uses a combination of CPUFreq data and device-tree bindings to compute the
+capacity of CPUs (see drivers/base/arch_topology.c for more details).
+
+So, in order to use EAS on your platform your architecture must implement the
+arch_scale_cpu_capacity() callback, and some of the CPUs must have a lower
+capacity than others.
+
+Please note that EAS is not fundamentally incompatible with SMP, but no
+significant savings on SMP platforms have been observed yet. This restriction
+could be amended in the future if proven otherwise.
+
+
+ 6.2 - Energy Model presence
+
+EAS uses the EM of a platform to estimate the impact of scheduling decisions on
+energy. So, your platform must provide power cost tables to the EM framework in
+order to make EAS start. To do so, please refer to documentation of the
+independent EM framework in Documentation/power/energy-model.txt.
+
+Please also note that the scheduling domains need to be re-built after the
+EM has been registered in order to start EAS.
+
+
+ 6.3 - Energy Model complexity
+
+The task wake-up path is very latency-sensitive. When the EM of a platform is
+too complex (too many CPUs, too many performance domains, too many performance
+states, ...), the cost of using it in the wake-up path can become prohibitive.
+The energy-aware wake-up algorithm has a complexity of:
+
+ C = Nd * (Nc + Ns)
+
+with: Nd the number of performance domains; Nc the number of CPUs; and Ns the
+total number of OPPs (ex: for two perf. domains with 4 OPPs each, Ns = 8).
+
+A complexity check is performed at the root domain level, when scheduling
+domains are built. EAS will not start on a root domain if its C happens to be
+higher than the completely arbitrary EM_MAX_COMPLEXITY threshold (2048 at the
+time of writing).
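+
+As a worked example on a hypothetical platform: a big.LITTLE system with two
+performance domains of four CPUs each (Nd = 2, Nc = 8) and 16 OPPs per
+domain (Ns = 32) gives:
+
+	C = 2 * (8 + 32) = 80
+
+which is far below EM_MAX_COMPLEXITY, so EAS would be allowed to start on
+such a root domain.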
+
+If you really want to use EAS but the complexity of your platform's Energy
+Model is too high to be used with a single root domain, you're left with only
+two possible options:
+
+ 1. split your system into separate, smaller, root domains using exclusive
+ cpusets and enable EAS locally on each of them. This option has the
+ benefit to work out of the box but the drawback of preventing load
+	benefit of working out of the box but the drawback of preventing load
+ overall;
+ 2. submit patches to reduce the complexity of the EAS wake-up algorithm,
+ hence enabling it to cope with larger EMs in reasonable time.
+
+
+ 6.4 - Schedutil governor
+
+EAS tries to predict at which OPP the CPUs will be running in the near future
+in order to estimate their energy consumption. To do so, it is assumed that OPPs
+of CPUs follow their utilization.
+
+Although it is very difficult to provide hard guarantees regarding the accuracy
+of this assumption in practice (because the hardware might not do what it is
+told to do, for example), schedutil as opposed to other CPUFreq governors at
+least _requests_ frequencies calculated using the utilization signals.
+Consequently, the only sane governor to use together with EAS is schedutil,
+because it is the only one providing some degree of consistency between
+frequency requests and energy predictions.
+
+Using EAS with any other governor than schedutil is not supported.
+
+
+ 6.5 Scale-invariant utilization signals
+
+In order to make accurate prediction across CPUs and for all performance
+states, EAS needs frequency-invariant and CPU-invariant PELT signals. These can
+be obtained using the architecture-defined arch_scale{cpu,freq}_capacity()
+callbacks.
+
+Using EAS on a platform that doesn't implement these two callbacks is not
+supported.
+
+
+ 6.6 Multithreading (SMT)
+
+EAS in its current form is SMT unaware and is not able to leverage
+multithreaded hardware to save energy. EAS considers threads as independent
+CPUs, which can actually be counter-productive for both performance and energy.
+
+EAS on SMT is not supported.
diff --git a/Makefile b/Makefile
index c05200f..277bce1 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
VERSION = 4
PATCHLEVEL = 19
-SUBLEVEL = 27
+SUBLEVEL = 30
EXTRAVERSION =
NAME = "People's Front"
diff --git a/arch/arm/boot/dts/exynos3250.dtsi b/arch/arm/boot/dts/exynos3250.dtsi
index 27a1ee2..94efca7 100644
--- a/arch/arm/boot/dts/exynos3250.dtsi
+++ b/arch/arm/boot/dts/exynos3250.dtsi
@@ -168,6 +168,9 @@
interrupt-controller;
#interrupt-cells = <3>;
interrupt-parent = <&gic>;
+ clock-names = "clkout8";
+ clocks = <&cmu CLK_FIN_PLL>;
+ #clock-cells = <1>;
};
mipi_phy: video-phy {
diff --git a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
index a09e46c..00820d2 100644
--- a/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
+++ b/arch/arm/boot/dts/exynos4412-odroid-common.dtsi
@@ -49,7 +49,7 @@
};
emmc_pwrseq: pwrseq {
- pinctrl-0 = <&sd1_cd>;
+ pinctrl-0 = <&emmc_rstn>;
pinctrl-names = "default";
compatible = "mmc-pwrseq-emmc";
reset-gpios = <&gpk1 2 GPIO_ACTIVE_LOW>;
@@ -161,12 +161,6 @@
cpu0-supply = <&buck2_reg>;
};
-/* RSTN signal for eMMC */
-&sd1_cd {
- samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
- samsung,pin-drv = <EXYNOS4_PIN_DRV_LV1>;
-};
-
&pinctrl_1 {
gpio_power_key: power_key {
samsung,pins = "gpx1-3";
@@ -184,6 +178,11 @@
samsung,pins = "gpx3-7";
samsung,pin-pud = <EXYNOS_PIN_PULL_DOWN>;
};
+
+ emmc_rstn: emmc-rstn {
+ samsung,pins = "gpk1-2";
+ samsung,pin-pud = <EXYNOS_PIN_PULL_NONE>;
+ };
};
&ehci {
diff --git a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
index 2f4f408..27214e6 100644
--- a/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
+++ b/arch/arm/boot/dts/exynos5422-odroid-core.dtsi
@@ -334,7 +334,7 @@
buck8_reg: BUCK8 {
regulator-name = "vdd_1.8v_ldo";
regulator-min-microvolt = <800000>;
- regulator-max-microvolt = <1500000>;
+ regulator-max-microvolt = <2000000>;
regulator-always-on;
regulator-boot-on;
};
diff --git a/arch/arm/boot/dts/imx6sx.dtsi b/arch/arm/boot/dts/imx6sx.dtsi
index 844caa3..50083ce 100644
--- a/arch/arm/boot/dts/imx6sx.dtsi
+++ b/arch/arm/boot/dts/imx6sx.dtsi
@@ -462,7 +462,7 @@
};
gpt: gpt@2098000 {
- compatible = "fsl,imx6sx-gpt", "fsl,imx31-gpt";
+ compatible = "fsl,imx6sx-gpt", "fsl,imx6dl-gpt";
reg = <0x02098000 0x4000>;
interrupts = <GIC_SPI 55 IRQ_TYPE_LEVEL_HIGH>;
clocks = <&clks IMX6SX_CLK_GPT_BUS>,
diff --git a/arch/arm/boot/dts/meson.dtsi b/arch/arm/boot/dts/meson.dtsi
index 0d9faf1..a86b890 100644
--- a/arch/arm/boot/dts/meson.dtsi
+++ b/arch/arm/boot/dts/meson.dtsi
@@ -263,7 +263,7 @@
compatible = "amlogic,meson6-dwmac", "snps,dwmac";
reg = <0xc9410000 0x10000
0xc1108108 0x4>;
- interrupts = <GIC_SPI 8 IRQ_TYPE_EDGE_RISING>;
+ interrupts = <GIC_SPI 8 IRQ_TYPE_LEVEL_HIGH>;
interrupt-names = "macirq";
status = "disabled";
};
diff --git a/arch/arm/boot/dts/meson8b-odroidc1.dts b/arch/arm/boot/dts/meson8b-odroidc1.dts
index ef3177d3..8fdeeff 100644
--- a/arch/arm/boot/dts/meson8b-odroidc1.dts
+++ b/arch/arm/boot/dts/meson8b-odroidc1.dts
@@ -125,7 +125,6 @@
/* Realtek RTL8211F (0x001cc916) */
eth_phy: ethernet-phy@0 {
reg = <0>;
- eee-broken-1000t;
interrupt-parent = <&gpio_intc>;
/* GPIOH_3 */
interrupts = <17 IRQ_TYPE_LEVEL_LOW>;
@@ -172,8 +171,7 @@
cap-sd-highspeed;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&tflash_vdd>;
vqmmc-supply = <&tf_io>;
diff --git a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
index f585361..6ac02be 100644
--- a/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
+++ b/arch/arm/boot/dts/meson8m2-mxiii-plus.dts
@@ -206,8 +206,7 @@
cap-sd-highspeed;
disable-wp;
- cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_HIGH>;
- cd-inverted;
+ cd-gpios = <&gpio CARD_6 GPIO_ACTIVE_LOW>;
vmmc-supply = <&vcc_3v3>;
};
diff --git a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
index ddc7a7b..f57acf8 100644
--- a/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
+++ b/arch/arm/boot/dts/motorola-cpcap-mapphone.dtsi
@@ -105,7 +105,7 @@
interrupts-extended = <
&cpcap 15 0 &cpcap 14 0 &cpcap 28 0 &cpcap 19 0
&cpcap 18 0 &cpcap 17 0 &cpcap 16 0 &cpcap 49 0
- &cpcap 48 1
+ &cpcap 48 0
>;
interrupt-names =
"id_ground", "id_float", "se0conn", "vbusvld",
diff --git a/arch/arm/boot/dts/omap3-n950-n9.dtsi b/arch/arm/boot/dts/omap3-n950-n9.dtsi
index 0d9b853..e142e6c 100644
--- a/arch/arm/boot/dts/omap3-n950-n9.dtsi
+++ b/arch/arm/boot/dts/omap3-n950-n9.dtsi
@@ -370,6 +370,19 @@
compatible = "ti,omap2-onenand";
reg = <0 0 0x20000>; /* CS0, offset 0, IO size 128K */
+ /*
+ * These timings are based on CONFIG_OMAP_GPMC_DEBUG=y reported
+ * bootloader set values when booted with v4.19 using both N950
+ * and N9 devices (OneNAND Manufacturer: Samsung):
+ *
+ * gpmc cs0 before gpmc_cs_program_settings:
+ * cs0 GPMC_CS_CONFIG1: 0xfd001202
+ * cs0 GPMC_CS_CONFIG2: 0x00181800
+ * cs0 GPMC_CS_CONFIG3: 0x00030300
+ * cs0 GPMC_CS_CONFIG4: 0x18001804
+ * cs0 GPMC_CS_CONFIG5: 0x03171d1d
+ * cs0 GPMC_CS_CONFIG6: 0x97080000
+ */
gpmc,sync-read;
gpmc,sync-write;
gpmc,burst-length = <16>;
@@ -379,26 +392,27 @@
gpmc,device-width = <2>;
gpmc,mux-add-data = <2>;
gpmc,cs-on-ns = <0>;
- gpmc,cs-rd-off-ns = <87>;
- gpmc,cs-wr-off-ns = <87>;
+ gpmc,cs-rd-off-ns = <122>;
+ gpmc,cs-wr-off-ns = <122>;
gpmc,adv-on-ns = <0>;
- gpmc,adv-rd-off-ns = <10>;
- gpmc,adv-wr-off-ns = <10>;
- gpmc,oe-on-ns = <15>;
- gpmc,oe-off-ns = <87>;
+ gpmc,adv-rd-off-ns = <15>;
+ gpmc,adv-wr-off-ns = <15>;
+ gpmc,oe-on-ns = <20>;
+ gpmc,oe-off-ns = <122>;
gpmc,we-on-ns = <0>;
- gpmc,we-off-ns = <87>;
- gpmc,rd-cycle-ns = <112>;
- gpmc,wr-cycle-ns = <112>;
- gpmc,access-ns = <81>;
+ gpmc,we-off-ns = <122>;
+ gpmc,rd-cycle-ns = <148>;
+ gpmc,wr-cycle-ns = <148>;
+ gpmc,access-ns = <117>;
gpmc,page-burst-access-ns = <15>;
gpmc,bus-turnaround-ns = <0>;
gpmc,cycle2cycle-delay-ns = <0>;
gpmc,wait-monitoring-ns = <0>;
- gpmc,clk-activation-ns = <5>;
- gpmc,wr-data-mux-bus-ns = <30>;
- gpmc,wr-access-ns = <81>;
- gpmc,sync-clk-ps = <15000>;
+ gpmc,clk-activation-ns = <10>;
+ gpmc,wr-data-mux-bus-ns = <40>;
+ gpmc,wr-access-ns = <117>;
+
+	gpmc,sync-clk-ps = <15000>;	/* TBC; where does this value come from? */
/*
* MTD partition table corresponding to Nokia's MeeGo 1.2
diff --git a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
index 5d23667..25540b7 100644
--- a/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
+++ b/arch/arm/boot/dts/sun8i-h3-beelink-x2.dts
@@ -53,7 +53,7 @@
aliases {
serial0 = &uart0;
- /* ethernet0 is the H3 emac, defined in sun8i-h3.dtsi */
+ ethernet0 = &emac;
ethernet1 = &sdiowifi;
};
diff --git a/arch/arm/plat-pxa/ssp.c b/arch/arm/plat-pxa/ssp.c
index ed36dca..f519199 100644
--- a/arch/arm/plat-pxa/ssp.c
+++ b/arch/arm/plat-pxa/ssp.c
@@ -190,8 +190,6 @@
if (ssp == NULL)
return -ENODEV;
- iounmap(ssp->mmio_base);
-
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
release_mem_region(res->start, resource_size(res));
@@ -201,7 +199,6 @@
list_del(&ssp->node);
mutex_unlock(&ssp_lock);
- kfree(ssp);
return 0;
}
diff --git a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
index f4964be..e80a792 100644
--- a/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
+++ b/arch/arm64/boot/dts/hisilicon/hi6220-hikey.dts
@@ -118,6 +118,7 @@
reset-gpios = <&gpio0 5 GPIO_ACTIVE_LOW>;
clocks = <&pmic>;
clock-names = "ext_clock";
+ post-power-on-delay-ms = <10>;
power-off-delay-us = <10>;
};
@@ -300,7 +301,6 @@
dwmmc_0: dwmmc0@f723d000 {
cap-mmc-highspeed;
- mmc-hs200-1_8v;
non-removable;
bus-width = <0x8>;
vmmc-supply = <&ldo19>;
diff --git a/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi b/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
index b9e7d0a..657fd87 100644
--- a/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-audio-overlay.dtsi
@@ -71,7 +71,7 @@
qcom,rx-swr-gpios = <&rx_swr_gpios>;
qcom,rx_mclk_mode_muxsel = <0x033240D8>;
qcom,rx-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
- qcom,mux0-clk-id = <TX_CORE_CLK>;
+ qcom,default-clk-id = <TX_CORE_CLK>;
swr1: rx_swr_master {
compatible = "qcom,swr-mstr";
#address-cells = <2>;
@@ -104,7 +104,7 @@
<&clock_audio_wsa_2 0>;
qcom,wsa-swr-gpios = <&wsa_swr_gpios>;
qcom,wsa-bcl-pmic-params = /bits/ 8 <0x00 0x00 0x1E>;
- qcom,mux0-clk-id = <TX_CORE_CLK>;
+ qcom,default-clk-id = <TX_CORE_CLK>;
swr0: wsa_swr_master {
compatible = "qcom,swr-mstr";
#address-cells = <2>;
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
index 8b766c3..434c601 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-cdp.dtsi
@@ -41,6 +41,54 @@
rgltr-load-current = <100000>;
};
+ actuator_rear_aux: qcom,actuator1 {
+ cell-index = <1>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2856000>;
+ rgltr-max-voltage = <3104000>;
+ rgltr-load-current = <100000>;
+ };
+
+ actuator_triple_wide: qcom,actuator4 {
+ cell-index = <4>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2856000>;
+ rgltr-max-voltage = <3104000>;
+ rgltr-load-current = <100000>;
+ };
+
+ actuator_triple_tele: qcom,actuator5 {
+ cell-index = <5>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2856000>;
+ rgltr-max-voltage = <3104000>;
+ rgltr-load-current = <100000>;
+ };
+
+ actuator_triple_uw: qcom,actuator6 {
+ cell-index = <6>;
+ compatible = "qcom,actuator";
+ cci-master = <1>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2856000>;
+ rgltr-max-voltage = <3104000>;
+ rgltr-load-current = <100000>;
+ };
+
eeprom_rear: qcom,eeprom0 {
cell-index = <0>;
compatible = "qcom,eeprom";
@@ -115,6 +163,117 @@
clock-rates = <24000000>;
};
+ eeprom_triple_wide: qcom,eeprom4 {
+ cell-index = <4>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8009_l7>;
+ cam_vana-supply = <&pm8009_l5>;
+ cam_vdig-supply = <&pm8009_l1>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
+ rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
+ rgltr-load-current = <0 80000 1200000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_active_rear>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_suspend_rear>;
+ gpios = <&tlmm 94 0>,
+ <&tlmm 93 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ eeprom_triple_tele: qcom,eeprom5 {
+	cell-index = <5>;
+ compatible = "qcom,eeprom";
+ cam_vdig-supply = <&pm8009_l2>;
+ cam_vio-supply = <&pm8009_l7>;
+ cam_vana-supply = <&pm8009_l6>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
+ rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
+ rgltr-load-current = <0 80000 1200000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_active_rear_aux>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_suspend_rear_aux>;
+ gpios = <&tlmm 95 0>,
+ <&tlmm 92 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ eeprom_triple_uw: qcom,eeprom6 {
+ cell-index = <6>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8009_l7>;
+ cam_vana-supply = <&pm8009_l6>;
+ cam_vdig-supply = <&pm8009_l3>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
+ rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
+ rgltr-load-current = <0 80000 1200000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_active_rst2>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_suspend_rst2>;
+ gpios = <&tlmm 96 0>,
+ <&tlmm 78 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-position = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
qcom,cam-sensor0 {
cell-index = <0>;
compatible = "qcom,cam-sensor";
@@ -123,8 +282,8 @@
sensor-position-pitch = <0>;
sensor-position-yaw = <180>;
actuator-src = <&actuator_rear>;
- eeprom-src = <&eeprom_rear>;
led-flash-src = <&led_flash_rear>;
+ eeprom-src = <&eeprom_rear>;
cam_vio-supply = <&pm8009_l7>;
cam_bob-supply = <&pm8150a_bob>;
cam_vana-supply = <&pm8009_l5>;
@@ -201,6 +360,137 @@
clock-cntl-level = "turbo";
clock-rates = <24000000>;
};
+
+ qcom,cam-sensor4 {
+ cell-index = <4>;
+ compatible = "qcom,cam-sensor";
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ actuator-src = <&actuator_triple_wide>;
+ led-flash-src = <&led_flash_rear>;
+ eeprom-src = <&eeprom_triple_wide>;
+ cam_vio-supply = <&pm8009_l7>;
+ cam_bob-supply = <&pm8150a_bob>;
+ cam_vana-supply = <&pm8009_l5>;
+ cam_vdig-supply = <&pm8009_l1>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_bob";
+ rgltr-cntrl-support;
+ pwm-switch;
+ rgltr-min-voltage = <0 2800000 1104000 0 3008000>;
+ rgltr-max-voltage = <0 3000000 1104000 0 3960000>;
+ rgltr-load-current = <0 80000 1200000 0 2000000>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_active_rear>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_suspend_rear>;
+ gpios = <&tlmm 94 0>,
+ <&tlmm 93 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor5 {
+ cell-index = <5>;
+ compatible = "qcom,cam-sensor";
+ csiphy-sd-index = <1>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ eeprom-src = <&eeprom_triple_tele>;
+ actuator-src = <&actuator_triple_tele>;
+ led-flash-src = <&led_flash_rear_aux>;
+ cam_bob-supply = <&pm8150a_bob>;
+ cam_vdig-supply = <&pm8009_l2>;
+ cam_vio-supply = <&pm8009_l7>;
+ cam_vana-supply = <&pm8009_l6>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_bob";
+ rgltr-cntrl-support;
+ pwm-switch;
+ rgltr-min-voltage = <0 2800000 1200000 0 3008000>;
+ rgltr-max-voltage = <0 3000000 1200000 0 3960000>;
+ rgltr-load-current = <0 80000 1200000 0 2000000>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_active_rear_aux>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_suspend_rear_aux>;
+ gpios = <&tlmm 95 0>,
+ <&tlmm 92 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor6 {
+ cell-index = <6>;
+ compatible = "qcom,cam-sensor";
+ csiphy-sd-index = <2>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <0>;
+ eeprom-src = <&eeprom_triple_uw>;
+ actuator-src = <&actuator_triple_uw>;
+ cam_vio-supply = <&pm8009_l7>;
+ cam_bob-supply = <&pm8150a_bob>;
+ cam_vana-supply = <&pm8009_l6>;
+ cam_vdig-supply = <&pm8009_l3>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_bob";
+ rgltr-cntrl-support;
+ pwm-switch;
+ rgltr-min-voltage = <0 2800000 1056000 0 3008000>;
+ rgltr-max-voltage = <0 3000000 1056000 0 3960000>;
+ rgltr-load-current = <0 80000 1200000 0 2000000>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_active_rst2>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_suspend_rst2>;
+ gpios = <&tlmm 96 0>,
+ <&tlmm 78 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
};
&cam_cci1 {
@@ -221,9 +511,9 @@
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
- &cam_sensor_active_front>;
+ &cam_sensor_active_rst2>;
pinctrl-1 = <&cam_sensor_mclk2_suspend
- &cam_sensor_suspend_front>;
+ &cam_sensor_suspend_rst2>;
gpios = <&tlmm 96 0>,
<&tlmm 78 0>;
gpio-reset = <1>;
@@ -245,7 +535,7 @@
cell-index = <2>;
compatible = "qcom,cam-sensor";
csiphy-sd-index = <2>;
- sensor-position-roll = <90>;
+ sensor-position-roll = <270>;
sensor-position-pitch = <0>;
sensor-position-yaw = <0>;
eeprom-src = <&eeprom_front>;
@@ -264,9 +554,9 @@
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
- &cam_sensor_active_front>;
+ &cam_sensor_active_rst2>;
pinctrl-1 = <&cam_sensor_mclk2_suspend
- &cam_sensor_suspend_front>;
+ &cam_sensor_suspend_rst2>;
gpios = <&tlmm 96 0>,
<&tlmm 78 0>;
gpio-reset = <1>;
@@ -321,4 +611,3 @@
clock-rates = <24000000>;
};
};
-
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
index d90af20..120fcc1 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-mtp.dtsi
@@ -41,6 +41,54 @@
rgltr-load-current = <100000>;
};
+ actuator_rear_aux: qcom,actuator1 {
+ cell-index = <1>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2856000>;
+ rgltr-max-voltage = <3104000>;
+ rgltr-load-current = <100000>;
+ };
+
+ actuator_triple_wide: qcom,actuator4 {
+ cell-index = <4>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2856000>;
+ rgltr-max-voltage = <3104000>;
+ rgltr-load-current = <100000>;
+ };
+
+ actuator_triple_tele: qcom,actuator5 {
+ cell-index = <5>;
+ compatible = "qcom,actuator";
+ cci-master = <0>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2856000>;
+ rgltr-max-voltage = <3104000>;
+ rgltr-load-current = <100000>;
+ };
+
+ actuator_triple_uw: qcom,actuator6 {
+ cell-index = <6>;
+ compatible = "qcom,actuator";
+ cci-master = <1>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <2856000>;
+ rgltr-max-voltage = <3104000>;
+ rgltr-load-current = <100000>;
+ };
+
eeprom_rear: qcom,eeprom0 {
cell-index = <0>;
compatible = "qcom,eeprom";
@@ -115,6 +163,117 @@
clock-rates = <24000000>;
};
+ eeprom_triple_wide: qcom,eeprom4 {
+ cell-index = <4>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8009_l7>;
+ cam_vana-supply = <&pm8009_l5>;
+ cam_vdig-supply = <&pm8009_l1>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 2800000 1104000 0 2856000>;
+ rgltr-max-voltage = <0 3000000 1104000 0 3104000>;
+ rgltr-load-current = <0 80000 1200000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_active_rear>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_suspend_rear>;
+ gpios = <&tlmm 94 0>,
+ <&tlmm 93 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ eeprom_triple_tele: qcom,eeprom5 {
+ cell-index = <5>;
+ compatible = "qcom,eeprom";
+ cam_vdig-supply = <&pm8009_l2>;
+ cam_vio-supply = <&pm8009_l7>;
+ cam_vana-supply = <&pm8009_l6>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 2800000 1200000 0 2856000>;
+ rgltr-max-voltage = <0 3000000 1200000 0 3104000>;
+ rgltr-load-current = <0 80000 1200000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_active_rear_aux>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_suspend_rear_aux>;
+ gpios = <&tlmm 95 0>,
+ <&tlmm 92 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-position = <0>;
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ eeprom_triple_uw: qcom,eeprom6 {
+ cell-index = <6>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm8009_l7>;
+ cam_vana-supply = <&pm8009_l6>;
+ cam_vdig-supply = <&pm8009_l3>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ cam_vaf-supply = <&pm8150a_l7>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_vaf";
+ rgltr-cntrl-support;
+ rgltr-min-voltage = <0 2800000 1056000 0 2856000>;
+ rgltr-max-voltage = <0 3000000 1056000 0 3104000>;
+ rgltr-load-current = <0 80000 1200000 0 0>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_active_rst2>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_suspend_rst2>;
+ gpios = <&tlmm 96 0>,
+ <&tlmm 78 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-position = <1>;
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
qcom,cam-sensor0 {
cell-index = <0>;
compatible = "qcom,cam-sensor";
@@ -201,6 +360,137 @@
clock-cntl-level = "turbo";
clock-rates = <24000000>;
};
+
+ qcom,cam-sensor4 {
+ cell-index = <4>;
+ compatible = "qcom,cam-sensor";
+ csiphy-sd-index = <0>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ actuator-src = <&actuator_triple_wide>;
+ led-flash-src = <&led_flash_rear>;
+ eeprom-src = <&eeprom_triple_wide>;
+ cam_vio-supply = <&pm8009_l7>;
+ cam_bob-supply = <&pm8150a_bob>;
+ cam_vana-supply = <&pm8009_l5>;
+ cam_vdig-supply = <&pm8009_l1>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_bob";
+ rgltr-cntrl-support;
+ pwm-switch;
+ rgltr-min-voltage = <0 2800000 1104000 0 3008000>;
+ rgltr-max-voltage = <0 3000000 1104000 0 3960000>;
+ rgltr-load-current = <0 80000 1200000 0 2000000>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_active_rear>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_suspend_rear>;
+ gpios = <&tlmm 94 0>,
+ <&tlmm 93 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK0_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor5 {
+ cell-index = <5>;
+ compatible = "qcom,cam-sensor";
+ csiphy-sd-index = <1>;
+ sensor-position-roll = <90>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <180>;
+ eeprom-src = <&eeprom_triple_tele>;
+ actuator-src = <&actuator_triple_tele>;
+ led-flash-src = <&led_flash_rear_aux>;
+ cam_bob-supply = <&pm8150a_bob>;
+ cam_vdig-supply = <&pm8009_l2>;
+ cam_vio-supply = <&pm8009_l7>;
+ cam_vana-supply = <&pm8009_l6>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_bob";
+ rgltr-cntrl-support;
+ pwm-switch;
+ rgltr-min-voltage = <0 2800000 1200000 0 3008000>;
+ rgltr-max-voltage = <0 3000000 1200000 0 3960000>;
+ rgltr-load-current = <0 80000 1200000 0 2000000>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_active_rear_aux>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_suspend_rear_aux>;
+ gpios = <&tlmm 95 0>,
+ <&tlmm 92 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1";
+ sensor-mode = <0>;
+ cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK1_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
+
+ qcom,cam-sensor6 {
+ cell-index = <6>;
+ compatible = "qcom,cam-sensor";
+ csiphy-sd-index = <2>;
+ sensor-position-roll = <270>;
+ sensor-position-pitch = <0>;
+ sensor-position-yaw = <0>;
+ eeprom-src = <&eeprom_triple_uw>;
+ actuator-src = <&actuator_triple_uw>;
+ cam_vio-supply = <&pm8009_l7>;
+ cam_bob-supply = <&pm8150a_bob>;
+ cam_vana-supply = <&pm8009_l6>;
+ cam_vdig-supply = <&pm8009_l3>;
+ cam_clk-supply = <&titan_top_gdsc>;
+ regulator-names = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_clk", "cam_bob";
+ rgltr-cntrl-support;
+ pwm-switch;
+ rgltr-min-voltage = <0 2800000 1056000 0 3008000>;
+ rgltr-max-voltage = <0 3000000 1056000 0 3960000>;
+ rgltr-load-current = <0 80000 1200000 0 2000000>;
+ gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_active_rst2>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_suspend_rst2>;
+ gpios = <&tlmm 96 0>,
+ <&tlmm 78 0>;
+ gpio-reset = <1>;
+ gpio-req-tbl-num = <0 1>;
+ gpio-req-tbl-flags = <1 0>;
+ gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2";
+ sensor-mode = <0>;
+ cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_camcc CAM_CC_MCLK2_CLK>;
+ clock-names = "cam_clk";
+ clock-cntl-level = "turbo";
+ clock-rates = <24000000>;
+ };
};
&cam_cci1 {
@@ -221,9 +511,9 @@
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
- &cam_sensor_active_front>;
+ &cam_sensor_active_rst2>;
pinctrl-1 = <&cam_sensor_mclk2_suspend
- &cam_sensor_suspend_front>;
+ &cam_sensor_suspend_rst2>;
gpios = <&tlmm 96 0>,
<&tlmm 78 0>;
gpio-reset = <1>;
@@ -264,9 +554,9 @@
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
- &cam_sensor_active_front>;
+ &cam_sensor_active_rst2>;
pinctrl-1 = <&cam_sensor_mclk2_suspend
- &cam_sensor_suspend_front>;
+ &cam_sensor_suspend_rst2>;
gpios = <&tlmm 96 0>,
<&tlmm 78 0>;
gpio-reset = <1>;
diff --git a/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
index 1593128..01f5771 100644
--- a/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-camera-sensor-qrd.dtsi
@@ -221,9 +221,9 @@
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
- &cam_sensor_active_front>;
+ &cam_sensor_active_rst2>;
pinctrl-1 = <&cam_sensor_mclk2_suspend
- &cam_sensor_suspend_front>;
+ &cam_sensor_suspend_rst2>;
gpios = <&tlmm 96 0>,
<&tlmm 78 0>;
gpio-reset = <1>;
@@ -264,9 +264,9 @@
gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
- &cam_sensor_active_front>;
+ &cam_sensor_active_rst2>;
pinctrl-1 = <&cam_sensor_mclk2_suspend
- &cam_sensor_suspend_front>;
+ &cam_sensor_suspend_rst2>;
gpios = <&tlmm 96 0>,
<&tlmm 78 0>;
gpio-reset = <1>;
diff --git a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
index fbde9a3..417c45b 100644
--- a/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-pinctrl.dtsi
@@ -2267,8 +2267,8 @@
};
};
- cam_sensor_active_front: cam_sensor_active_front {
- /* RESET FRONT */
+ cam_sensor_active_rst2: cam_sensor_active_rst2 {
+ /* RESET 2 */
mux {
pins = "gpio78";
function = "gpio";
@@ -2281,8 +2281,8 @@
};
};
- cam_sensor_suspend_front: cam_sensor_suspend_front {
- /* RESET FRONT */
+ cam_sensor_suspend_rst2: cam_sensor_suspend_rst2 {
+ /* RESET 2 */
mux {
pins = "gpio78";
function = "gpio";
diff --git a/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi b/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
index 6149297..11c09eb 100644
--- a/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona-va-bolero.dtsi
@@ -15,7 +15,7 @@
qcom,va-dmic-sample-rate = <4800000>;
qcom,va-clk-mux-select = <1>;
qcom,va-island-mode-muxsel = <0x033A0000>;
- qcom,mux0-clk-id = <TX_CORE_CLK>;
+ qcom,default-clk-id = <TX_CORE_CLK>;
};
};
diff --git a/arch/arm64/boot/dts/qcom/kona.dtsi b/arch/arm64/boot/dts/qcom/kona.dtsi
index 8c1387b..b4cd30b 100644
--- a/arch/arm64/boot/dts/qcom/kona.dtsi
+++ b/arch/arm64/boot/dts/qcom/kona.dtsi
@@ -1867,9 +1867,11 @@
lanes-per-direction = <2>;
clock-names = "ref_clk_src",
+ "ref_clk",
"ref_aux_clk";
clocks = <&clock_rpmh RPMH_CXO_CLK>,
- <&clock_gcc GCC_UFS_PHY_PHY_AUX_HW_CTL_CLK>;
+ <&clock_gcc GCC_UFS_1X_CLKREF_EN>,
+ <&clock_gcc GCC_UFS_PHY_PHY_AUX_CLK>;
status = "disabled";
};
@@ -1896,11 +1898,11 @@
"rx_lane0_sync_clk",
"rx_lane1_sync_clk";
clocks =
- <&clock_gcc GCC_UFS_PHY_AXI_HW_CTL_CLK>,
- <&clock_gcc GCC_AGGRE_UFS_PHY_AXI_HW_CTL_CLK>,
+ <&clock_gcc GCC_UFS_PHY_AXI_CLK>,
+ <&clock_gcc GCC_AGGRE_UFS_PHY_AXI_CLK>,
<&clock_gcc GCC_UFS_PHY_AHB_CLK>,
- <&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_HW_CTL_CLK>,
- <&clock_gcc GCC_UFS_PHY_ICE_CORE_HW_CTL_CLK>,
+ <&clock_gcc GCC_UFS_PHY_UNIPRO_CORE_CLK>,
+ <&clock_gcc GCC_UFS_PHY_ICE_CORE_CLK>,
<&clock_rpmh RPMH_CXO_CLK>,
<&clock_gcc GCC_UFS_PHY_TX_SYMBOL_0_CLK>,
<&clock_gcc GCC_UFS_PHY_RX_SYMBOL_0_CLK>,
@@ -1910,7 +1912,7 @@
<0 0>,
<0 0>,
<37500000 300000000>,
- <37500000 300000000>,
+ <75000000 300000000>,
<0 0>,
<0 0>,
<0 0>,
@@ -2927,13 +2929,11 @@
compatible = "qcom,pil-tz-generic";
reg = <0x5c00000 0x4000>;
- vdd_cx-supply = <&VDD_CX_LEVEL>;
+ vdd_cx-supply = <&L11A_LEVEL>;
qcom,vdd_cx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 0>;
- vdd_mx-supply = <&VDD_MX_LEVEL>;
+ vdd_mx-supply = <&L4A_LEVEL>;
qcom,vdd_mx-uV-uA = <RPMH_REGULATOR_LEVEL_TURBO 0>;
-
qcom,proxy-reg-names = "vdd_cx", "vdd_mx";
- qcom,keep-proxy-regs-on;
clocks = <&clock_rpmh RPMH_CXO_CLK>;
clock-names = "xo";
@@ -2948,6 +2948,7 @@
status = "ok";
memory-region = <&pil_slpi_mem>;
qcom,complete-ramdump;
+ qcom,signal-aop;
/* Inputs from ssc */
interrupts-extended = <&pdc 9 IRQ_TYPE_LEVEL_HIGH>,
@@ -2966,6 +2967,7 @@
qcom,smem-states = <&dsps_smp2p_out 0>;
qcom,smem-state-names = "qcom,force-stop";
+ mboxes = <&qmp_aop 0>;
mbox-names = "slpi-pil";
};
@@ -3389,6 +3391,13 @@
mhi,brstmode = <2>;
};
};
+
+ mhi_devices {
+ mhi_qrtr {
+ mhi,chan = "IPCR";
+ qcom,net-id = <0>;
+ };
+ };
};
};
diff --git a/arch/arm64/boot/dts/qcom/lito-usb.dtsi b/arch/arm64/boot/dts/qcom/lito-usb.dtsi
index 7cc8b61..6cc33c0 100644
--- a/arch/arm64/boot/dts/qcom/lito-usb.dtsi
+++ b/arch/arm64/boot/dts/qcom/lito-usb.dtsi
@@ -12,7 +12,9 @@
reg = <0x0a600000 0x100000>;
reg-names = "core_base";
- qcom,iommu-dma = "disabled";
+ iommus = <&apps_smmu 0xE0 0x0>;
+ qcom,iommu-dma = "atomic";
+ qcom,iommu-dma-addr-pool = <0x90000000 0x60000000>;
#address-cells = <1>;
#size-cells = <1>;
ranges;
diff --git a/arch/arm64/boot/dts/qcom/msm8996.dtsi b/arch/arm64/boot/dts/qcom/msm8996.dtsi
index cd3865e..8c86c41 100644
--- a/arch/arm64/boot/dts/qcom/msm8996.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8996.dtsi
@@ -399,7 +399,7 @@
};
intc: interrupt-controller@9bc0000 {
- compatible = "arm,gic-v3";
+ compatible = "qcom,msm8996-gic-v3", "arm,gic-v3";
#interrupt-cells = <3>;
interrupt-controller;
#redistributor-regions = <1>;
diff --git a/arch/arm64/boot/dts/renesas/r8a7796.dtsi b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
index cbd35c0..33cb028 100644
--- a/arch/arm64/boot/dts/renesas/r8a7796.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a7796.dtsi
@@ -1161,6 +1161,9 @@
<&cpg CPG_CORE R8A7796_CLK_S3D1>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+ <&dmac2 0x13>, <&dmac2 0x12>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A7796_PD_ALWAYS_ON>;
resets = <&cpg 310>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/renesas/r8a77965.dtsi b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
index 0cd4446..f60f08b 100644
--- a/arch/arm64/boot/dts/renesas/r8a77965.dtsi
+++ b/arch/arm64/boot/dts/renesas/r8a77965.dtsi
@@ -951,6 +951,9 @@
<&cpg CPG_CORE R8A77965_CLK_S3D1>,
<&scif_clk>;
clock-names = "fck", "brg_int", "scif_clk";
+ dmas = <&dmac1 0x13>, <&dmac1 0x12>,
+ <&dmac2 0x13>, <&dmac2 0x12>;
+ dma-names = "tx", "rx", "tx", "rx";
power-domains = <&sysc R8A77965_PD_ALWAYS_ON>;
resets = <&cpg 310>;
status = "disabled";
diff --git a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
index eb5e8bd..8954c8c 100644
--- a/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
+++ b/arch/arm64/boot/dts/xilinx/zynqmp-zcu100-revC.dts
@@ -101,6 +101,7 @@
sdio_pwrseq: sdio_pwrseq {
compatible = "mmc-pwrseq-simple";
reset-gpios = <&gpio 7 GPIO_ACTIVE_LOW>; /* WIFI_EN */
+ post-power-on-delay-ms = <10>;
};
};
diff --git a/arch/arm64/configs/cuttlefish_defconfig b/arch/arm64/configs/cuttlefish_defconfig
index d1ea96f..87a22fa 100644
--- a/arch/arm64/configs/cuttlefish_defconfig
+++ b/arch/arm64/configs/cuttlefish_defconfig
@@ -58,6 +58,7 @@
CONFIG_CPU_IDLE=y
CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
+CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
@@ -124,6 +125,7 @@
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -229,6 +231,7 @@
CONFIG_PPP_MPPE=y
CONFIG_PPTP=y
CONFIG_PPPOL2TP=y
+CONFIG_USB_RTL8152=y
CONFIG_USB_USBNET=y
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_AX88179_178A is not set
@@ -299,6 +302,12 @@
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_SOUND=y
CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_INTEL8X0=y
+# CONFIG_SND_USB is not set
CONFIG_HIDRAW=y
CONFIG_UHID=y
CONFIG_HID_A4TECH=y
diff --git a/arch/arm64/configs/vendor/kona_defconfig b/arch/arm64/configs/vendor/kona_defconfig
index 2210bec..fbc5f84 100644
--- a/arch/arm64/configs/vendor/kona_defconfig
+++ b/arch/arm64/configs/vendor/kona_defconfig
@@ -650,6 +650,7 @@
CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_PAGE_OWNER=y
+CONFIG_PAGE_OWNER_ENABLE_DEFAULT=y
CONFIG_DEBUG_SECTION_MISMATCH=y
# CONFIG_SECTION_MISMATCH_WARN_ONLY is not set
CONFIG_DEBUG_FORCE_WEAK_PER_CPU=y
diff --git a/arch/arm64/kernel/probes/kprobes.c b/arch/arm64/kernel/probes/kprobes.c
index b5a367d..30bb137 100644
--- a/arch/arm64/kernel/probes/kprobes.c
+++ b/arch/arm64/kernel/probes/kprobes.c
@@ -478,13 +478,13 @@
addr < (unsigned long)__entry_text_end) ||
(addr >= (unsigned long)__idmap_text_start &&
addr < (unsigned long)__idmap_text_end) ||
+ (addr >= (unsigned long)__hyp_text_start &&
+ addr < (unsigned long)__hyp_text_end) ||
!!search_exception_tables(addr))
return true;
if (!is_kernel_in_hyp_mode()) {
- if ((addr >= (unsigned long)__hyp_text_start &&
- addr < (unsigned long)__hyp_text_end) ||
- (addr >= (unsigned long)__hyp_idmap_text_start &&
+ if ((addr >= (unsigned long)__hyp_idmap_text_start &&
addr < (unsigned long)__hyp_idmap_text_end))
return true;
}
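
Editorial note: the kprobes hunk above moves the __hyp_text range into the unconditional blacklist, so probes there are rejected whether or not the kernel runs in HYP mode. Below is a minimal userspace model of the same range-blacklist check; the ranges and values are made-up stand-ins for the kernel's linker-section bounds, not its actual symbols.

    #include <stdbool.h>
    #include <stdio.h>

    /* Made-up stand-ins for linker-section bounds like __entry_text_start. */
    struct range { unsigned long start, end; };

    static const struct range blacklist[] = {
    	{ 0x1000, 0x2000 },	/* e.g. entry text */
    	{ 0x3000, 0x3800 },	/* e.g. hyp text: now always blacklisted */
    };

    static bool within_kprobe_blacklist(unsigned long addr)
    {
    	unsigned int i;

    	for (i = 0; i < sizeof(blacklist) / sizeof(blacklist[0]); i++)
    		if (addr >= blacklist[i].start && addr < blacklist[i].end)
    			return true;
    	return false;
    }

    int main(void)
    {
    	printf("0x3400: %d, 0x2800: %d\n",
    	       within_kprobe_blacklist(0x3400),
    	       within_kprobe_blacklist(0x2800));
    	return 0;
    }
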
diff --git a/arch/mips/boot/dts/ingenic/ci20.dts b/arch/mips/boot/dts/ingenic/ci20.dts
index 50cff3c..4f7b1fa 100644
--- a/arch/mips/boot/dts/ingenic/ci20.dts
+++ b/arch/mips/boot/dts/ingenic/ci20.dts
@@ -76,7 +76,7 @@
status = "okay";
pinctrl-names = "default";
- pinctrl-0 = <&pins_uart2>;
+ pinctrl-0 = <&pins_uart3>;
};
&uart4 {
@@ -196,9 +196,9 @@
bias-disable;
};
- pins_uart2: uart2 {
- function = "uart2";
- groups = "uart2-data", "uart2-hwflow";
+ pins_uart3: uart3 {
+ function = "uart3";
+ groups = "uart3-data", "uart3-hwflow";
bias-disable;
};
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index ba150c75..85b6c60 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -52,6 +52,7 @@
void __init init_IRQ(void)
{
int i;
+ unsigned int order = get_order(IRQ_STACK_SIZE);
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
@@ -62,8 +63,7 @@
arch_init_irq();
for_each_possible_cpu(i) {
- int irq_pages = IRQ_STACK_SIZE / PAGE_SIZE;
- void *s = (void *)__get_free_pages(GFP_KERNEL, irq_pages);
+ void *s = (void *)__get_free_pages(GFP_KERNEL, order);
irq_stack[i] = s;
pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index d4f7fd4..85522c1 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -371,7 +371,7 @@
static int get_frame_info(struct mips_frame_info *info)
{
bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
- union mips_instruction insn, *ip, *ip_end;
+ union mips_instruction insn, *ip;
const unsigned int max_insns = 128;
unsigned int last_insn_size = 0;
unsigned int i;
@@ -384,10 +384,9 @@
if (!ip)
goto err;
- ip_end = (void *)ip + info->func_size;
-
- for (i = 0; i < max_insns && ip < ip_end; i++) {
+ for (i = 0; i < max_insns; i++) {
ip = (void *)ip + last_insn_size;
+
if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
insn.word = ip->halfword[0] << 16;
last_insn_size = 2;
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 3fe4af8..c23578a 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -22,7 +22,7 @@
* This decides where the kernel will search for a free chunk of vm
* space during mmap's.
*/
-#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE >> 1)
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
#define STACK_TOP TASK_SIZE
#define STACK_TOP_MAX STACK_TOP
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index b2d26d9..9713d4e 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -186,7 +186,7 @@
BUG_ON(mem_size == 0);
set_max_mapnr(PFN_DOWN(mem_size));
- max_low_pfn = memblock_end_of_DRAM();
+ max_low_pfn = PFN_DOWN(memblock_end_of_DRAM());
#ifdef CONFIG_BLK_DEV_INITRD
setup_initrd();
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 58a522f..200a4b3 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -29,7 +29,8 @@
unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
#ifdef CONFIG_ZONE_DMA32
- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G, max_low_pfn));
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(min(4UL * SZ_1G,
+ (unsigned long) PFN_PHYS(max_low_pfn)));
#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
index 6403789..f105ae8 100644
--- a/arch/x86/boot/compressed/head_64.S
+++ b/arch/x86/boot/compressed/head_64.S
@@ -600,6 +600,14 @@
leal TRAMPOLINE_32BIT_PGTABLE_OFFSET(%ecx), %eax
movl %eax, %cr3
3:
+ /* Set EFER.LME=1 as a precaution in case hypervisor pulls the rug */
+ pushl %ecx
+ movl $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ wrmsr
+ popl %ecx
+
/* Enable PAE and LA57 (if required) paging modes */
movl $X86_CR4_PAE, %eax
cmpl $0, %edx
diff --git a/arch/x86/boot/compressed/pgtable.h b/arch/x86/boot/compressed/pgtable.h
index 91f7563..6ff7e81 100644
--- a/arch/x86/boot/compressed/pgtable.h
+++ b/arch/x86/boot/compressed/pgtable.h
@@ -6,7 +6,7 @@
#define TRAMPOLINE_32BIT_PGTABLE_OFFSET 0
#define TRAMPOLINE_32BIT_CODE_OFFSET PAGE_SIZE
-#define TRAMPOLINE_32BIT_CODE_SIZE 0x60
+#define TRAMPOLINE_32BIT_CODE_SIZE 0x70
#define TRAMPOLINE_32BIT_STACK_END TRAMPOLINE_32BIT_SIZE
diff --git a/arch/x86/boot/compressed/pgtable_64.c b/arch/x86/boot/compressed/pgtable_64.c
index 9e21573..f8debf7 100644
--- a/arch/x86/boot/compressed/pgtable_64.c
+++ b/arch/x86/boot/compressed/pgtable_64.c
@@ -1,5 +1,7 @@
+#include <linux/efi.h>
#include <asm/e820/types.h>
#include <asm/processor.h>
+#include <asm/efi.h>
#include "pgtable.h"
#include "../string.h"
@@ -37,9 +39,10 @@
static unsigned long find_trampoline_placement(void)
{
- unsigned long bios_start, ebda_start;
+ unsigned long bios_start = 0, ebda_start = 0;
unsigned long trampoline_start;
struct boot_e820_entry *entry;
+ char *signature;
int i;
/*
@@ -47,8 +50,18 @@
* This code is based on reserve_bios_regions().
*/
- ebda_start = *(unsigned short *)0x40e << 4;
- bios_start = *(unsigned short *)0x413 << 10;
+ /*
+ * EFI systems may not provide legacy ROM. The memory may not be mapped
+ * at all.
+ *
+ * Only look for values in the legacy ROM for non-EFI systems.
+ */
+ signature = (char *)&boot_params->efi_info.efi_loader_signature;
+ if (strncmp(signature, EFI32_LOADER_SIGNATURE, 4) &&
+ strncmp(signature, EFI64_LOADER_SIGNATURE, 4)) {
+ ebda_start = *(unsigned short *)0x40e << 4;
+ bios_start = *(unsigned short *)0x413 << 10;
+ }
if (bios_start < BIOS_START_MIN || bios_start > BIOS_START_MAX)
bios_start = BIOS_START_MAX;
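
Editorial note: find_trampoline_placement() now reads the legacy BDA words (EBDA segment at 0x40e, base-memory size at 0x413) only when boot_params carries no EFI loader signature, since EFI firmware need not map that page. A hedged userspace model of the signature gate; the 4-byte signatures are the real "EL32"/"EL64" strings, everything else is illustrative:

    #include <stdio.h>
    #include <string.h>

    #define EFI32_LOADER_SIGNATURE "EL32"
    #define EFI64_LOADER_SIGNATURE "EL64"

    static int booted_via_efi(const char sig[4])
    {
    	return !strncmp(sig, EFI32_LOADER_SIGNATURE, 4) ||
    	       !strncmp(sig, EFI64_LOADER_SIGNATURE, 4);
    }

    int main(void)
    {
    	char efi[4] = { 'E', 'L', '6', '4' };
    	char legacy[4] = { 0, 0, 0, 0 };

    	/* Only the legacy (non-EFI) path may peek at 0x40e/0x413. */
    	printf("efi: %d, legacy: %d\n",
    	       booted_via_efi(efi), booted_via_efi(legacy));
    	return 0;
    }
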
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index 8229cf8..4b536d0 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -58,6 +58,7 @@
# CONFIG_ACPI_FAN is not set
# CONFIG_ACPI_THERMAL is not set
# CONFIG_X86_PM_TIMER is not set
+CONFIG_CPU_FREQ_TIMES=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_X86_ACPI_CPUFREQ=y
CONFIG_PCI_MSI=y
@@ -96,6 +97,7 @@
CONFIG_NET_IPVTI=y
CONFIG_INET_ESP=y
# CONFIG_INET_XFRM_MODE_BEET is not set
+CONFIG_INET_UDP_DIAG=y
CONFIG_INET_DIAG_DESTROY=y
CONFIG_TCP_CONG_ADVANCED=y
# CONFIG_TCP_CONG_BIC is not set
@@ -128,6 +130,7 @@
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y
+CONFIG_NETFILTER_XT_TARGET_CT=y
CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y
CONFIG_NETFILTER_XT_TARGET_MARK=y
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
@@ -234,6 +237,7 @@
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
CONFIG_PPP_MPPE=y
+CONFIG_USB_RTL8152=y
CONFIG_USB_USBNET=y
# CONFIG_USB_NET_AX8817X is not set
# CONFIG_USB_NET_AX88179_178A is not set
@@ -311,6 +315,12 @@
CONFIG_DRM_VIRTIO_GPU=y
CONFIG_SOUND=y
CONFIG_SND=y
+CONFIG_SND_HRTIMER=y
+# CONFIG_SND_SUPPORT_OLD_API is not set
+# CONFIG_SND_VERBOSE_PROCFS is not set
+# CONFIG_SND_DRIVERS is not set
+CONFIG_SND_INTEL8X0=y
+# CONFIG_SND_USB is not set
CONFIG_HIDRAW=y
CONFIG_UHID=y
CONFIG_HID_A4TECH=y
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index c04a881..a415543 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -1970,7 +1970,7 @@
*/
static void free_fake_cpuc(struct cpu_hw_events *cpuc)
{
- kfree(cpuc->shared_regs);
+ intel_cpuc_finish(cpuc);
kfree(cpuc);
}
@@ -1982,14 +1982,11 @@
cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
if (!cpuc)
return ERR_PTR(-ENOMEM);
-
- /* only needed, if we have extra_regs */
- if (x86_pmu.extra_regs) {
- cpuc->shared_regs = allocate_shared_regs(cpu);
- if (!cpuc->shared_regs)
- goto error;
- }
cpuc->is_fake = 1;
+
+ if (intel_cpuc_prepare(cpuc, cpu))
+ goto error;
+
return cpuc;
error:
free_fake_cpuc(cpuc);
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index fbd7551..12453cf 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -1995,6 +1995,39 @@
intel_pmu_enable_all(added);
}
+static void intel_set_tfa(struct cpu_hw_events *cpuc, bool on)
+{
+ u64 val = on ? MSR_TFA_RTM_FORCE_ABORT : 0;
+
+ if (cpuc->tfa_shadow != val) {
+ cpuc->tfa_shadow = val;
+ wrmsrl(MSR_TSX_FORCE_ABORT, val);
+ }
+}
+
+static void intel_tfa_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
+{
+ /*
+ * We're going to use PMC3, make sure TFA is set before we touch it.
+ */
+ if (cntr == 3 && !cpuc->is_fake)
+ intel_set_tfa(cpuc, true);
+}
+
+static void intel_tfa_pmu_enable_all(int added)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ /*
+ * If we find PMC3 is no longer used when we enable the PMU, we can
+ * clear TFA.
+ */
+ if (!test_bit(3, cpuc->active_mask))
+ intel_set_tfa(cpuc, false);
+
+ intel_pmu_enable_all(added);
+}
+
static inline u64 intel_pmu_get_status(void)
{
u64 status;
@@ -2653,6 +2686,35 @@
}
static struct event_constraint *
+dyn_constraint(struct cpu_hw_events *cpuc, struct event_constraint *c, int idx)
+{
+ WARN_ON_ONCE(!cpuc->constraint_list);
+
+ if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
+ struct event_constraint *cx;
+
+ /*
+ * grab pre-allocated constraint entry
+ */
+ cx = &cpuc->constraint_list[idx];
+
+ /*
+ * initialize dynamic constraint
+ * with static constraint
+ */
+ *cx = *c;
+
+ /*
+ * mark constraint as dynamic
+ */
+ cx->flags |= PERF_X86_EVENT_DYNAMIC;
+ c = cx;
+ }
+
+ return c;
+}
+
+static struct event_constraint *
intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
int idx, struct event_constraint *c)
{
@@ -2682,27 +2744,7 @@
* only needed when constraint has not yet
* been cloned (marked dynamic)
*/
- if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
- struct event_constraint *cx;
-
- /*
- * grab pre-allocated constraint entry
- */
- cx = &cpuc->constraint_list[idx];
-
- /*
- * initialize dynamic constraint
- * with static constraint
- */
- *cx = *c;
-
- /*
- * mark constraint as dynamic, so we
- * can free it later on
- */
- cx->flags |= PERF_X86_EVENT_DYNAMIC;
- c = cx;
- }
+ c = dyn_constraint(cpuc, c, idx);
/*
* From here on, the constraint is dynamic.
@@ -3229,6 +3271,26 @@
return c;
}
+static bool allow_tsx_force_abort = true;
+
+static struct event_constraint *
+tfa_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
+ struct perf_event *event)
+{
+ struct event_constraint *c = hsw_get_event_constraints(cpuc, idx, event);
+
+ /*
+ * Without TFA we must not use PMC3.
+ */
+ if (!allow_tsx_force_abort && test_bit(3, c->idxmsk) && idx >= 0) {
+ c = dyn_constraint(cpuc, c, idx);
+ c->idxmsk64 &= ~(1ULL << 3);
+ c->weight--;
+ }
+
+ return c;
+}
+
/*
* Broadwell:
*
@@ -3282,7 +3344,7 @@
return x86_event_sysfs_show(page, config, event);
}
-struct intel_shared_regs *allocate_shared_regs(int cpu)
+static struct intel_shared_regs *allocate_shared_regs(int cpu)
{
struct intel_shared_regs *regs;
int i;
@@ -3314,23 +3376,24 @@
return c;
}
-static int intel_pmu_cpu_prepare(int cpu)
-{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
+int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
+{
if (x86_pmu.extra_regs || x86_pmu.lbr_sel_map) {
cpuc->shared_regs = allocate_shared_regs(cpu);
if (!cpuc->shared_regs)
goto err;
}
- if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
+ if (x86_pmu.flags & (PMU_FL_EXCL_CNTRS | PMU_FL_TFA)) {
size_t sz = X86_PMC_IDX_MAX * sizeof(struct event_constraint);
- cpuc->constraint_list = kzalloc(sz, GFP_KERNEL);
+ cpuc->constraint_list = kzalloc_node(sz, GFP_KERNEL, cpu_to_node(cpu));
if (!cpuc->constraint_list)
goto err_shared_regs;
+ }
+ if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
cpuc->excl_cntrs = allocate_excl_cntrs(cpu);
if (!cpuc->excl_cntrs)
goto err_constraint_list;
@@ -3352,6 +3415,11 @@
return -ENOMEM;
}
+static int intel_pmu_cpu_prepare(int cpu)
+{
+ return intel_cpuc_prepare(&per_cpu(cpu_hw_events, cpu), cpu);
+}
+
static void flip_smm_bit(void *data)
{
unsigned long set = *(unsigned long *)data;
@@ -3423,9 +3491,8 @@
}
}
-static void free_excl_cntrs(int cpu)
+static void free_excl_cntrs(struct cpu_hw_events *cpuc)
{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct intel_excl_cntrs *c;
c = cpuc->excl_cntrs;
@@ -3433,9 +3500,10 @@
if (c->core_id == -1 || --c->refcnt == 0)
kfree(c);
cpuc->excl_cntrs = NULL;
- kfree(cpuc->constraint_list);
- cpuc->constraint_list = NULL;
}
+
+ kfree(cpuc->constraint_list);
+ cpuc->constraint_list = NULL;
}
static void intel_pmu_cpu_dying(int cpu)
@@ -3443,9 +3511,8 @@
fini_debug_store_on_cpu(cpu);
}
-static void intel_pmu_cpu_dead(int cpu)
+void intel_cpuc_finish(struct cpu_hw_events *cpuc)
{
- struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
struct intel_shared_regs *pc;
pc = cpuc->shared_regs;
@@ -3455,7 +3522,12 @@
cpuc->shared_regs = NULL;
}
- free_excl_cntrs(cpu);
+ free_excl_cntrs(cpuc);
+}
+
+static void intel_pmu_cpu_dead(int cpu)
+{
+ intel_cpuc_finish(&per_cpu(cpu_hw_events, cpu));
}
static void intel_pmu_sched_task(struct perf_event_context *ctx,
@@ -3917,8 +3989,11 @@
NULL
};
+static DEVICE_BOOL_ATTR(allow_tsx_force_abort, 0644, allow_tsx_force_abort);
+
static struct attribute *intel_pmu_attrs[] = {
&dev_attr_freeze_on_smi.attr,
+ NULL, /* &dev_attr_allow_tsx_force_abort.attr.attr */
NULL,
};
@@ -4374,6 +4449,15 @@
x86_pmu.cpu_events = get_hsw_events_attrs();
intel_pmu_pebs_data_source_skl(
boot_cpu_data.x86_model == INTEL_FAM6_SKYLAKE_X);
+
+ if (boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)) {
+ x86_pmu.flags |= PMU_FL_TFA;
+ x86_pmu.get_event_constraints = tfa_get_event_constraints;
+ x86_pmu.enable_all = intel_tfa_pmu_enable_all;
+ x86_pmu.commit_scheduling = intel_tfa_commit_scheduling;
+ intel_pmu_attrs[1] = &dev_attr_allow_tsx_force_abort.attr.attr;
+ }
+
pr_cont("Skylake events, ");
name = "skylake";
break;
@@ -4515,7 +4599,7 @@
hardlockup_detector_perf_restart();
for_each_online_cpu(c)
- free_excl_cntrs(c);
+ free_excl_cntrs(&per_cpu(cpu_hw_events, c));
cpus_read_unlock();
pr_info("PMU erratum BJ122, BV98, HSD29 workaround disabled, HT off\n");
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 0ee3a44..42a3628 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -243,6 +243,11 @@
int excl_thread_id; /* 0 or 1 */
/*
+ * SKL TSX_FORCE_ABORT shadow
+ */
+ u64 tfa_shadow;
+
+ /*
* AMD specific bits
*/
struct amd_nb *amd_nb;
@@ -679,6 +684,7 @@
#define PMU_FL_EXCL_CNTRS 0x4 /* has exclusive counter requirements */
#define PMU_FL_EXCL_ENABLED 0x8 /* exclusive counter active */
#define PMU_FL_PEBS_ALL 0x10 /* all events are valid PEBS events */
+#define PMU_FL_TFA 0x20 /* deal with TSX force abort */
#define EVENT_VAR(_id) event_attr_##_id
#define EVENT_PTR(_id) &event_attr_##_id.attr.attr
@@ -887,7 +893,8 @@
x86_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
struct perf_event *event);
-struct intel_shared_regs *allocate_shared_regs(int cpu);
+extern int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu);
+extern void intel_cpuc_finish(struct cpu_hw_events *cpuc);
int intel_pmu_init(void);
@@ -1023,9 +1030,13 @@
return 0;
}
-static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
+static inline int intel_cpuc_prepare(struct cpu_hw_events *cpuc, int cpu)
{
- return NULL;
+ return 0;
+}
+
+static inline void intel_cpuc_finish(struct cpu_hw_events *cpuc)
+{
}
static inline int is_ht_workaround_enabled(void)
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index 89a048c..7b31ee5 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -340,6 +340,7 @@
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 1f9de76..f14ca0b 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -629,6 +629,12 @@
#define MSR_IA32_TSC_DEADLINE 0x000006E0
+
+#define MSR_TSX_FORCE_ABORT 0x0000010F
+
+#define MSR_TFA_RTM_FORCE_ABORT_BIT 0
+#define MSR_TFA_RTM_FORCE_ABORT BIT_ULL(MSR_TFA_RTM_FORCE_ABORT_BIT)
+
/* P4/Xeon+ specific */
#define MSR_IA32_MCG_EAX 0x00000180
#define MSR_IA32_MCG_EBX 0x00000181
diff --git a/arch/x86/include/asm/page_64_types.h b/arch/x86/include/asm/page_64_types.h
index b99d497..0b6352a 100644
--- a/arch/x86/include/asm/page_64_types.h
+++ b/arch/x86/include/asm/page_64_types.h
@@ -7,7 +7,11 @@
#endif
#ifdef CONFIG_KASAN
+#ifdef CONFIG_KASAN_EXTRA
+#define KASAN_STACK_ORDER 2
+#else
#define KASAN_STACK_ORDER 1
+#endif
#else
#define KASAN_STACK_ORDER 0
#endif
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index eeea634..6a25278 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -818,11 +818,9 @@
static void init_amd_zn(struct cpuinfo_x86 *c)
{
set_cpu_cap(c, X86_FEATURE_ZEN);
- /*
- * Fix erratum 1076: CPB feature bit not being set in CPUID. It affects
- * all up to and including B1.
- */
- if (c->x86_model <= 1 && c->x86_stepping <= 1)
+
+ /* Fix erratum 1076: CPB feature bit not being set in CPUID. */
+ if (!cpu_has(c, X86_FEATURE_CPB))
set_cpu_cap(c, X86_FEATURE_CPB);
}
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 07b5fc0..a4e7e10 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -707,7 +707,7 @@
if (!p) {
return ret;
} else {
- if (boot_cpu_data.microcode == p->patch_id)
+ if (boot_cpu_data.microcode >= p->patch_id)
return ret;
ret = UCODE_NEW;
diff --git a/arch/x86/kernel/kexec-bzimage64.c b/arch/x86/kernel/kexec-bzimage64.c
index 278cd07..9490a28 100644
--- a/arch/x86/kernel/kexec-bzimage64.c
+++ b/arch/x86/kernel/kexec-bzimage64.c
@@ -167,6 +167,9 @@
struct efi_info *current_ei = &boot_params.efi_info;
struct efi_info *ei = &params->efi_info;
+ if (!efi_enabled(EFI_RUNTIME_SERVICES))
+ return 0;
+
if (!current_ei->efi_memmap_size)
return 0;
diff --git a/arch/x86/pci/fixup.c b/arch/x86/pci/fixup.c
index 13f4485..bd372e8 100644
--- a/arch/x86/pci/fixup.c
+++ b/arch/x86/pci/fixup.c
@@ -641,6 +641,22 @@
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334c, quirk_no_aersid);
DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, 0x334d, quirk_no_aersid);
+static void quirk_intel_th_dnv(struct pci_dev *dev)
+{
+ struct resource *r = &dev->resource[4];
+
+ /*
+ * Denverton reports 2k of RTIT_BAR (intel_th resource 4), which
+ * appears to be 4 MB in reality.
+ */
+ if (r->end == r->start + 0x7ff) {
+ r->start = 0;
+ r->end = 0x3fffff;
+ r->flags |= IORESOURCE_UNSET;
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x19e1, quirk_intel_th_dnv);
+
#ifdef CONFIG_PHYS_ADDR_T_64BIT
#define AMD_141b_MMIO_BASE(x) (0x80 + (x) * 0x8)
diff --git a/arch/xtensa/configs/smp_lx200_defconfig b/arch/xtensa/configs/smp_lx200_defconfig
index 11fed6c..b593816 100644
--- a/arch/xtensa/configs/smp_lx200_defconfig
+++ b/arch/xtensa/configs/smp_lx200_defconfig
@@ -33,6 +33,7 @@
CONFIG_HOTPLUG_CPU=y
# CONFIG_INITIALIZE_XTENSA_MMU_INSIDE_VMLINUX is not set
# CONFIG_PCI is not set
+CONFIG_VECTORS_OFFSET=0x00002000
CONFIG_XTENSA_PLATFORM_XTFPGA=y
CONFIG_CMDLINE_BOOL=y
CONFIG_CMDLINE="earlycon=uart8250,mmio32native,0xfd050020,115200n8 console=ttyS0,115200n8 ip=dhcp root=/dev/nfs rw debug memmap=96M@0"
diff --git a/arch/xtensa/kernel/head.S b/arch/xtensa/kernel/head.S
index 9053a56..5bd38ea 100644
--- a/arch/xtensa/kernel/head.S
+++ b/arch/xtensa/kernel/head.S
@@ -280,12 +280,13 @@
movi a2, cpu_start_ccount
1:
+ memw
l32i a3, a2, 0
beqi a3, 0, 1b
movi a3, 0
s32i a3, a2, 0
- memw
1:
+ memw
l32i a3, a2, 0
beqi a3, 0, 1b
wsr a3, ccount
@@ -321,11 +322,13 @@
rsr a0, prid
neg a2, a0
movi a3, cpu_start_id
+ memw
s32i a2, a3, 0
#if XCHAL_DCACHE_IS_WRITEBACK
dhwbi a3, 0
#endif
1:
+ memw
l32i a2, a3, 0
dhi a3, 0
bne a2, a0, 1b
diff --git a/arch/xtensa/kernel/process.c b/arch/xtensa/kernel/process.c
index 4bb6813..5a0e0bd 100644
--- a/arch/xtensa/kernel/process.c
+++ b/arch/xtensa/kernel/process.c
@@ -320,8 +320,8 @@
/* Stack layout: sp-4: ra, sp-3: sp' */
- pc = MAKE_PC_FROM_RA(*(unsigned long*)sp - 4, sp);
- sp = *(unsigned long *)sp - 3;
+ pc = MAKE_PC_FROM_RA(SPILL_SLOT(sp, 0), sp);
+ sp = SPILL_SLOT(sp, 1);
} while (count++ < 16);
return 0;
}
diff --git a/arch/xtensa/kernel/smp.c b/arch/xtensa/kernel/smp.c
index 932d646..be1f280 100644
--- a/arch/xtensa/kernel/smp.c
+++ b/arch/xtensa/kernel/smp.c
@@ -83,7 +83,7 @@
{
unsigned i;
- for (i = 0; i < max_cpus; ++i)
+ for_each_possible_cpu(i)
set_cpu_present(i, true);
}
@@ -96,6 +96,11 @@
pr_info("%s: Core Count = %d\n", __func__, ncpus);
pr_info("%s: Core Id = %d\n", __func__, core_id);
+ if (ncpus > NR_CPUS) {
+ ncpus = NR_CPUS;
+ pr_info("%s: limiting core count by %d\n", __func__, ncpus);
+ }
+
for (i = 0; i < ncpus; ++i)
set_cpu_possible(i, true);
}
@@ -195,9 +200,11 @@
int i;
#ifdef CONFIG_HOTPLUG_CPU
- cpu_start_id = cpu;
- system_flush_invalidate_dcache_range(
- (unsigned long)&cpu_start_id, sizeof(cpu_start_id));
+ WRITE_ONCE(cpu_start_id, cpu);
+ /* Pairs with the third memw in the cpu_restart */
+ mb();
+ system_flush_invalidate_dcache_range((unsigned long)&cpu_start_id,
+ sizeof(cpu_start_id));
#endif
smp_call_function_single(0, mx_cpu_start, (void *)cpu, 1);
@@ -206,18 +213,21 @@
ccount = get_ccount();
while (!ccount);
- cpu_start_ccount = ccount;
+ WRITE_ONCE(cpu_start_ccount, ccount);
- while (time_before(jiffies, timeout)) {
+ do {
+ /*
+ * Pairs with the first two memws in the
+ * .Lboot_secondary.
+ */
mb();
- if (!cpu_start_ccount)
- break;
- }
+ ccount = READ_ONCE(cpu_start_ccount);
+ } while (ccount && time_before(jiffies, timeout));
- if (cpu_start_ccount) {
+ if (ccount) {
smp_call_function_single(0, mx_cpu_stop,
- (void *)cpu, 1);
- cpu_start_ccount = 0;
+ (void *)cpu, 1);
+ WRITE_ONCE(cpu_start_ccount, 0);
return -EIO;
}
}
@@ -237,6 +247,7 @@
pr_debug("%s: Calling wakeup_secondary(cpu:%d, idle:%p, sp: %08lx)\n",
__func__, cpu, idle, start_info.stack);
+ init_completion(&cpu_running);
ret = boot_secondary(cpu, idle);
if (ret == 0) {
wait_for_completion_timeout(&cpu_running,
@@ -298,8 +309,10 @@
unsigned long timeout = jiffies + msecs_to_jiffies(1000);
while (time_before(jiffies, timeout)) {
system_invalidate_dcache_range((unsigned long)&cpu_start_id,
- sizeof(cpu_start_id));
- if (cpu_start_id == -cpu) {
+ sizeof(cpu_start_id));
+ /* Pairs with the second memw in the cpu_restart */
+ mb();
+ if (READ_ONCE(cpu_start_id) == -cpu) {
platform_cpu_kill(cpu);
return;
}
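
Editorial note: the xtensa SMP hunks turn the cpu_start_ccount/cpu_start_id handshake into explicit WRITE_ONCE/READ_ONCE accesses with barriers that pair with the memw instructions added in head.S. A rough portable model of that one-word handshake, using C11 acquire/release atomics in place of the kernel's mb() pairing (compile with -pthread):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_ulong cpu_start_ccount;

    static void *secondary(void *arg)
    {
    	unsigned long c;

    	(void)arg;
    	/* Boot CPU publishes a nonzero ccount; spin until it appears. */
    	while (!(c = atomic_load_explicit(&cpu_start_ccount,
    					  memory_order_acquire)))
    		;
    	/* Consume it so the boot CPU can observe completion. */
    	atomic_store_explicit(&cpu_start_ccount, 0, memory_order_release);
    	printf("secondary saw ccount=%lu\n", c);
    	return NULL;
    }

    int main(void)
    {
    	pthread_t t;

    	pthread_create(&t, NULL, secondary, NULL);
    	atomic_store_explicit(&cpu_start_ccount, 12345UL,
    			      memory_order_release);
    	/* Wait for the secondary to zero the word, as boot_secondary does. */
    	while (atomic_load_explicit(&cpu_start_ccount, memory_order_acquire))
    		;
    	pthread_join(t, NULL);
    	return 0;
    }
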
diff --git a/arch/xtensa/kernel/time.c b/arch/xtensa/kernel/time.c
index fd524a5..378186b 100644
--- a/arch/xtensa/kernel/time.c
+++ b/arch/xtensa/kernel/time.c
@@ -89,7 +89,7 @@
container_of(evt, struct ccount_timer, evt);
if (timer->irq_enabled) {
- disable_irq(evt->irq);
+ disable_irq_nosync(evt->irq);
timer->irq_enabled = 0;
}
return 0;
diff --git a/block/blk-iolatency.c b/block/blk-iolatency.c
index 19923f8..b154e05 100644
--- a/block/blk-iolatency.c
+++ b/block/blk-iolatency.c
@@ -72,6 +72,7 @@
#include <linux/sched/loadavg.h>
#include <linux/sched/signal.h>
#include <trace/events/block.h>
+#include <linux/blk-mq.h>
#include "blk-rq-qos.h"
#include "blk-stat.h"
@@ -568,6 +569,9 @@
return;
enabled = blk_iolatency_enabled(iolat->blkiolat);
+ if (!enabled)
+ return;
+
while (blkg && blkg->parent) {
iolat = blkg_to_lat(blkg);
if (!iolat) {
@@ -577,7 +581,7 @@
rqw = &iolat->rq_wait;
atomic_dec(&rqw->inflight);
- if (!enabled || iolat->min_lat_nsec == 0)
+ if (iolat->min_lat_nsec == 0)
goto next;
iolatency_record_time(iolat, &bio->bi_issue, now,
issue_as_root);
@@ -721,10 +725,13 @@
return 0;
}
-static void iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
+/*
+ * return 1 for enabling iolatency, return -1 for disabling iolatency, otherwise
+ * return 0.
+ */
+static int iolatency_set_min_lat_nsec(struct blkcg_gq *blkg, u64 val)
{
struct iolatency_grp *iolat = blkg_to_lat(blkg);
- struct blk_iolatency *blkiolat = iolat->blkiolat;
u64 oldval = iolat->min_lat_nsec;
iolat->min_lat_nsec = val;
@@ -733,9 +740,10 @@
BLKIOLATENCY_MAX_WIN_SIZE);
if (!oldval && val)
- atomic_inc(&blkiolat->enabled);
+ return 1;
if (oldval && !val)
- atomic_dec(&blkiolat->enabled);
+ return -1;
+ return 0;
}
static void iolatency_clear_scaling(struct blkcg_gq *blkg)
@@ -768,6 +776,7 @@
u64 lat_val = 0;
u64 oldval;
int ret;
+ int enable = 0;
ret = blkg_conf_prep(blkcg, &blkcg_policy_iolatency, buf, &ctx);
if (ret)
@@ -803,7 +812,12 @@
blkg = ctx.blkg;
oldval = iolat->min_lat_nsec;
- iolatency_set_min_lat_nsec(blkg, lat_val);
+ enable = iolatency_set_min_lat_nsec(blkg, lat_val);
+ if (enable) {
+ WARN_ON_ONCE(!blk_get_queue(blkg->q));
+ blkg_get(blkg);
+ }
+
if (oldval != iolat->min_lat_nsec) {
iolatency_clear_scaling(blkg);
}
@@ -811,6 +825,24 @@
ret = 0;
out:
blkg_conf_finish(&ctx);
+ if (ret == 0 && enable) {
+ struct iolatency_grp *tmp = blkg_to_lat(blkg);
+ struct blk_iolatency *blkiolat = tmp->blkiolat;
+
+ blk_mq_freeze_queue(blkg->q);
+
+ if (enable == 1)
+ atomic_inc(&blkiolat->enabled);
+ else if (enable == -1)
+ atomic_dec(&blkiolat->enabled);
+ else
+ WARN_ON_ONCE(1);
+
+ blk_mq_unfreeze_queue(blkg->q);
+
+ blkg_put(blkg);
+ blk_put_queue(blkg->q);
+ }
return ret ?: nbytes;
}
@@ -910,8 +942,14 @@
{
struct iolatency_grp *iolat = pd_to_lat(pd);
struct blkcg_gq *blkg = lat_to_blkg(iolat);
+ struct blk_iolatency *blkiolat = iolat->blkiolat;
+ int ret;
- iolatency_set_min_lat_nsec(blkg, 0);
+ ret = iolatency_set_min_lat_nsec(blkg, 0);
+ if (ret == 1)
+ atomic_inc(&blkiolat->enabled);
+ if (ret == -1)
+ atomic_dec(&blkiolat->enabled);
iolatency_clear_scaling(blkg);
}
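
Editorial note: the blk-iolatency change splits the policy into two steps: iolatency_set_min_lat_nsec() only reports the zero-to-nonzero transition (+1/-1/0), and the caller applies the enabled-counter change with the queue frozen, so the flag cannot flip while bios are in flight. A reduced model of that split, with blk_mq_freeze_queue() stood in by a mutex:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t queue_frozen = PTHREAD_MUTEX_INITIALIZER;
    static unsigned long long min_lat_nsec;
    static int enabled;	/* counts groups with a nonzero threshold */

    /* Returns +1 on a 0->nonzero transition, -1 on nonzero->0, else 0. */
    static int set_min_lat_nsec(unsigned long long val)
    {
    	unsigned long long oldval = min_lat_nsec;

    	min_lat_nsec = val;
    	if (!oldval && val)
    		return 1;
    	if (oldval && !val)
    		return -1;
    	return 0;
    }

    int main(void)
    {
    	int delta = set_min_lat_nsec(100000);

    	if (delta) {
    		pthread_mutex_lock(&queue_frozen);	/* blk_mq_freeze_queue */
    		enabled += delta;
    		pthread_mutex_unlock(&queue_frozen);	/* blk_mq_unfreeze_queue */
    	}
    	printf("enabled=%d\n", enabled);
    	return 0;
    }
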
diff --git a/drivers/base/dd.c b/drivers/base/dd.c
index 467c43c..e378af5 100644
--- a/drivers/base/dd.c
+++ b/drivers/base/dd.c
@@ -984,9 +984,9 @@
drv->remove(dev);
device_links_driver_cleanup(dev);
- dma_deconfigure(dev);
devres_release_all(dev);
+ dma_deconfigure(dev);
dev->driver = NULL;
dev_set_drvdata(dev, NULL);
if (dev->pm_domain && dev->pm_domain->dismiss)
diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c
index 7f9ea8e..1342f8e 100644
--- a/drivers/bluetooth/btrtl.c
+++ b/drivers/bluetooth/btrtl.c
@@ -544,10 +544,9 @@
hdev->bus);
if (!btrtl_dev->ic_info) {
- rtl_dev_err(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
+ rtl_dev_info(hdev, "rtl: unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x",
lmp_subver, hci_rev, hci_ver);
- ret = -EINVAL;
- goto err_free;
+ return btrtl_dev;
}
if (btrtl_dev->ic_info->has_rom_version) {
@@ -602,6 +601,11 @@
* standard btusb. Once that firmware is uploaded, the subver changes
* to a different value.
*/
+ if (!btrtl_dev->ic_info) {
+ rtl_dev_info(hdev, "rtl: assuming no firmware upload needed\n");
+ return 0;
+ }
+
switch (btrtl_dev->ic_info->lmp_subver) {
case RTL_ROM_LMP_8723A:
case RTL_ROM_LMP_3499:
diff --git a/drivers/char/applicom.c b/drivers/char/applicom.c
index c0a5b1f..4ccc39e 100644
--- a/drivers/char/applicom.c
+++ b/drivers/char/applicom.c
@@ -32,6 +32,7 @@
#include <linux/wait.h>
#include <linux/init.h>
#include <linux/fs.h>
+#include <linux/nospec.h>
#include <asm/io.h>
#include <linux/uaccess.h>
@@ -386,7 +387,11 @@
TicCard = st_loc.tic_des_from_pc; /* tic number to send */
IndexCard = NumCard - 1;
- if((NumCard < 1) || (NumCard > MAX_BOARD) || !apbs[IndexCard].RamIO)
+ if (IndexCard >= MAX_BOARD)
+ return -EINVAL;
+ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+ if (!apbs[IndexCard].RamIO)
return -EINVAL;
#ifdef DEBUG
@@ -697,6 +702,7 @@
unsigned char IndexCard;
void __iomem *pmem;
int ret = 0;
+ static int warncount = 10;
volatile unsigned char byte_reset_it;
struct st_ram_io *adgl;
void __user *argp = (void __user *)arg;
@@ -711,16 +717,12 @@
mutex_lock(&ac_mutex);
IndexCard = adgl->num_card-1;
- if(cmd != 6 && ((IndexCard >= MAX_BOARD) || !apbs[IndexCard].RamIO)) {
- static int warncount = 10;
- if (warncount) {
- printk( KERN_WARNING "APPLICOM driver IOCTL, bad board number %d\n",(int)IndexCard+1);
- warncount--;
- }
- kfree(adgl);
- mutex_unlock(&ac_mutex);
- return -EINVAL;
- }
+ if (cmd != 6 && IndexCard >= MAX_BOARD)
+ goto err;
+ IndexCard = array_index_nospec(IndexCard, MAX_BOARD);
+
+ if (cmd != 6 && !apbs[IndexCard].RamIO)
+ goto err;
switch (cmd) {
@@ -838,5 +840,16 @@
kfree(adgl);
mutex_unlock(&ac_mutex);
return 0;
+
+err:
+ if (warncount) {
+ pr_warn("APPLICOM driver IOCTL, bad board number %d\n",
+ (int)IndexCard + 1);
+ warncount--;
+ }
+ kfree(adgl);
+ mutex_unlock(&ac_mutex);
+ return -EINVAL;
+
}
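
Editorial note: the applicom rework is the standard Spectre-v1 hardening recipe: bounds-check the user-controlled index, then clamp it with array_index_nospec() before the array access so the load cannot be speculated past the check. A userspace sketch with the helper reduced to a simple mask (the real kernel helper generates branch-free arch-specific code):

    #include <stddef.h>
    #include <stdio.h>

    #define MAX_BOARD 8

    /* Simplified stand-in for array_index_nospec(): yields index when in
     * bounds, 0 otherwise, via masking rather than relying on the branch. */
    static size_t index_nospec(size_t index, size_t size)
    {
    	size_t mask = (index < size) ? ~(size_t)0 : 0;

    	return index & mask;
    }

    static int boards[MAX_BOARD] = { 1, 2, 3, 4, 5, 6, 7, 8 };

    static int read_board(size_t user_index)
    {
    	if (user_index >= MAX_BOARD)
    		return -1;			/* architectural check */
    	user_index = index_nospec(user_index, MAX_BOARD);
    	return boards[user_index];		/* safe even speculatively */
    }

    int main(void)
    {
    	printf("%d %d\n", read_board(3), read_board(99));
    	return 0;
    }
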
diff --git a/drivers/char/diag/diagfwd_socket.c b/drivers/char/diag/diagfwd_socket.c
index 8d216d4..27c1f64 100644
--- a/drivers/char/diag/diagfwd_socket.c
+++ b/drivers/char/diag/diagfwd_socket.c
@@ -712,7 +712,12 @@
if (err || pkt_len < 0)
break;
spin_lock_irqsave(&info->lock, flags);
- info->data_ready--;
+ if (info->data_ready > 0) {
+ info->data_ready--;
+ } else {
+ spin_unlock_irqrestore(&info->lock, flags);
+ break;
+ }
spin_unlock_irqrestore(&info->lock, flags);
read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
pkt_len, MSG_DONTWAIT);
@@ -808,7 +813,13 @@
}
spin_lock_irqsave(&info->lock, flags);
- info->data_ready--;
+ if (info->data_ready > 0) {
+ info->data_ready--;
+ } else {
+ spin_unlock_irqrestore(&info->lock, flags);
+ mutex_unlock(&info->socket_info_mutex);
+ break;
+ }
spin_unlock_irqrestore(&info->lock, flags);
read_len = kernel_recvmsg(info->hdl, &read_msg, &iov, 1,
diff --git a/drivers/clk/qcom/gcc-sdm845.c b/drivers/clk/qcom/gcc-sdm845.c
index fa1a196..3bf11a6 100644
--- a/drivers/clk/qcom/gcc-sdm845.c
+++ b/drivers/clk/qcom/gcc-sdm845.c
@@ -131,8 +131,8 @@
"core_bi_pll_test_se",
};
-static const char * const gcc_parent_names_7[] = {
- "bi_tcxo",
+static const char * const gcc_parent_names_7_ao[] = {
+ "bi_tcxo_ao",
"gpll0",
"gpll0_out_even",
"core_bi_pll_test_se",
@@ -144,6 +144,12 @@
"core_bi_pll_test_se",
};
+static const char * const gcc_parent_names_8_ao[] = {
+ "bi_tcxo_ao",
+ "gpll0",
+ "core_bi_pll_test_se",
+};
+
static const struct parent_map gcc_parent_map_10[] = {
{ P_BI_TCXO, 0 },
{ P_GPLL0_OUT_MAIN, 1 },
@@ -226,7 +232,7 @@
.freq_tbl = ftbl_gcc_cpuss_ahb_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_ahb_clk_src",
- .parent_names = gcc_parent_names_7,
+ .parent_names = gcc_parent_names_7_ao,
.num_parents = 4,
.ops = &clk_rcg2_ops,
},
@@ -245,7 +251,7 @@
.freq_tbl = ftbl_gcc_cpuss_rbcpr_clk_src,
.clkr.hw.init = &(struct clk_init_data){
.name = "gcc_cpuss_rbcpr_clk_src",
- .parent_names = gcc_parent_names_8,
+ .parent_names = gcc_parent_names_8_ao,
.num_parents = 3,
.ops = &clk_rcg2_ops,
},
diff --git a/drivers/clk/qcom/mdss/mdss-pll.c b/drivers/clk/qcom/mdss/mdss-pll.c
index c0ad6bf..7b95b10 100644
--- a/drivers/clk/qcom/mdss/mdss-pll.c
+++ b/drivers/clk/qcom/mdss/mdss-pll.c
@@ -229,7 +229,7 @@
label = of_get_property(pdev->dev.of_node, "label", NULL);
if (!label)
- pr_info("%d: MDSS pll label not specified\n");
+ pr_info("MDSS pll label not specified\n");
else
pr_info("MDSS pll label = %s\n", label);
diff --git a/drivers/clk/qcom/mdss/mdss_pll_trace.h b/drivers/clk/qcom/mdss/mdss_pll_trace.h
index fd193bf..cf46c7f 100644
--- a/drivers/clk/qcom/mdss/mdss_pll_trace.h
+++ b/drivers/clk/qcom/mdss/mdss_pll_trace.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#if !defined(_MDSS_PLL_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
@@ -13,7 +13,7 @@
#undef TRACE_SYSTEM
#define TRACE_SYSTEM mdss_pll
#undef TRACE_INCLUDE_FILE
-#define TRACE_INCLUDE_FILE mdss_pll_trace
+#define TRACE_INCLUDE_FILE ../../drivers/clk/qcom/mdss/mdss_pll_trace
TRACE_EVENT(mdss_pll_lock_start,
diff --git a/drivers/clk/ti/divider.c b/drivers/clk/ti/divider.c
index ccfb4d9..079f0be 100644
--- a/drivers/clk/ti/divider.c
+++ b/drivers/clk/ti/divider.c
@@ -367,8 +367,10 @@
num_dividers = i;
tmp = kcalloc(valid_div + 1, sizeof(*tmp), GFP_KERNEL);
- if (!tmp)
+ if (!tmp) {
+ *table = ERR_PTR(-ENOMEM);
return -ENOMEM;
+ }
valid_div = 0;
*width = 0;
@@ -403,6 +405,7 @@
{
struct clk_omap_divider *div;
struct clk_omap_reg *reg;
+ int ret;
if (!setup)
return NULL;
@@ -422,6 +425,12 @@
div->flags |= CLK_DIVIDER_POWER_OF_TWO;
div->table = _get_div_table_from_setup(setup, &div->width);
+ if (IS_ERR(div->table)) {
+ ret = PTR_ERR(div->table);
+ kfree(div);
+ return ERR_PTR(ret);
+ }
+
div->shift = setup->bit_shift;
div->latch = -EINVAL;
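
Editorial note: the TI divider fix pushes three outcomes through one pointer: a valid table, NULL meaning "no table needed", and ERR_PTR(-ENOMEM) meaning allocation failure, which the caller unpacks with IS_ERR()/PTR_ERR(). A self-contained sketch of that convention, with minimal local reimplementations of the linux/err.h helpers:

    #include <stdio.h>
    #include <stdlib.h>

    #define ENOMEM 12
    #define MAX_ERRNO 4095

    /* Minimal stand-ins for the kernel's ERR_PTR()/PTR_ERR()/IS_ERR(). */
    static void *ERR_PTR(long error) { return (void *)error; }
    static long PTR_ERR(const void *ptr) { return (long)ptr; }
    static int IS_ERR(const void *ptr)
    {
    	return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
    }

    static void *get_div_table(int want)
    {
    	void *t;

    	if (!want)
    		return NULL;			/* legitimately no table */
    	t = malloc(64);
    	return t ? t : ERR_PTR(-ENOMEM);	/* failure is not NULL */
    }

    int main(void)
    {
    	void *table = get_div_table(1);

    	if (IS_ERR(table))
    		printf("alloc failed: %ld\n", PTR_ERR(table));
    	else
    		printf("table=%p (NULL would mean \"none\")\n", table);
    	free(IS_ERR(table) ? NULL : table);
    	return 0;
    }
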
diff --git a/drivers/connector/cn_proc.c b/drivers/connector/cn_proc.c
index ed5e424..ad48fd5 100644
--- a/drivers/connector/cn_proc.c
+++ b/drivers/connector/cn_proc.c
@@ -250,6 +250,7 @@
{
struct cn_msg *msg;
struct proc_event *ev;
+ struct task_struct *parent;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
@@ -262,8 +263,14 @@
ev->what = PROC_EVENT_COREDUMP;
ev->event_data.coredump.process_pid = task->pid;
ev->event_data.coredump.process_tgid = task->tgid;
- ev->event_data.coredump.parent_pid = task->real_parent->pid;
- ev->event_data.coredump.parent_tgid = task->real_parent->tgid;
+
+ rcu_read_lock();
+ if (pid_alive(task)) {
+ parent = rcu_dereference(task->real_parent);
+ ev->event_data.coredump.parent_pid = parent->pid;
+ ev->event_data.coredump.parent_tgid = parent->tgid;
+ }
+ rcu_read_unlock();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
@@ -276,6 +283,7 @@
{
struct cn_msg *msg;
struct proc_event *ev;
+ struct task_struct *parent;
__u8 buffer[CN_PROC_MSG_SIZE] __aligned(8);
if (atomic_read(&proc_event_num_listeners) < 1)
@@ -290,8 +298,14 @@
ev->event_data.exit.process_tgid = task->tgid;
ev->event_data.exit.exit_code = task->exit_code;
ev->event_data.exit.exit_signal = task->exit_signal;
- ev->event_data.exit.parent_pid = task->real_parent->pid;
- ev->event_data.exit.parent_tgid = task->real_parent->tgid;
+
+ rcu_read_lock();
+ if (pid_alive(task)) {
+ parent = rcu_dereference(task->real_parent);
+ ev->event_data.exit.parent_pid = parent->pid;
+ ev->event_data.exit.parent_tgid = parent->tgid;
+ }
+ rcu_read_unlock();
memcpy(&msg->id, &cn_proc_event_id, sizeof(msg->id));
msg->ack = 0; /* not used */
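
Editorial note: both cn_proc hunks guard the task->real_parent dereference with rcu_read_lock() plus a pid_alive() recheck, because an exiting task's parent pointer may already be stale when the event fires. A loose userspace analogy of "take the read lock, recheck liveness, only then chase the pointer", with a mutex standing in for RCU:

    #include <pthread.h>
    #include <stdio.h>

    struct task {
    	int alive;		/* models pid_alive(task) */
    	struct task *real_parent;
    	int pid;
    };

    static pthread_mutex_t tasklist = PTHREAD_MUTEX_INITIALIZER;

    /* Models the fixed pattern: lock, recheck liveness, then dereference.
     * (The kernel uses RCU read-side protection, not a mutex.) */
    static int parent_pid(struct task *t)
    {
    	int pid = 0;	/* stays unset when the task already exited */

    	pthread_mutex_lock(&tasklist);
    	if (t->alive)
    		pid = t->real_parent->pid;
    	pthread_mutex_unlock(&tasklist);
    	return pid;
    }

    int main(void)
    {
    	struct task init_task = { 1, NULL, 1 };
    	struct task child = { 1, &init_task, 42 };

    	printf("parent=%d\n", parent_pid(&child));
    	child.alive = 0;	/* reaped: parent pointer may be stale */
    	printf("parent=%d\n", parent_pid(&child));
    	return 0;
    }
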
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index c01c683..5279839 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -358,7 +358,7 @@
}
cpufreq_stats_record_transition(policy, freqs->new);
- cpufreq_times_record_transition(freqs);
+ cpufreq_times_record_transition(policy, freqs->new);
policy->cur = freqs->new;
}
}
@@ -555,13 +555,13 @@
* SYSFS INTERFACE *
*********************************************************************/
static ssize_t show_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%d\n", cpufreq_driver->boost_enabled);
}
-static ssize_t store_boost(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t count)
+static ssize_t store_boost(struct kobject *kobj, struct kobj_attribute *attr,
+ const char *buf, size_t count)
{
int ret, enable;
@@ -1869,9 +1869,15 @@
unsigned int cpufreq_driver_fast_switch(struct cpufreq_policy *policy,
unsigned int target_freq)
{
+ int ret;
+
target_freq = clamp_val(target_freq, policy->min, policy->max);
- return cpufreq_driver->fast_switch(policy, target_freq);
+ ret = cpufreq_driver->fast_switch(policy, target_freq);
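+	/* fast_switch() returns the frequency actually set, or 0 on failure. */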
+ if (ret)
+ cpufreq_times_record_transition(policy, ret);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(cpufreq_driver_fast_switch);
diff --git a/drivers/cpufreq/cpufreq_times.c b/drivers/cpufreq/cpufreq_times.c
index a43eeee..2883d67 100644
--- a/drivers/cpufreq/cpufreq_times.c
+++ b/drivers/cpufreq/cpufreq_times.c
@@ -32,11 +32,17 @@
static DEFINE_SPINLOCK(task_time_in_state_lock); /* task->time_in_state */
static DEFINE_SPINLOCK(uid_lock); /* uid_hash_table */
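+/*
+ * Per-uid concurrency counters: active[i] accumulates time charged while
+ * exactly i + 1 CPUs were active; policy[] is indexed by the first CPU of
+ * the task's cpufreq policy plus the number of active CPUs in that policy,
+ * minus one.
+ */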
+struct concurrent_times {
+ atomic64_t active[NR_CPUS];
+ atomic64_t policy[NR_CPUS];
+};
+
struct uid_entry {
uid_t uid;
unsigned int max_state;
struct hlist_node hash;
struct rcu_head rcu;
+ struct concurrent_times *concurrent_times;
u64 time_in_state[0];
};
@@ -87,6 +93,7 @@
static struct uid_entry *find_or_register_uid_locked(uid_t uid)
{
struct uid_entry *uid_entry, *temp;
+ struct concurrent_times *times;
unsigned int max_state = READ_ONCE(next_offset);
size_t alloc_size = sizeof(*uid_entry) + max_state *
sizeof(uid_entry->time_in_state[0]);
@@ -115,9 +122,15 @@
uid_entry = kzalloc(alloc_size, GFP_ATOMIC);
if (!uid_entry)
return NULL;
+ times = kzalloc(sizeof(*times), GFP_ATOMIC);
+ if (!times) {
+ kfree(uid_entry);
+ return NULL;
+ }
uid_entry->uid = uid;
uid_entry->max_state = max_state;
+ uid_entry->concurrent_times = times;
hash_add_rcu(uid_hash_table, &uid_entry->hash, uid);
@@ -180,10 +193,12 @@
static void *uid_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
- (*pos)++;
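+	/* Skip empty hash buckets so only populated entries are visited. */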
+ do {
+ (*pos)++;
- if (*pos >= HASH_SIZE(uid_hash_table))
- return NULL;
+ if (*pos >= HASH_SIZE(uid_hash_table))
+ return NULL;
+ } while (hlist_empty(&uid_hash_table[*pos]));
return &uid_hash_table[*pos];
}
@@ -207,7 +222,8 @@
if (freqs->freq_table[i] ==
CPUFREQ_ENTRY_INVALID)
continue;
- seq_printf(m, " %d", freqs->freq_table[i]);
+ seq_put_decimal_ull(m, " ",
+ freqs->freq_table[i]);
}
}
seq_putc(m, '\n');
@@ -216,13 +232,16 @@
rcu_read_lock();
hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
- if (uid_entry->max_state)
- seq_printf(m, "%d:", uid_entry->uid);
+ if (uid_entry->max_state) {
+ seq_put_decimal_ull(m, "", uid_entry->uid);
+ seq_putc(m, ':');
+ }
for (i = 0; i < uid_entry->max_state; ++i) {
+ u64 time;
if (freq_index_invalid(i))
continue;
- seq_printf(m, " %lu", (unsigned long)nsec_to_clock_t(
- uid_entry->time_in_state[i]));
+ time = nsec_to_clock_t(uid_entry->time_in_state[i]);
+ seq_put_decimal_ull(m, " ", time);
}
if (uid_entry->max_state)
seq_putc(m, '\n');
@@ -232,6 +251,86 @@
return 0;
}
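+/* Common show helper; @get_times selects the active[] or policy[] counters. */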
+static int concurrent_time_seq_show(struct seq_file *m, void *v,
+ atomic64_t *(*get_times)(struct concurrent_times *))
+{
+ struct uid_entry *uid_entry;
+ int i, num_possible_cpus = num_possible_cpus();
+
+ rcu_read_lock();
+
+ hlist_for_each_entry_rcu(uid_entry, (struct hlist_head *)v, hash) {
+ atomic64_t *times = get_times(uid_entry->concurrent_times);
+
+ seq_put_decimal_ull(m, "", (u64)uid_entry->uid);
+ seq_putc(m, ':');
+
+ for (i = 0; i < num_possible_cpus; ++i) {
+			u64 time = nsec_to_clock_t(atomic64_read(&times[i]));
+
+ seq_put_decimal_ull(m, " ", time);
+ }
+ seq_putc(m, '\n');
+ }
+
+ rcu_read_unlock();
+
+ return 0;
+}
+
+static inline atomic64_t *get_active_times(struct concurrent_times *times)
+{
+ return times->active;
+}
+
+static int concurrent_active_time_seq_show(struct seq_file *m, void *v)
+{
+ if (v == uid_hash_table) {
+ seq_put_decimal_ull(m, "cpus: ", num_possible_cpus());
+ seq_putc(m, '\n');
+ }
+
+ return concurrent_time_seq_show(m, v, get_active_times);
+}
+
+static inline atomic64_t *get_policy_times(struct concurrent_times *times)
+{
+ return times->policy;
+}
+
+static int concurrent_policy_time_seq_show(struct seq_file *m, void *v)
+{
+ int i;
+ struct cpu_freqs *freqs, *last_freqs = NULL;
+
+ if (v == uid_hash_table) {
+ int cnt = 0;
+
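+		/* Header: one "policy<first-cpu>: <cpu-count>" entry per policy. */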
+ for_each_possible_cpu(i) {
+ freqs = all_freqs[i];
+ if (!freqs)
+ continue;
+ if (freqs != last_freqs) {
+ if (last_freqs) {
+ seq_put_decimal_ull(m, ": ", cnt);
+ seq_putc(m, ' ');
+ cnt = 0;
+ }
+ seq_put_decimal_ull(m, "policy", i);
+
+ last_freqs = freqs;
+ }
+ cnt++;
+ }
+ if (last_freqs) {
+ seq_put_decimal_ull(m, ": ", cnt);
+ seq_putc(m, '\n');
+ }
+ }
+
+ return concurrent_time_seq_show(m, v, get_policy_times);
+}
+
void cpufreq_task_times_init(struct task_struct *p)
{
unsigned long flags;
@@ -326,11 +425,16 @@
{
unsigned long flags;
unsigned int state;
+ unsigned int active_cpu_cnt = 0;
+ unsigned int policy_cpu_cnt = 0;
+ unsigned int policy_first_cpu;
struct uid_entry *uid_entry;
struct cpu_freqs *freqs = all_freqs[task_cpu(p)];
+ struct cpufreq_policy *policy;
uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
+ int cpu = 0;
- if (!freqs || p->flags & PF_EXITING)
+ if (!freqs || is_idle_task(p) || p->flags & PF_EXITING)
return;
state = freqs->offset + READ_ONCE(freqs->last_index);
@@ -346,6 +450,42 @@
if (uid_entry && state < uid_entry->max_state)
uid_entry->time_in_state[state] += cputime;
spin_unlock_irqrestore(&uid_lock, flags);
+
+ rcu_read_lock();
+ uid_entry = find_uid_entry_rcu(uid);
+ if (!uid_entry) {
+ rcu_read_unlock();
+ return;
+ }
+
+ for_each_possible_cpu(cpu)
+ if (!idle_cpu(cpu))
+ ++active_cpu_cnt;
+
+ atomic64_add(cputime,
+ &uid_entry->concurrent_times->active[active_cpu_cnt - 1]);
+
+ policy = cpufreq_cpu_get(task_cpu(p));
+ if (!policy) {
+ /*
+ * This CPU may have just come up and not have a cpufreq policy
+ * yet.
+ */
+ rcu_read_unlock();
+ return;
+ }
+
+ for_each_cpu(cpu, policy->related_cpus)
+ if (!idle_cpu(cpu))
+ ++policy_cpu_cnt;
+
+ policy_first_cpu = cpumask_first(policy->related_cpus);
+ cpufreq_cpu_put(policy);
+
+ atomic64_add(cputime,
+ &uid_entry->concurrent_times->policy[policy_first_cpu +
+ policy_cpu_cnt - 1]);
+ rcu_read_unlock();
}
void cpufreq_times_create_policy(struct cpufreq_policy *policy)
@@ -387,6 +527,14 @@
all_freqs[cpu] = freqs;
}
+static void uid_entry_reclaim(struct rcu_head *rcu)
+{
+ struct uid_entry *uid_entry = container_of(rcu, struct uid_entry, rcu);
+
+ kfree(uid_entry->concurrent_times);
+ kfree(uid_entry);
+}
+
void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end)
{
struct uid_entry *uid_entry;
@@ -400,7 +548,7 @@
hash, uid_start) {
if (uid_start == uid_entry->uid) {
hash_del_rcu(&uid_entry->hash);
- kfree_rcu(uid_entry, rcu);
+ call_rcu(&uid_entry->rcu, uid_entry_reclaim);
}
}
}
@@ -408,24 +556,17 @@
spin_unlock_irqrestore(&uid_lock, flags);
}
-void cpufreq_times_record_transition(struct cpufreq_freqs *freq)
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+ unsigned int new_freq)
{
int index;
- struct cpu_freqs *freqs = all_freqs[freq->cpu];
- struct cpufreq_policy *policy;
-
+ struct cpu_freqs *freqs = all_freqs[policy->cpu];
if (!freqs)
return;
- policy = cpufreq_cpu_get(freq->cpu);
- if (!policy)
- return;
-
- index = cpufreq_frequency_table_get_index(policy, freq->new);
+ index = cpufreq_frequency_table_get_index(policy, new_freq);
if (index >= 0)
WRITE_ONCE(freqs->last_index, index);
-
- cpufreq_cpu_put(policy);
}
static const struct seq_operations uid_time_in_state_seq_ops = {
@@ -453,11 +594,55 @@
.release = seq_release,
};
+static const struct seq_operations concurrent_active_time_seq_ops = {
+ .start = uid_seq_start,
+ .next = uid_seq_next,
+ .stop = uid_seq_stop,
+ .show = concurrent_active_time_seq_show,
+};
+
+static int concurrent_active_time_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &concurrent_active_time_seq_ops);
+}
+
+static const struct file_operations concurrent_active_time_fops = {
+ .open = concurrent_active_time_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+static const struct seq_operations concurrent_policy_time_seq_ops = {
+ .start = uid_seq_start,
+ .next = uid_seq_next,
+ .stop = uid_seq_stop,
+ .show = concurrent_policy_time_seq_show,
+};
+
+static int concurrent_policy_time_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &concurrent_policy_time_seq_ops);
+}
+
+static const struct file_operations concurrent_policy_time_fops = {
+ .open = concurrent_policy_time_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
static int __init cpufreq_times_init(void)
{
proc_create_data("uid_time_in_state", 0444, NULL,
&uid_time_in_state_fops, NULL);
+ proc_create_data("uid_concurrent_active_time", 0444, NULL,
+ &concurrent_active_time_fops, NULL);
+
+ proc_create_data("uid_concurrent_policy_time", 0444, NULL,
+ &concurrent_policy_time_fops, NULL);
+
return 0;
}
diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c
index b6a1aad..a005711 100644
--- a/drivers/cpufreq/intel_pstate.c
+++ b/drivers/cpufreq/intel_pstate.c
@@ -833,7 +833,7 @@
/************************** sysfs begin ************************/
#define show_one(file_name, object) \
static ssize_t show_##file_name \
- (struct kobject *kobj, struct attribute *attr, char *buf) \
+ (struct kobject *kobj, struct kobj_attribute *attr, char *buf) \
{ \
return sprintf(buf, "%u\n", global.object); \
}
@@ -842,7 +842,7 @@
static int intel_pstate_update_status(const char *buf, size_t size);
static ssize_t show_status(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
ssize_t ret;
@@ -853,7 +853,7 @@
return ret;
}
-static ssize_t store_status(struct kobject *a, struct attribute *b,
+static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
char *p = memchr(buf, '\n', count);
@@ -867,7 +867,7 @@
}
static ssize_t show_turbo_pct(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total, no_turbo, turbo_pct;
@@ -893,7 +893,7 @@
}
static ssize_t show_num_pstates(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
struct cpudata *cpu;
int total;
@@ -914,7 +914,7 @@
}
static ssize_t show_no_turbo(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
ssize_t ret;
@@ -936,7 +936,7 @@
return ret;
}
-static ssize_t store_no_turbo(struct kobject *a, struct attribute *b,
+static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -983,7 +983,7 @@
return count;
}
-static ssize_t store_max_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1013,7 +1013,7 @@
return count;
}
-static ssize_t store_min_perf_pct(struct kobject *a, struct attribute *b,
+static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
@@ -1045,12 +1045,13 @@
}
static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
- struct attribute *attr, char *buf)
+ struct kobj_attribute *attr, char *buf)
{
return sprintf(buf, "%u\n", hwp_boost);
}
-static ssize_t store_hwp_dynamic_boost(struct kobject *a, struct attribute *b,
+static ssize_t store_hwp_dynamic_boost(struct kobject *a,
+ struct kobj_attribute *b,
const char *buf, size_t count)
{
unsigned int input;
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 4bf7256..a75b95f 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -203,6 +203,7 @@
u32 save_cim;
u32 save_cnda;
u32 save_cndc;
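+	/* CIS bits latched in the interrupt handler for the tasklet to consume. */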
+ u32 irq_status;
unsigned long status;
struct tasklet_struct tasklet;
struct dma_slave_config sconfig;
@@ -1580,8 +1581,8 @@
struct at_xdmac_desc *desc;
u32 error_mask;
- dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08lx\n",
- __func__, atchan->status);
+ dev_dbg(chan2dev(&atchan->chan), "%s: status=0x%08x\n",
+ __func__, atchan->irq_status);
error_mask = AT_XDMAC_CIS_RBEIS
| AT_XDMAC_CIS_WBEIS
@@ -1589,15 +1590,15 @@
if (at_xdmac_chan_is_cyclic(atchan)) {
at_xdmac_handle_cyclic(atchan);
- } else if ((atchan->status & AT_XDMAC_CIS_LIS)
- || (atchan->status & error_mask)) {
+ } else if ((atchan->irq_status & AT_XDMAC_CIS_LIS)
+ || (atchan->irq_status & error_mask)) {
struct dma_async_tx_descriptor *txd;
- if (atchan->status & AT_XDMAC_CIS_RBEIS)
+ if (atchan->irq_status & AT_XDMAC_CIS_RBEIS)
dev_err(chan2dev(&atchan->chan), "read bus error!!!");
- if (atchan->status & AT_XDMAC_CIS_WBEIS)
+ if (atchan->irq_status & AT_XDMAC_CIS_WBEIS)
dev_err(chan2dev(&atchan->chan), "write bus error!!!");
- if (atchan->status & AT_XDMAC_CIS_ROIS)
+ if (atchan->irq_status & AT_XDMAC_CIS_ROIS)
dev_err(chan2dev(&atchan->chan), "request overflow error!!!");
spin_lock_bh(&atchan->lock);
@@ -1652,7 +1653,7 @@
atchan = &atxdmac->chan[i];
chan_imr = at_xdmac_chan_read(atchan, AT_XDMAC_CIM);
chan_status = at_xdmac_chan_read(atchan, AT_XDMAC_CIS);
- atchan->status = chan_status & chan_imr;
+ atchan->irq_status = chan_status & chan_imr;
dev_vdbg(atxdmac->dma.dev,
"%s: chan%d: imr=0x%x, status=0x%x\n",
__func__, i, chan_imr, chan_status);
@@ -1666,7 +1667,7 @@
at_xdmac_chan_read(atchan, AT_XDMAC_CDA),
at_xdmac_chan_read(atchan, AT_XDMAC_CUBC));
- if (atchan->status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
+ if (atchan->irq_status & (AT_XDMAC_CIS_RBEIS | AT_XDMAC_CIS_WBEIS))
at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
tasklet_schedule(&atchan->tasklet);
diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c
index aa1712b..7b7fba0 100644
--- a/drivers/dma/dmatest.c
+++ b/drivers/dma/dmatest.c
@@ -642,11 +642,9 @@
srcs[i] = um->addr[i] + src_off;
ret = dma_mapping_error(dev->dev, um->addr[i]);
if (ret) {
- dmaengine_unmap_put(um);
result("src mapping error", total_tests,
src_off, dst_off, len, ret);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
um->to_cnt++;
}
@@ -661,11 +659,9 @@
DMA_BIDIRECTIONAL);
ret = dma_mapping_error(dev->dev, dsts[i]);
if (ret) {
- dmaengine_unmap_put(um);
result("dst mapping error", total_tests,
src_off, dst_off, len, ret);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
um->bidi_cnt++;
}
@@ -693,12 +689,10 @@
}
if (!tx) {
- dmaengine_unmap_put(um);
result("prep error", total_tests, src_off,
dst_off, len, ret);
msleep(100);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
done->done = false;
@@ -707,12 +701,10 @@
cookie = tx->tx_submit(tx);
if (dma_submit_error(cookie)) {
- dmaengine_unmap_put(um);
result("submit error", total_tests, src_off,
dst_off, len, ret);
msleep(100);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
dma_async_issue_pending(chan);
@@ -725,16 +717,14 @@
dmaengine_unmap_put(um);
result("test timed out", total_tests, src_off, dst_off,
len, 0);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
} else if (status != DMA_COMPLETE) {
dmaengine_unmap_put(um);
result(status == DMA_ERROR ?
"completion error status" :
"completion busy status", total_tests, src_off,
dst_off, len, ret);
- failed_tests++;
- continue;
+ goto error_unmap_continue;
}
dmaengine_unmap_put(um);
@@ -779,6 +769,12 @@
verbose_result("test passed", total_tests, src_off,
dst_off, len, 0);
}
+
+ continue;
+
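+	/* All failure paths funnel here: drop the unmap reference once and
+	 * count the failure.
+	 */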
+error_unmap_continue:
+ dmaengine_unmap_put(um);
+ failed_tests++;
}
ktime = ktime_sub(ktime_get(), ktime);
ktime = ktime_sub(ktime, comparetime);
diff --git a/drivers/firmware/iscsi_ibft.c b/drivers/firmware/iscsi_ibft.c
index 6bc8e66..c51462f 100644
--- a/drivers/firmware/iscsi_ibft.c
+++ b/drivers/firmware/iscsi_ibft.c
@@ -542,6 +542,7 @@
case ISCSI_BOOT_TGT_NIC_ASSOC:
case ISCSI_BOOT_TGT_CHAP_TYPE:
rc = S_IRUGO;
+ break;
case ISCSI_BOOT_TGT_NAME:
if (tgt->tgt_name_len)
rc = S_IRUGO;
diff --git a/drivers/gnss/sirf.c b/drivers/gnss/sirf.c
index 2c22836..4596fde1 100644
--- a/drivers/gnss/sirf.c
+++ b/drivers/gnss/sirf.c
@@ -310,30 +310,26 @@
ret = -ENODEV;
goto err_put_device;
}
+
+ ret = regulator_enable(data->vcc);
+ if (ret)
+ goto err_put_device;
+
+ /* Wait for chip to boot into hibernate mode. */
+ msleep(SIRF_BOOT_DELAY);
}
if (data->wakeup) {
ret = gpiod_to_irq(data->wakeup);
if (ret < 0)
- goto err_put_device;
-
+ goto err_disable_vcc;
data->irq = ret;
- ret = devm_request_threaded_irq(dev, data->irq, NULL,
- sirf_wakeup_handler,
+ ret = request_threaded_irq(data->irq, NULL, sirf_wakeup_handler,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
"wakeup", data);
if (ret)
- goto err_put_device;
- }
-
- if (data->on_off) {
- ret = regulator_enable(data->vcc);
- if (ret)
- goto err_put_device;
-
- /* Wait for chip to boot into hibernate mode */
- msleep(SIRF_BOOT_DELAY);
+ goto err_disable_vcc;
}
if (IS_ENABLED(CONFIG_PM)) {
@@ -342,7 +338,7 @@
} else {
ret = sirf_runtime_resume(dev);
if (ret < 0)
- goto err_disable_vcc;
+ goto err_free_irq;
}
ret = gnss_register_device(gdev);
@@ -356,6 +352,9 @@
pm_runtime_disable(dev);
else
sirf_runtime_suspend(dev);
+err_free_irq:
+ if (data->wakeup)
+ free_irq(data->irq, data);
err_disable_vcc:
if (data->on_off)
regulator_disable(data->vcc);
@@ -376,6 +375,9 @@
else
sirf_runtime_suspend(&serdev->dev);
+ if (data->wakeup)
+ free_irq(data->irq, data);
+
if (data->on_off)
regulator_disable(data->vcc);
diff --git a/drivers/gpio/gpio-vf610.c b/drivers/gpio/gpio-vf610.c
index d4ad6d0..7e09ce7 100644
--- a/drivers/gpio/gpio-vf610.c
+++ b/drivers/gpio/gpio-vf610.c
@@ -259,6 +259,7 @@
struct vf610_gpio_port *port;
struct resource *iores;
struct gpio_chip *gc;
+ int i;
int ret;
port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
@@ -298,6 +299,10 @@
if (ret < 0)
return ret;
+ /* Mask all GPIO interrupts */
+ for (i = 0; i < gc->ngpio; i++)
+ vf610_gpio_writel(0, port->base + PORT_PCR(i));
+
/* Clear the interrupt status register for all GPIO's */
vf610_gpio_writel(~0, port->base + PORT_ISFR);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 7b4e657a..c3df75a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -1443,7 +1443,8 @@
effective_mode &= ~S_IWUSR;
if ((adev->flags & AMD_IS_APU) &&
- (attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
+ (attr == &sensor_dev_attr_power1_average.dev_attr.attr ||
+ attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr ||
attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr||
attr == &sensor_dev_attr_power1_cap.dev_attr.attr))
return 0;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 1c5d97f..8dcf622 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -37,6 +37,7 @@
#include "amdgpu_display.h"
#include <drm/amdgpu_drm.h>
#include <linux/dma-buf.h>
+#include <linux/dma-fence-array.h>
static const struct dma_buf_ops amdgpu_dmabuf_ops;
@@ -188,6 +189,48 @@
return ERR_PTR(ret);
}
+static int
+__reservation_object_make_exclusive(struct reservation_object *obj)
+{
+ struct dma_fence **fences;
+ unsigned int count;
+ int r;
+
+ if (!reservation_object_get_list(obj)) /* no shared fences to convert */
+ return 0;
+
+ r = reservation_object_get_fences_rcu(obj, NULL, &count, &fences);
+ if (r)
+ return r;
+
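+	/*
+	 * reservation_object_get_fences_rcu() hands over ownership of the
+	 * fences array; dma_fence_array_create() consumes it on success, so
+	 * it is only freed manually on the error paths below.
+	 */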
+ if (count == 0) {
+ /* Now that was unexpected. */
+ } else if (count == 1) {
+ reservation_object_add_excl_fence(obj, fences[0]);
+ dma_fence_put(fences[0]);
+ kfree(fences);
+ } else {
+ struct dma_fence_array *array;
+
+ array = dma_fence_array_create(count, fences,
+ dma_fence_context_alloc(1), 0,
+ false);
+ if (!array)
+ goto err_fences_put;
+
+ reservation_object_add_excl_fence(obj, &array->base);
+ dma_fence_put(&array->base);
+ }
+
+ return 0;
+
+err_fences_put:
+ while (count--)
+ dma_fence_put(fences[count]);
+ kfree(fences);
+ return -ENOMEM;
+}
+
/**
* amdgpu_gem_map_attach - &dma_buf_ops.attach implementation
* @dma_buf: shared DMA buffer
@@ -219,16 +262,16 @@
if (attach->dev->driver != adev->dev->driver) {
/*
- * Wait for all shared fences to complete before we switch to future
- * use of exclusive fence on this prime shared bo.
+ * We only create shared fences for internal use, but importers
+ * of the dmabuf rely on exclusive fences for implicitly
+ * tracking write hazards. As any of the current fences may
+ * correspond to a write, we need to convert all existing
+ * fences on the reservation object into a single exclusive
+ * fence.
*/
- r = reservation_object_wait_timeout_rcu(bo->tbo.resv,
- true, false,
- MAX_SCHEDULE_TIMEOUT);
- if (unlikely(r < 0)) {
- DRM_DEBUG_PRIME("Fence wait failed: %li\n", r);
+ r = __reservation_object_make_exclusive(bo->tbo.resv);
+ if (r)
goto error_unreserve;
- }
}
/* pin buffer into GTT */
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 6a84526..49fe508 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -3011,14 +3011,15 @@
struct amdgpu_task_info *task_info)
{
struct amdgpu_vm *vm;
+ unsigned long flags;
- spin_lock(&adev->vm_manager.pasid_lock);
+ spin_lock_irqsave(&adev->vm_manager.pasid_lock, flags);
vm = idr_find(&adev->vm_manager.pasid_idr, pasid);
if (vm)
*task_info = vm->task_info;
- spin_unlock(&adev->vm_manager.pasid_lock);
+ spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
/**
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 1d74aed..94f5c364 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1573,6 +1573,15 @@
+	/*
+	 * FIXME: Since prepare_fb and cleanup_fb are always called on
+	 * the new_plane_state for async updates we need to block framebuffer
+	 * changes. This prevents use of a fb that's been cleaned up and
+	 * double cleanups from occurring.
+	 */
	if (old_plane_state->fb != new_plane_state->fb)
		return -EINVAL;
funcs = plane->helper_private;
if (!funcs->atomic_async_update)
return -EINVAL;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index d708472..6794d60 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -51,10 +51,6 @@
int id,
struct drm_dp_payload *payload);
-static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
- int offset, int size, u8 *bytes);
-
static void drm_dp_send_link_address(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_branch *mstb);
static int drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
@@ -439,6 +435,7 @@
if (idx > raw->curlen)
goto fail_len;
repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+ idx++;
if (idx > raw->curlen)
goto fail_len;
@@ -1402,7 +1399,6 @@
return false;
}
-#if 0
static int build_dpcd_read(struct drm_dp_sideband_msg_tx *msg, u8 port_num, u32 offset, u8 num_bytes)
{
struct drm_dp_sideband_msg_req_body req;
@@ -1415,7 +1411,6 @@
return 0;
}
-#endif
static int drm_dp_send_sideband_msg(struct drm_dp_mst_topology_mgr *mgr,
bool up, u8 *msg, int len)
@@ -1981,30 +1976,65 @@
}
EXPORT_SYMBOL(drm_dp_update_payload_part2);
-#if 0 /* unused as of yet */
-static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
- int offset, int size)
+ int offset, int size, u8 *bytes)
{
int len;
+ int ret;
struct drm_dp_sideband_msg_tx *txmsg;
+ struct drm_dp_mst_branch *mstb;
+
+ memset(bytes, 0, size);
+
+ mstb = drm_dp_get_validated_mstb_ref(mgr, port->parent);
+ if (!mstb)
+ return -EINVAL;
txmsg = kzalloc(sizeof(*txmsg), GFP_KERNEL);
- if (!txmsg)
- return -ENOMEM;
+ if (!txmsg) {
+ ret = -ENOMEM;
+ goto fail_put;
+ }
- len = build_dpcd_read(txmsg, port->port_num, 0, 8);
- txmsg->dst = port->parent;
+ len = build_dpcd_read(txmsg, port->port_num, offset, size);
+ txmsg->dst = mstb;
drm_dp_queue_down_tx(mgr, txmsg);
+ ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
+ if (ret <= 0) {
+ DRM_ERROR("dpcd read failed\n");
+ goto fail_free_msg;
+ }
- return 0;
+ if (txmsg->reply.reply_type == 1) {
+ DRM_ERROR("dpcd read nack received\n");
+ ret = -EINVAL;
+ goto fail_free_msg;
+ }
+
+ if (port->port_num != txmsg->reply.u.remote_dpcd_read_ack.port_number) {
+ DRM_ERROR("got incorrect port in response\n");
+ ret = -EINVAL;
+ goto fail_free_msg;
+ }
+
+ if (size > txmsg->reply.u.remote_dpcd_read_ack.num_bytes)
+ size = txmsg->reply.u.remote_dpcd_read_ack.num_bytes;
+
+ memcpy(bytes, txmsg->reply.u.remote_dpcd_read_ack.bytes, size);
+
+fail_free_msg:
+ kfree(txmsg);
+fail_put:
+ drm_dp_put_mst_branch_device(mstb);
+ return ret;
}
-#endif
+EXPORT_SYMBOL(drm_dp_send_dpcd_read);
-static int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
- struct drm_dp_mst_port *port,
- int offset, int size, u8 *bytes)
+int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size, u8 *bytes)
{
int len;
int ret;
@@ -2038,6 +2068,7 @@
drm_dp_put_mst_branch_device(mstb);
return ret;
}
+EXPORT_SYMBOL(drm_dp_send_dpcd_write);
static int drm_dp_encode_up_ack_reply(struct drm_dp_sideband_msg_tx *msg, u8 req_type)
{
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.c b/drivers/gpu/drm/msm/dp/dp_catalog.c
index dc85ccc..3d15cd9 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.c
@@ -882,6 +882,30 @@
0xe4);
}
+static void dp_catalog_ctrl_lane_pnswap(struct dp_catalog_ctrl *ctrl,
+ u8 ln_pnswap)
+{
+ struct dp_catalog_private *catalog;
+ struct dp_io_data *io_data;
+ u32 cfg0, cfg1;
+
+ catalog = dp_catalog_get_priv(ctrl);
+
+ cfg0 = 0x0a;
+ cfg1 = 0x0a;
+
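+	/* One ln_pnswap bit per lane: lanes 0/1 set bits 0 and 2 of the TX0
+	 * polarity-inversion register, lanes 2/3 the same bits of TX1.
+	 */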
+ cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0;
+ cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2;
+ cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0;
+ cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2;
+
+ io_data = catalog->io.dp_ln_tx0;
+ dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV, cfg0);
+
+ io_data = catalog->io.dp_ln_tx1;
+ dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV, cfg1);
+}
+
static void dp_catalog_ctrl_mainlink_ctrl(struct dp_catalog_ctrl *ctrl,
bool enable)
{
@@ -2509,6 +2533,7 @@
.state_ctrl = dp_catalog_ctrl_state_ctrl,
.config_ctrl = dp_catalog_ctrl_config_ctrl,
.lane_mapping = dp_catalog_ctrl_lane_mapping,
+ .lane_pnswap = dp_catalog_ctrl_lane_pnswap,
.mainlink_ctrl = dp_catalog_ctrl_mainlink_ctrl,
.set_pattern = dp_catalog_ctrl_set_pattern,
.reset = dp_catalog_ctrl_reset,
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog.h b/drivers/gpu/drm/msm/dp/dp_catalog.h
index 78aec713..85ed209 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog.h
+++ b/drivers/gpu/drm/msm/dp/dp_catalog.h
@@ -93,6 +106,7 @@
void (*config_ctrl)(struct dp_catalog_ctrl *ctrl, u8 ln_cnt);
void (*lane_mapping)(struct dp_catalog_ctrl *ctrl, bool flipped,
char *lane_map);
+ void (*lane_pnswap)(struct dp_catalog_ctrl *ctrl, u8 ln_pnswap);
void (*mainlink_ctrl)(struct dp_catalog_ctrl *ctrl, bool enable);
void (*set_pattern)(struct dp_catalog_ctrl *ctrl, u32 pattern);
void (*reset)(struct dp_catalog_ctrl *ctrl);
diff --git a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
index d5eebb4..51fa987 100644
--- a/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
+++ b/drivers/gpu/drm/msm/dp/dp_catalog_v420.c
@@ -266,6 +266,30 @@
}
}
+static void dp_catalog_ctrl_lane_pnswap_v420(struct dp_catalog_ctrl *ctrl,
+ u8 ln_pnswap)
+{
+ struct dp_catalog_private_v420 *catalog;
+ struct dp_io_data *io_data;
+ u32 cfg0, cfg1;
+
+ catalog = dp_catalog_get_priv_v420(ctrl);
+
+ cfg0 = 0x0a;
+ cfg1 = 0x0a;
+
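+	/* Same per-lane P/N swap bit layout as the default catalog. */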
+ cfg0 |= ((ln_pnswap >> 0) & 0x1) << 0;
+ cfg0 |= ((ln_pnswap >> 1) & 0x1) << 2;
+ cfg1 |= ((ln_pnswap >> 2) & 0x1) << 0;
+ cfg1 |= ((ln_pnswap >> 3) & 0x1) << 2;
+
+ io_data = catalog->io->dp_ln_tx0;
+ dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV_V420, cfg0);
+
+ io_data = catalog->io->dp_ln_tx1;
+ dp_write(catalog->exe_mode, io_data, TXn_TX_POL_INV_V420, cfg1);
+}
+
static void dp_catalog_put_v420(struct dp_catalog *catalog)
{
struct dp_catalog_private_v420 *catalog_priv;
@@ -316,6 +340,7 @@
catalog->panel.config_msa = dp_catalog_panel_config_msa_v420;
catalog->ctrl.phy_lane_cfg = dp_catalog_ctrl_phy_lane_cfg_v420;
catalog->ctrl.update_vx_px = dp_catalog_ctrl_update_vx_px_v420;
+ catalog->ctrl.lane_pnswap = dp_catalog_ctrl_lane_pnswap_v420;
/* Set the default execution mode to hardware mode */
dp_catalog_set_exe_mode_v420(catalog, "hw");
diff --git a/drivers/gpu/drm/msm/dp/dp_ctrl.c b/drivers/gpu/drm/msm/dp/dp_ctrl.c
index 75a2f16..d84417e 100644
--- a/drivers/gpu/drm/msm/dp/dp_ctrl.c
+++ b/drivers/gpu/drm/msm/dp/dp_ctrl.c
@@ -161,6 +161,8 @@
if (enable) {
ctrl->catalog->lane_mapping(ctrl->catalog, ctrl->orientation,
ctrl->parser->l_map);
+ ctrl->catalog->lane_pnswap(ctrl->catalog,
+ ctrl->parser->l_pnswap);
ctrl->catalog->mst_config(ctrl->catalog, ctrl->mst_mode);
ctrl->catalog->config_ctrl(ctrl->catalog,
ctrl->link->link_params.lane_count);
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.c b/drivers/gpu/drm/msm/dp/dp_debug.c
index e5f1b3e..e581303 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.c
+++ b/drivers/gpu/drm/msm/dp/dp_debug.c
@@ -11,7 +11,6 @@
#include "dp_power.h"
#include "dp_catalog.h"
#include "dp_aux.h"
-#include "dp_ctrl.h"
#include "dp_debug.h"
#include "drm_connector.h"
#include "sde_connector.h"
@@ -41,6 +40,8 @@
struct device *dev;
struct dp_debug dp_debug;
struct dp_parser *parser;
+ struct dp_ctrl *ctrl;
+ struct mutex lock;
};
static int dp_debug_get_edid_buf(struct dp_debug_private *debug)
@@ -90,6 +91,8 @@
if (!debug)
return -ENODEV;
+ mutex_lock(&debug->lock);
+
if (*ppos)
goto bail;
@@ -161,6 +164,7 @@
*/
pr_info("[%s]\n", edid ? "SET" : "CLEAR");
+ mutex_unlock(&debug->lock);
return rc;
}
@@ -180,6 +184,8 @@
if (!debug)
return -ENODEV;
+ mutex_lock(&debug->lock);
+
if (*ppos)
goto bail;
@@ -260,6 +266,7 @@
} else
debug->aux->dpcd_updated(debug->aux);
+ mutex_unlock(&debug->lock);
return rc;
}
@@ -747,7 +754,7 @@
const char __user *user_buff, size_t count, loff_t *ppos)
{
struct dp_debug_private *debug = file->private_data;
- char *buf;
+ char buf[SZ_32];
size_t len = 0;
if (!debug)
@@ -757,7 +764,9 @@
return 0;
len = min_t(size_t, count, SZ_32 - 1);
- buf = memdup_user(user_buff, len);
+ if (copy_from_user(buf, user_buff, len))
+ goto end;
+
buf[len] = '\0';
if (sscanf(buf, "%3s", debug->exe_mode) != 1)
@@ -1437,6 +1446,7 @@
if (dp_debug_get_dpcd_buf(debug)) {
devm_kfree(debug->dev, debug->edid);
+ debug->edid = NULL;
return;
}
@@ -1444,6 +1454,9 @@
debug->aux->set_sim_mode(debug->aux, true,
debug->edid, debug->dpcd);
} else {
+ debug->aux->abort(debug->aux);
+ debug->ctrl->abort(debug->ctrl);
+
debug->aux->set_sim_mode(debug->aux, false, NULL, NULL);
debug->dp_debug.sim_mode = false;
@@ -1482,6 +1495,8 @@
if (*ppos)
return 0;
+ mutex_lock(&debug->lock);
+
/* Leave room for termination char */
len = min_t(size_t, count, SZ_8 - 1);
if (copy_from_user(buf, user_buff, len))
@@ -1494,6 +1509,7 @@
dp_debug_set_sim_mode(debug, sim);
end:
+ mutex_unlock(&debug->lock);
return len;
}
@@ -1941,6 +1957,14 @@
DEBUG_NAME, rc);
}
+ file = debugfs_create_u32("max_lclk_khz", 0644, dir,
+ &debug->parser->max_lclk_khz);
+ if (IS_ERR_OR_NULL(file)) {
+ rc = PTR_ERR(file);
+ pr_err("[%s] debugfs max_lclk_khz failed, rc=%d\n",
+ DEBUG_NAME, rc);
+ }
+
return 0;
error_remove_dir:
@@ -1972,7 +1996,9 @@
debug = container_of(dp_debug, struct dp_debug_private, dp_debug);
+ mutex_lock(&debug->lock);
dp_debug_set_sim_mode(debug, false);
+ mutex_unlock(&debug->lock);
}
struct dp_debug *dp_debug_get(struct dp_debug_in *in)
@@ -1981,7 +2007,8 @@
struct dp_debug_private *debug;
struct dp_debug *dp_debug;
- if (!in->dev || !in->panel || !in->hpd || !in->link || !in->catalog) {
+ if (!in->dev || !in->panel || !in->hpd || !in->link ||
+ !in->catalog || !in->ctrl) {
pr_err("invalid input\n");
rc = -EINVAL;
goto error;
@@ -2002,12 +2029,15 @@
debug->connector = in->connector;
debug->catalog = in->catalog;
debug->parser = in->parser;
+ debug->ctrl = in->ctrl;
dp_debug = &debug->dp_debug;
dp_debug->vdisplay = 0;
dp_debug->hdisplay = 0;
dp_debug->vrefresh = 0;
+ mutex_init(&debug->lock);
+
rc = dp_debug_init(dp_debug);
if (rc) {
devm_kfree(in->dev, debug);
@@ -2059,6 +2089,8 @@
dp_debug_deinit(dp_debug);
+ mutex_destroy(&debug->lock);
+
if (debug->edid)
devm_kfree(debug->dev, debug->edid);
diff --git a/drivers/gpu/drm/msm/dp/dp_debug.h b/drivers/gpu/drm/msm/dp/dp_debug.h
index dfbc652..11b890e 100644
--- a/drivers/gpu/drm/msm/dp/dp_debug.h
+++ b/drivers/gpu/drm/msm/dp/dp_debug.h
@@ -7,6 +7,7 @@
#define _DP_DEBUG_H_
#include "dp_panel.h"
+#include "dp_ctrl.h"
#include "dp_link.h"
#include "dp_usbpd.h"
#include "dp_aux.h"
@@ -63,6 +64,7 @@
struct drm_connector **connector;
struct dp_catalog *catalog;
struct dp_parser *parser;
+ struct dp_ctrl *ctrl;
};
/**
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index bdd2478..b326a50 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -103,6 +103,8 @@
u32 tot_dsc_blks_in_use;
+ bool process_hpd_connect;
+
struct notifier_block usb_nb;
};
@@ -111,11 +113,6 @@
{}
};
-static bool dp_display_framework_ready(struct dp_display_private *dp)
-{
- return dp->dp_display.post_open ? false : true;
-}
-
static inline bool dp_display_is_hdcp_enabled(struct dp_display_private *dp)
{
return dp->link->hdcp_status.hdcp_version && dp->hdcp.ops;
@@ -236,16 +233,64 @@
struct sde_hdcp_ops *ops = dev->ops;
void *fd = dev->fd;
- if (!fd || !ops || (dp->hdcp.source_cap & dev->ver))
+ if (!fd || !ops)
continue;
- if (ops->feature_supported(fd))
+ if (ops->set_mode && ops->set_mode(fd, dp->mst.mst_active))
+ continue;
+
+ if (!(dp->hdcp.source_cap & dev->ver) &&
+ ops->feature_supported &&
+ ops->feature_supported(fd))
dp->hdcp.source_cap |= dev->ver;
}
dp_display_update_hdcp_status(dp, false);
}
+static void dp_display_hdcp_register_streams(struct dp_display_private *dp)
+{
+ int rc;
+ size_t i;
+ struct sde_hdcp_ops *ops = dp->hdcp.ops;
+ void *data = dp->hdcp.data;
+
+ if (dp_display_is_ready(dp) && dp->mst.mst_active && ops &&
+	    ops->register_streams) {
+ struct stream_info streams[DP_STREAM_MAX];
+ int index = 0;
+
+ pr_debug("Registering all active panel streams with HDCP\n");
+ for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
+ if (!dp->active_panels[i])
+ continue;
+ streams[index].stream_id = i;
+ streams[index].virtual_channel =
+ dp->active_panels[i]->vcpi;
+ index++;
+ }
+
+ if (index > 0) {
+ rc = ops->register_streams(data, index, streams);
+ if (rc)
+ pr_err("failed to register streams. rc = %d\n",
+ rc);
+ }
+ }
+}
+
+static void dp_display_hdcp_deregister_stream(struct dp_display_private *dp,
+ enum dp_stream_id stream_id)
+{
+ if (dp->hdcp.ops->deregister_streams) {
+ struct stream_info stream = {stream_id,
+ dp->active_panels[stream_id]->vcpi};
+
+ pr_debug("Deregistering stream within HDCP library\n");
+ dp->hdcp.ops->deregister_streams(dp->hdcp.data, 1, &stream);
+ }
+}
+
static void dp_display_hdcp_cb_work(struct work_struct *work)
{
struct dp_display_private *dp;
@@ -255,12 +300,21 @@
void *data;
int rc = 0;
u32 hdcp_auth_state;
+ u8 sink_status = 0;
dp = container_of(dw, struct dp_display_private, hdcp_cb_work);
if (!dp->power_on || !dp->is_connected || atomic_read(&dp->aborted))
return;
+ drm_dp_dpcd_readb(dp->aux->drm_aux, DP_SINK_STATUS, &sink_status);
+ sink_status &= (DP_RECEIVE_PORT_0_STATUS | DP_RECEIVE_PORT_1_STATUS);
+	if (!sink_status) {
+ pr_debug("Sink not synchronized. Queuing again then exiting\n");
+ queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
+ return;
+ }
+
status = &dp->link->hdcp_status;
if (status->hdcp_state == HDCP_STATE_INACTIVE) {
@@ -268,6 +322,11 @@
dp_display_update_hdcp_info(dp);
if (dp_display_is_hdcp_enabled(dp)) {
+ if (dp->hdcp.ops && dp->hdcp.ops->on &&
+ dp->hdcp.ops->on(dp->hdcp.data)) {
+ dp_display_update_hdcp_status(dp, true);
+ return;
+ }
status->hdcp_state = HDCP_STATE_AUTHENTICATING;
} else {
dp_display_update_hdcp_status(dp, true);
@@ -294,11 +353,18 @@
switch (status->hdcp_state) {
case HDCP_STATE_AUTHENTICATING:
+ dp_display_hdcp_register_streams(dp);
if (dp->hdcp.ops && dp->hdcp.ops->authenticate)
rc = dp->hdcp.ops->authenticate(data);
break;
case HDCP_STATE_AUTH_FAIL:
if (dp_display_is_ready(dp) && dp->power_on) {
+ if (ops && ops->on && ops->on(data)) {
+ dp_display_update_hdcp_status(dp, true);
+ return;
+ }
+ dp_display_hdcp_register_streams(dp);
+ status->hdcp_state = HDCP_STATE_AUTHENTICATING;
if (ops && ops->reauthenticate) {
rc = ops->reauthenticate(data);
if (rc)
@@ -309,6 +375,7 @@
}
break;
default:
+ dp_display_hdcp_register_streams(dp);
break;
}
}
@@ -502,36 +569,6 @@
envp);
}
-static void dp_display_post_open(struct dp_display *dp_display)
-{
- struct drm_connector *connector;
- struct dp_display_private *dp;
-
- if (!dp_display) {
- pr_err("invalid input\n");
- return;
- }
-
- dp = container_of(dp_display, struct dp_display_private, dp_display);
- if (IS_ERR_OR_NULL(dp)) {
- pr_err("invalid params\n");
- return;
- }
-
- connector = dp->dp_display.base_connector;
-
- if (!connector) {
- pr_err("base connector not set\n");
- return;
- }
-
- /* if cable is already connected, send notification */
- if (dp->hpd->hpd_high)
- queue_work(dp->wq, &dp->connect_work);
- else
- dp_display->post_open = NULL;
-}
-
static int dp_display_send_hpd_notification(struct dp_display_private *dp)
{
int ret = 0;
@@ -541,6 +578,8 @@
if (!dp->mst.mst_active)
dp->dp_display.is_sst_connected = hpd;
+ else
+ dp->dp_display.is_sst_connected = false;
reinit_completion(&dp->notification_comp);
dp_display_send_hpd_event(dp);
@@ -551,9 +590,6 @@
if (!dp->mst.mst_active && (dp->power_on == hpd))
goto skip_wait;
- if (!dp_display_framework_ready(dp))
- goto skip_wait;
-
if (!wait_for_completion_timeout(&dp->notification_comp,
HZ * 5)) {
pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
@@ -571,30 +607,47 @@
dp->panel->mst_state = state;
}
-static void dp_display_process_mst_hpd_high(struct dp_display_private *dp)
+static void dp_display_process_mst_hpd_high(struct dp_display_private *dp,
+ bool mst_probe)
{
bool is_mst_receiver;
struct dp_mst_hpd_info info;
+ int ret;
- if (dp->parser->has_mst && dp->mst.drm_registered) {
- DP_MST_DEBUG("mst_hpd_high work\n");
+ if (!dp->parser->has_mst || !dp->mst.drm_registered) {
+ DP_MST_DEBUG("mst not enabled. has_mst:%d, registered:%d\n",
+ dp->parser->has_mst, dp->mst.drm_registered);
+ return;
+ }
+ DP_MST_DEBUG("mst_hpd_high work. mst_probe:%d\n", mst_probe);
+
+ if (!dp->mst.mst_active) {
is_mst_receiver = dp->panel->read_mst_cap(dp->panel);
- if (is_mst_receiver && !dp->mst.mst_active) {
-
- /* clear sink mst state */
- drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0);
-
- dp_display_update_mst_state(dp, true);
-
- info.mst_protocol = dp->parser->has_mst_sideband;
- info.mst_port_cnt = dp->debug->mst_port_cnt;
- info.edid = dp->debug->get_edid(dp->debug);
-
- if (dp->mst.cbs.hpd)
- dp->mst.cbs.hpd(&dp->dp_display, true, &info);
+ if (!is_mst_receiver) {
+ DP_MST_DEBUG("sink doesn't support mst\n");
+ return;
}
+
+ /* clear sink mst state */
+ drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL, 0);
+
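+	/* Enable MST on the sink, allowing up-requests, with upstream as source. */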
+ ret = drm_dp_dpcd_writeb(dp->aux->drm_aux, DP_MSTM_CTRL,
+ DP_MST_EN | DP_UP_REQ_EN | DP_UPSTREAM_IS_SRC);
+ if (ret < 0) {
+ pr_err("sink mst enablement failed\n");
+ return;
+ }
+
+ dp_display_update_mst_state(dp, true);
+ } else if (dp->mst.mst_active && mst_probe) {
+ info.mst_protocol = dp->parser->has_mst_sideband;
+ info.mst_port_cnt = dp->debug->mst_port_cnt;
+ info.edid = dp->debug->get_edid(dp->debug);
+
+ if (dp->mst.cbs.hpd)
+ dp->mst.cbs.hpd(&dp->dp_display, true, &info);
}
DP_MST_DEBUG("mst_hpd_high. mst_active:%d\n", dp->mst.mst_active);
@@ -648,7 +701,16 @@
static int dp_display_process_hpd_high(struct dp_display_private *dp)
{
- int rc = 0;
+ int rc = -EINVAL;
+
+ mutex_lock(&dp->session_lock);
+
+ if (dp->is_connected) {
+ pr_debug("dp already connected, skipping hpd high\n");
+ mutex_unlock(&dp->session_lock);
+ rc = -EISCONN;
+ goto end;
+ }
dp->is_connected = true;
@@ -671,25 +733,32 @@
* ETIMEDOUT --> cable may have been removed
* ENOTCONN --> no downstream device connected
*/
- if (rc == -ETIMEDOUT || rc == -ENOTCONN)
+ if (rc == -ETIMEDOUT || rc == -ENOTCONN) {
+ dp->is_connected = false;
goto end;
+ }
dp->link->process_request(dp->link);
dp->panel->handle_sink_request(dp->panel);
- dp_display_process_mst_hpd_high(dp);
+ dp_display_process_mst_hpd_high(dp, false);
- mutex_lock(&dp->session_lock);
rc = dp->ctrl->on(dp->ctrl, dp->mst.mst_active,
dp->panel->fec_en, false);
if (rc) {
- mutex_unlock(&dp->session_lock);
+ dp->is_connected = false;
goto end;
}
+
+ dp->process_hpd_connect = false;
+
+ dp_display_process_mst_hpd_high(dp, true);
+end:
mutex_unlock(&dp->session_lock);
- dp_display_send_hpd_notification(dp);
-end:
+ if (!rc)
+ dp_display_send_hpd_notification(dp);
+
return rc;
}
@@ -715,6 +784,7 @@
int rc = 0;
dp->is_connected = false;
+ dp->process_hpd_connect = false;
dp_display_process_mst_hpd_low(dp);
@@ -755,11 +825,15 @@
goto end;
}
+ mutex_lock(&dp->session_lock);
dp_display_host_init(dp);
/* check for hpd high */
if (dp->hpd->hpd_high)
queue_work(dp->wq, &dp->connect_work);
+ else
+ dp->process_hpd_connect = true;
+ mutex_unlock(&dp->session_lock);
end:
return rc;
}
@@ -793,8 +867,10 @@
{
int idx;
struct dp_panel *dp_panel;
+ struct dp_link_hdcp_status *status = &dp->link->hdcp_status;
- if (dp_display_is_hdcp_enabled(dp)) {
+ if (dp_display_is_hdcp_enabled(dp) &&
+ status->hdcp_state != HDCP_STATE_INACTIVE) {
cancel_delayed_work_sync(&dp->hdcp_cb_work);
if (dp->hdcp.ops->off)
dp->hdcp.ops->off(dp->hdcp.data);
@@ -878,18 +954,12 @@
goto end;
}
- /*
- * In case cable/dongle is disconnected during adb shell stop,
- * reset psm_enabled flag to false since it is no more needed
- */
- if (dp->dp_display.post_open)
- dp->debug->psm_enabled = false;
-
- if (dp->debug->psm_enabled)
+ mutex_lock(&dp->session_lock);
+ if (dp->debug->psm_enabled && dp->core_initialized)
dp->link->psm_config(dp->link, &dp->panel->link_info, true);
+ mutex_unlock(&dp->session_lock);
dp_display_disconnect_sync(dp);
- dp->dp_display.post_open = NULL;
if (!dp->debug->sim_mode && !dp->parser->no_aux_switch
&& !dp->parser->gpio_aux_switch)
@@ -936,11 +1006,19 @@
struct dp_display_private *dp = container_of(work,
struct dp_display_private, attention_work);
- if (dp->debug->mst_hpd_sim)
- goto mst_attention;
+ mutex_lock(&dp->session_lock);
- if (dp->link->process_request(dp->link))
+ if (dp->debug->mst_hpd_sim || !dp->core_initialized) {
+ mutex_unlock(&dp->session_lock);
+ goto mst_attention;
+ }
+
+ if (dp->link->process_request(dp->link)) {
+ mutex_unlock(&dp->session_lock);
goto cp_irq;
+ }
+
+ mutex_unlock(&dp->session_lock);
if (dp->link->sink_request & DS_PORT_STATUS_CHANGED) {
if (dp_display_is_sink_count_zero(dp)) {
@@ -997,16 +1075,16 @@
return -ENODEV;
}
- pr_debug("hpd_irq:%d, hpd_high:%d, power_on:%d\n",
+ pr_debug("hpd_irq:%d, hpd_high:%d, power_on:%d, is_connected:%d\n",
dp->hpd->hpd_irq, dp->hpd->hpd_high,
- dp->power_on);
+ dp->power_on, dp->is_connected);
if (!dp->hpd->hpd_high)
dp_display_disconnect_sync(dp);
else if ((dp->hpd->hpd_irq && dp->core_initialized) ||
dp->debug->mst_hpd_sim)
queue_work(dp->wq, &dp->attention_work);
- else if (!dp->power_on)
+ else if (dp->process_hpd_connect || !dp->is_connected)
queue_work(dp->wq, &dp->connect_work);
else
pr_debug("ignored\n");
@@ -1228,6 +1306,7 @@
debug_in.connector = &dp->dp_display.base_connector;
debug_in.catalog = dp->catalog;
debug_in.parser = dp->parser;
+ debug_in.ctrl = dp->ctrl;
dp->debug = dp_debug_get(&debug_in);
if (IS_ERR(dp->debug)) {
@@ -1399,7 +1478,7 @@
static int dp_display_set_stream_info(struct dp_display *dp_display,
void *panel, u32 strm_id, u32 start_slot,
- u32 num_slots, u32 pbn)
+ u32 num_slots, u32 pbn, int vcpi)
{
int rc = 0;
struct dp_panel *dp_panel;
@@ -1432,7 +1511,7 @@
if (panel) {
dp_panel = panel;
dp_panel->set_stream_info(dp_panel, strm_id, start_slot,
- num_slots, pbn);
+ num_slots, pbn, vcpi);
}
mutex_unlock(&dp->session_lock);
@@ -1539,8 +1618,6 @@
cancel_delayed_work_sync(&dp->hdcp_cb_work);
queue_delayed_work(dp->wq, &dp->hdcp_cb_work, HZ);
end:
- /* clear framework event notifier */
- dp_display->post_open = NULL;
dp->aux->state |= DP_STATE_CTRL_POWERED_ON;
complete_all(&dp->notification_comp);
@@ -1552,7 +1629,9 @@
{
struct dp_display_private *dp;
struct dp_panel *dp_panel = panel;
+ struct dp_link_hdcp_status *status;
int rc = 0;
+ size_t i;
if (!dp_display || !panel) {
pr_err("invalid input\n");
@@ -1563,19 +1642,35 @@
mutex_lock(&dp->session_lock);
+ status = &dp->link->hdcp_status;
+
if (!dp->power_on) {
pr_debug("stream already powered off, return\n");
goto end;
}
- if (dp_display_is_hdcp_enabled(dp)) {
- cancel_delayed_work_sync(&dp->hdcp_cb_work);
+ if (dp_display_is_hdcp_enabled(dp) &&
+ status->hdcp_state != HDCP_STATE_INACTIVE) {
+ flush_delayed_work(&dp->hdcp_cb_work);
+ if (dp->mst.mst_active) {
+ dp_display_hdcp_deregister_stream(dp,
+ dp_panel->stream_id);
+ for (i = DP_STREAM_0; i < DP_STREAM_MAX; i++) {
+ if (i != dp_panel->stream_id &&
+ dp->active_panels[i]) {
+ pr_debug("Streams are still active. Skip disabling HDCP\n");
+ goto stream;
+ }
+ }
+ }
+
if (dp->hdcp.ops->off)
dp->hdcp.ops->off(dp->hdcp.data);
dp_display_update_hdcp_status(dp, true);
}
+stream:
if (dp_panel->audio_supported)
dp_panel->audio->off(dp_panel->audio);
@@ -1689,14 +1784,6 @@
dp->link->psm_config(dp->link, &dp->panel->link_info, true);
dp->debug->psm_enabled = true;
- /*
- * In case of framework reboot, the DP off sequence is executed
- * without any notification from driver. Initialize post_open
- * callback to notify DP connection once framework restarts.
- */
- dp_display->post_open = dp_display_post_open;
- dp->dp_display.is_sst_connected = false;
-
dp->ctrl->off(dp->ctrl);
dp_display_host_deinit(dp);
}
@@ -2259,6 +2346,77 @@
return 0;
}
+static int dp_display_mst_connector_update_link_info(
+ struct dp_display *dp_display,
+ struct drm_connector *connector)
+{
+ int rc = 0;
+ struct sde_connector *sde_conn;
+ struct dp_panel *dp_panel;
+ struct dp_display_private *dp;
+
+ if (!dp_display || !connector) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ if (!dp->mst.drm_registered) {
+ pr_debug("drm mst not registered\n");
+ return -EPERM;
+ }
+
+ sde_conn = to_sde_connector(connector);
+ if (!sde_conn->drv_panel) {
+ pr_err("invalid panel for connector:%d\n", connector->base.id);
+ return -EINVAL;
+ }
+
+ dp_panel = sde_conn->drv_panel;
+
+ memcpy(dp_panel->dpcd, dp->panel->dpcd,
+ DP_RECEIVER_CAP_SIZE + 1);
+ memcpy(dp_panel->dsc_dpcd, dp->panel->dsc_dpcd,
+ DP_RECEIVER_DSC_CAP_SIZE + 1);
+ memcpy(&dp_panel->link_info, &dp->panel->link_info,
+ sizeof(dp_panel->link_info));
+
+	DP_MST_DEBUG("dp mst connector:%d link info updated\n", connector->base.id);
+
+ return rc;
+}
+
+static int dp_display_mst_get_fixed_topology_port(
+ struct dp_display *dp_display,
+ u32 strm_id, u32 *port_num)
+{
+ struct dp_display_private *dp;
+ u32 port;
+
+ if (!dp_display) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ if (strm_id >= DP_STREAM_MAX) {
+ pr_err("invalid stream id:%d\n", strm_id);
+ return -EINVAL;
+ }
+
+ dp = container_of(dp_display, struct dp_display_private, dp_display);
+
+ port = dp->parser->mst_fixed_port[strm_id];
+
+ if (!port || port > 255)
+ return -ENOENT;
+
+ if (port_num)
+ *port_num = port;
+
+ return 0;
+}
+
static int dp_display_get_mst_caps(struct dp_display *dp_display,
struct dp_mst_caps *mst_caps)
{
@@ -2332,7 +2490,7 @@
g_dp_display->unprepare = dp_display_unprepare;
g_dp_display->request_irq = dp_request_irq;
g_dp_display->get_debug = dp_get_debug;
- g_dp_display->post_open = dp_display_post_open;
+ g_dp_display->post_open = NULL;
g_dp_display->post_init = dp_display_post_init;
g_dp_display->config_hdr = dp_display_config_hdr;
g_dp_display->mst_install = dp_display_mst_install;
@@ -2342,12 +2500,16 @@
dp_display_mst_connector_uninstall;
g_dp_display->mst_connector_update_edid =
dp_display_mst_connector_update_edid;
+ g_dp_display->mst_connector_update_link_info =
+ dp_display_mst_connector_update_link_info;
g_dp_display->get_mst_caps = dp_display_get_mst_caps;
g_dp_display->set_stream_info = dp_display_set_stream_info;
g_dp_display->update_pps = dp_display_update_pps;
g_dp_display->convert_to_dp_mode = dp_display_convert_to_dp_mode;
g_dp_display->mst_get_connector_info =
dp_display_mst_get_connector_info;
+ g_dp_display->mst_get_fixed_topology_port =
+ dp_display_mst_get_fixed_topology_port;
rc = component_add(&pdev->dev, &dp_display_comp_ops);
if (rc) {
diff --git a/drivers/gpu/drm/msm/dp/dp_display.h b/drivers/gpu/drm/msm/dp/dp_display.h
index 410cee7..fe332af 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.h
+++ b/drivers/gpu/drm/msm/dp/dp_display.h
@@ -100,13 +100,18 @@
int (*mst_connector_update_edid)(struct dp_display *dp_display,
struct drm_connector *connector,
struct edid *edid);
+ int (*mst_connector_update_link_info)(struct dp_display *dp_display,
+ struct drm_connector *connector);
int (*mst_get_connector_info)(struct dp_display *dp_display,
struct drm_connector *connector,
struct dp_mst_connector *mst_conn);
+ int (*mst_get_fixed_topology_port)(struct dp_display *dp_display,
+ u32 strm_id, u32 *port_num);
int (*get_mst_caps)(struct dp_display *dp_display,
struct dp_mst_caps *mst_caps);
int (*set_stream_info)(struct dp_display *dp_display, void *panel,
- u32 strm_id, u32 start_slot, u32 num_slots, u32 pbn);
+ u32 strm_id, u32 start_slot, u32 num_slots, u32 pbn,
+ int vcpi);
void (*convert_to_dp_mode)(struct dp_display *dp_display, void *panel,
const struct drm_display_mode *drm_mode,
struct dp_display_mode *dp_mode);
diff --git a/drivers/gpu/drm/msm/dp/dp_drm.c b/drivers/gpu/drm/msm/dp/dp_drm.c
index 9b3bb24..b3b116a 100644
--- a/drivers/gpu/drm/msm/dp/dp_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_drm.c
@@ -114,7 +114,7 @@
}
/* for SST force stream id, start slot and total slots to 0 */
- dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0);
+ dp->set_stream_info(dp, bridge->dp_panel, 0, 0, 0, 0, 0);
rc = dp->enable(dp, bridge->dp_panel);
if (rc) {
diff --git a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
index 3dd0fa1..f71c25e 100644
--- a/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
+++ b/drivers/gpu/drm/msm/dp/dp_hdcp2p2.c
@@ -51,7 +51,6 @@
u8 rx_status;
char abort_mask;
- bool cp_irq_done;
bool polling;
};
@@ -66,6 +65,25 @@
struct dp_hdcp2p2_int_set *int_set;
};
+static inline int dp_hdcp2p2_valid_handle(struct dp_hdcp2p2_ctrl *ctrl)
+{
+ if (!ctrl) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ if (!ctrl->lib_ctx) {
+ pr_err("HDCP library needs to be acquired\n");
+ return -EINVAL;
+ }
+
+ if (!ctrl->lib) {
+ pr_err("invalid lib ops data\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
static inline bool dp_hdcp2p2_is_valid_state(struct dp_hdcp2p2_ctrl *ctrl)
{
enum hdcp_transport_wakeup_cmd cmd;
@@ -174,6 +192,7 @@
if (dp_hdcp2p2_copy_buf(ctrl, data))
goto exit;
+ ctrl->polling = false;
switch (data->cmd) {
case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED);
@@ -216,38 +235,77 @@
atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
}
+static int dp_hdcp2p2_register(void *input, bool mst_enabled)
+{
+ int rc;
+ enum sde_hdcp_2x_device_type device_type;
+ struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
+
+ rc = dp_hdcp2p2_valid_handle(ctrl);
+ if (rc)
+ return rc;
+
+ if (mst_enabled)
+ device_type = HDCP_TXMTR_DP_MST;
+ else
+ device_type = HDCP_TXMTR_DP;
+
+ return sde_hdcp_2x_enable(ctrl->lib_ctx, device_type);
+}
+
+static int dp_hdcp2p2_on(void *input)
+{
+ int rc = 0;
+ struct dp_hdcp2p2_ctrl *ctrl = input;
+ struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
+
+ rc = dp_hdcp2p2_valid_handle(ctrl);
+ if (rc)
+ return rc;
+
+ cdata.cmd = HDCP_2X_CMD_START;
+ cdata.context = ctrl->lib_ctx;
+ rc = ctrl->lib->wakeup(&cdata);
+ if (rc)
+ pr_err("Unable to start the HDCP 2.2 library (%d)\n", rc);
+
+ return rc;
+}
+
static void dp_hdcp2p2_off(void *input)
{
+ int rc;
struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
- struct hdcp_transport_wakeup_data cdata = {
- HDCP_TRANSPORT_CMD_AUTHENTICATE};
+ struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
- if (!ctrl) {
- pr_err("invalid input\n");
+ rc = dp_hdcp2p2_valid_handle(ctrl);
+ if (rc)
return;
- }
- if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
- pr_err("hdcp is off\n");
- return;
+ if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) {
+ cdata.cmd = HDCP_2X_CMD_STOP;
+ cdata.context = ctrl->lib_ctx;
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
}
dp_hdcp2p2_set_interrupts(ctrl, false);
dp_hdcp2p2_reset(ctrl);
- cdata.context = input;
- dp_hdcp2p2_wakeup(&cdata);
-
kthread_park(ctrl->thread);
+
+ sde_hdcp_2x_disable(ctrl->lib_ctx);
}
static int dp_hdcp2p2_authenticate(void *input)
{
+ int rc;
struct dp_hdcp2p2_ctrl *ctrl = input;
struct hdcp_transport_wakeup_data cdata = {
HDCP_TRANSPORT_CMD_AUTHENTICATE};
- int rc = 0;
+ rc = dp_hdcp2p2_valid_handle(ctrl);
+ if (rc)
+ return rc;
dp_hdcp2p2_set_interrupts(ctrl, true);
@@ -370,44 +428,34 @@
static bool dp_hdcp2p2_feature_supported(void *input)
{
+ int rc;
struct dp_hdcp2p2_ctrl *ctrl = input;
struct sde_hdcp_2x_ops *lib = NULL;
bool supported = false;
- if (!ctrl) {
- pr_err("invalid input\n");
- goto end;
- }
+ rc = dp_hdcp2p2_valid_handle(ctrl);
+ if (rc)
+ return supported;
lib = ctrl->lib;
- if (!lib) {
- pr_err("invalid lib ops data\n");
- goto end;
- }
-
if (lib->feature_supported)
supported = lib->feature_supported(
ctrl->lib_ctx);
-end:
+
return supported;
}
static void dp_hdcp2p2_force_encryption(void *data, bool enable)
{
+ int rc;
struct dp_hdcp2p2_ctrl *ctrl = data;
struct sde_hdcp_2x_ops *lib = NULL;
- if (!ctrl) {
- pr_err("invalid input\n");
+ rc = dp_hdcp2p2_valid_handle(ctrl);
+ if (rc)
return;
- }
lib = ctrl->lib;
- if (!lib) {
- pr_err("invalid lib ops data\n");
- return;
- }
-
if (lib->force_encryption)
lib->force_encryption(ctrl->lib_ctx, enable);
}
@@ -493,26 +541,12 @@
return;
}
- if (ctrl->rx_status) {
- if (!ctrl->cp_irq_done) {
- pr_debug("waiting for CP_IRQ\n");
- ctrl->polling = true;
- return;
- }
-
- if (ctrl->rx_status & ctrl->sink_rx_status) {
- ctrl->cp_irq_done = false;
- ctrl->sink_rx_status = 0;
- ctrl->rx_status = 0;
- }
- }
-
dp_hdcp2p2_get_msg_from_sink(ctrl);
}
static void dp_hdcp2p2_link_check(struct dp_hdcp2p2_ctrl *ctrl)
{
- int rc = 0;
+ int rc = 0, retries = 10;
struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
if (!ctrl) {
@@ -545,6 +579,11 @@
goto exit;
}
+ /* wait for polling to start till spec allowed timeout */
+ while (!ctrl->polling && retries--)
+ msleep(20);
+
+ /* check if sink has made a message available */
if (ctrl->polling && (ctrl->sink_rx_status & ctrl->rx_status)) {
ctrl->sink_rx_status = 0;
ctrl->rx_status = 0;
@@ -552,26 +591,19 @@
dp_hdcp2p2_get_msg_from_sink(ctrl);
ctrl->polling = false;
- } else {
- ctrl->cp_irq_done = true;
}
exit:
if (rc)
dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
}
-static void dp_hdcp2p2_manage_session(struct dp_hdcp2p2_ctrl *ctrl)
+static void dp_hdcp2p2_start_auth(struct dp_hdcp2p2_ctrl *ctrl)
{
- struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_INVALID};
-
+ struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_START_AUTH};
cdata.context = ctrl->lib_ctx;
if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING)
- cdata.cmd = HDCP_2X_CMD_START;
- else
- cdata.cmd = HDCP_2X_CMD_STOP;
-
- dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
}
static int dp_hdcp2p2_read_rx_status(struct dp_hdcp2p2_ctrl *ctrl,
@@ -617,34 +649,31 @@
static int dp_hdcp2p2_cp_irq(void *input)
{
- int rc = 0;
+ int rc;
struct dp_hdcp2p2_ctrl *ctrl = input;
- if (!ctrl) {
- pr_err("invalid input\n");
- return -EINVAL;
- }
+ rc = dp_hdcp2p2_valid_handle(ctrl);
+ if (rc)
+ return rc;
if (atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTH_FAIL ||
atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
pr_err("invalid hdcp state\n");
- rc = -EINVAL;
- goto error;
+ return -EINVAL;
}
ctrl->sink_rx_status = 0;
rc = dp_hdcp2p2_read_rx_status(ctrl, &ctrl->sink_rx_status);
if (rc) {
pr_err("failed to read rx status\n");
- goto error;
+ return rc;
}
pr_debug("sink_rx_status=0x%x\n", ctrl->sink_rx_status);
if (!ctrl->sink_rx_status) {
pr_debug("not a hdcp 2.2 irq\n");
- rc = -EINVAL;
- goto error;
+ return -EINVAL;
}
@@ -652,8 +681,6 @@
wake_up(&ctrl->wait_q);
return 0;
-error:
- return rc;
}
static int dp_hdcp2p2_isr(void *input)
@@ -721,6 +748,51 @@
return false;
}
+static int dp_hdcp2p2_change_streams(struct dp_hdcp2p2_ctrl *ctrl,
+ struct sde_hdcp_2x_wakeup_data *cdata)
+{
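+ /* validate the transport handles, then forward the stream change */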
+ if (!ctrl || cdata->num_streams == 0 || !cdata->streams) {
+ pr_err("invalid input\n");
+ return -EINVAL;
+ }
+
+ if (!ctrl->lib_ctx) {
+ pr_err("HDCP library needs to be acquired\n");
+ return -EINVAL;
+ }
+
+ if (!ctrl->lib) {
+ pr_err("invalid lib ops data\n");
+ return -EINVAL;
+ }
+
+ cdata->context = ctrl->lib_ctx;
+ return ctrl->lib->wakeup(cdata);
+}
+
+static int dp_hdcp2p2_register_streams(void *input, u8 num_streams,
+ struct stream_info *streams)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = input;
+ struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_OPEN_STREAMS};
+
+ cdata.streams = streams;
+ cdata.num_streams = num_streams;
+ return dp_hdcp2p2_change_streams(ctrl, &cdata);
+}
+
+static int dp_hdcp2p2_deregister_streams(void *input, u8 num_streams,
+ struct stream_info *streams)
+{
+ struct dp_hdcp2p2_ctrl *ctrl = input;
+ struct sde_hdcp_2x_wakeup_data cdata = {HDCP_2X_CMD_CLOSE_STREAMS};
+
+ cdata.streams = streams;
+ cdata.num_streams = num_streams;
+ return dp_hdcp2p2_change_streams(ctrl, &cdata);
+}
+
void sde_dp_hdcp2p2_deinit(void *input)
{
struct dp_hdcp2p2_ctrl *ctrl = (struct dp_hdcp2p2_ctrl *)input;
@@ -731,9 +803,13 @@
return;
}
- cdata.cmd = HDCP_2X_CMD_STOP;
- cdata.context = ctrl->lib_ctx;
- dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+ if (atomic_read(&ctrl->auth_state) != HDCP_STATE_AUTH_FAIL) {
+ cdata.cmd = HDCP_2X_CMD_STOP;
+ cdata.context = ctrl->lib_ctx;
+ dp_hdcp2p2_wakeup_lib(ctrl, &cdata);
+ }
+
+ sde_hdcp_2x_deregister(ctrl->lib_ctx);
kthread_stop(ctrl->thread);
@@ -769,7 +845,10 @@
dp_hdcp2p2_send_msg(ctrl);
break;
case HDCP_TRANSPORT_CMD_RECV_MESSAGE:
- dp_hdcp2p2_recv_msg(ctrl);
+ if (ctrl->rx_status)
+ ctrl->polling = true;
+ else
+ dp_hdcp2p2_recv_msg(ctrl);
break;
case HDCP_TRANSPORT_CMD_STATUS_SUCCESS:
dp_hdcp2p2_send_auth_status(ctrl);
@@ -779,16 +858,13 @@
dp_hdcp2p2_send_auth_status(ctrl);
break;
case HDCP_TRANSPORT_CMD_LINK_POLL:
- if (ctrl->cp_irq_done)
- dp_hdcp2p2_recv_msg(ctrl);
- else
- ctrl->polling = true;
+ ctrl->polling = true;
break;
case HDCP_TRANSPORT_CMD_LINK_CHECK:
dp_hdcp2p2_link_check(ctrl);
break;
case HDCP_TRANSPORT_CMD_AUTHENTICATE:
- dp_hdcp2p2_manage_session(ctrl);
+ dp_hdcp2p2_start_auth(ctrl);
break;
default:
break;
@@ -809,8 +885,12 @@
.feature_supported = dp_hdcp2p2_feature_supported,
.force_encryption = dp_hdcp2p2_force_encryption,
.sink_support = dp_hdcp2p2_supported,
+ .set_mode = dp_hdcp2p2_register,
+ .on = dp_hdcp2p2_on,
.off = dp_hdcp2p2_off,
.cp_irq = dp_hdcp2p2_cp_irq,
+ .register_streams = dp_hdcp2p2_register_streams,
+ .deregister_streams = dp_hdcp2p2_deregister_streams,
};
static struct hdcp_transport_ops client_ops = {
@@ -865,7 +945,6 @@
register_data.hdcp_data = &ctrl->lib_ctx;
register_data.client_ops = &client_ops;
register_data.ops = &hdcp2x_ops;
- register_data.device_type = HDCP_TXMTR_DP;
register_data.client_data = ctrl;
rc = sde_hdcp_2x_register(®ister_data);
diff --git a/drivers/gpu/drm/msm/dp/dp_mst_drm.c b/drivers/gpu/drm/msm/dp/dp_mst_drm.c
index f528485..508c6dc 100644
--- a/drivers/gpu/drm/msm/dp/dp_mst_drm.c
+++ b/drivers/gpu/drm/msm/dp/dp_mst_drm.c
@@ -21,8 +21,8 @@
#include "dp_drm.h"
#define DP_MST_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
+#define DP_MST_INFO_LOG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
-#define MAX_DP_MST_STREAMS 2
#define MAX_DP_MST_DRM_ENCODERS 2
#define MAX_DP_MST_DRM_BRIDGES 2
#define HPD_STRING_SIZE 30
@@ -93,12 +93,18 @@
struct drm_display_mode drm_mode;
struct dp_display_mode dp_mode;
struct drm_connector *connector;
+ struct drm_connector *old_connector;
void *dp_panel;
+ void *old_dp_panel;
int vcpi;
int pbn;
int num_slots;
int start_slot;
+
+ u32 fixed_port_num;
+ bool fixed_port_added;
+ struct drm_connector *fixed_connector;
};
struct dp_mst_private {
@@ -111,6 +117,7 @@
struct dp_mst_sim_mode simulator;
struct mutex mst_lock;
enum dp_drv_state state;
+ bool mst_session_state;
};
struct dp_mst_encoder_info_cache {
@@ -167,10 +174,13 @@
mutex_lock(&mstb->mgr->lock);
list_del(&port->next);
mutex_unlock(&mstb->mgr->lock);
- return;
+ goto put_port;
}
(*mstb->mgr->cbs->register_connector)(port->connector);
}
+
+put_port:
+ kref_put(&port->kref, NULL);
}
static void dp_mst_sim_link_probe_work(struct work_struct *work)
@@ -525,7 +535,8 @@
mst->dp_display->set_stream_info(mst->dp_display,
dp_bridge->dp_panel,
- dp_bridge->id, start_slot, num_slots, pbn);
+ dp_bridge->id, start_slot, num_slots, pbn,
+ dp_bridge->vcpi);
pr_info("bridge:%d vcpi:%d start_slot:%d num_slots:%d, pbn:%d\n",
dp_bridge->id, dp_bridge->vcpi,
@@ -550,7 +561,8 @@
mst->dp_display->set_stream_info(mst->dp_display,
mst_bridge->dp_panel,
- mst_bridge->id, start_slot, num_slots, pbn);
+ mst_bridge->id, start_slot, num_slots, pbn,
+ mst_bridge->vcpi);
}
}
@@ -672,8 +684,6 @@
struct dp_display *dp;
struct dp_mst_private *mst;
- DP_MST_DEBUG("enter\n");
-
if (!drm_bridge) {
pr_err("Invalid params\n");
return;
@@ -682,6 +692,9 @@
bridge = to_dp_mst_bridge(drm_bridge);
dp = bridge->display;
+ bridge->old_connector = NULL;
+ bridge->old_dp_panel = NULL;
+
if (!bridge->connector) {
pr_err("Invalid connector\n");
return;
@@ -718,7 +731,14 @@
_dp_mst_bridge_pre_enable_part2(bridge);
}
- DP_MST_DEBUG("mst bridge [%d] pre enable complete\n", bridge->id);
+ DP_MST_INFO_LOG("mode: id(%d) mode(%s), refresh(%d)\n",
+ bridge->id, bridge->drm_mode.name,
+ bridge->drm_mode.vrefresh);
+ DP_MST_INFO_LOG("dsc: id(%d) dsc(%d)\n", bridge->id,
+ bridge->dp_mode.timing.comp_info.comp_ratio);
+ DP_MST_INFO_LOG("channel: id(%d) vcpi(%d) start(%d) tot(%d)\n",
+ bridge->id, bridge->vcpi, bridge->start_slot,
+ bridge->num_slots);
end:
mutex_unlock(&mst->mst_lock);
}
@@ -729,8 +749,6 @@
struct dp_mst_bridge *bridge;
struct dp_display *dp;
- DP_MST_DEBUG("enter\n");
-
if (!drm_bridge) {
pr_err("Invalid params\n");
return;
@@ -751,7 +769,8 @@
return;
}
- DP_MST_DEBUG("mst bridge [%d] post enable complete\n", bridge->id);
+ DP_MST_INFO_LOG("mst bridge [%d] post enable complete\n",
+ bridge->id);
}
static void dp_mst_bridge_disable(struct drm_bridge *drm_bridge)
@@ -761,8 +780,6 @@
struct dp_display *dp;
struct dp_mst_private *mst;
- DP_MST_DEBUG("enter\n");
-
if (!drm_bridge) {
pr_err("Invalid params\n");
return;
@@ -791,7 +808,7 @@
_dp_mst_bridge_pre_disable_part2(bridge);
- DP_MST_DEBUG("mst bridge [%d] disable complete\n", bridge->id);
+ DP_MST_INFO_LOG("mst bridge [%d] disable complete\n", bridge->id);
mutex_unlock(&mst->mst_lock);
}
@@ -803,8 +820,6 @@
struct dp_display *dp;
struct dp_mst_private *mst;
- DP_MST_DEBUG("enter\n");
-
if (!drm_bridge) {
pr_err("Invalid params\n");
return;
@@ -832,12 +847,17 @@
/* maintain the connector to encoder link during suspend/resume */
if (mst->state != PM_SUSPEND) {
/* Disconnect the connector and panel info from bridge */
+ mst->mst_bridge[bridge->id].old_connector =
+ mst->mst_bridge[bridge->id].connector;
+ mst->mst_bridge[bridge->id].old_dp_panel =
+ mst->mst_bridge[bridge->id].dp_panel;
mst->mst_bridge[bridge->id].connector = NULL;
mst->mst_bridge[bridge->id].dp_panel = NULL;
mst->mst_bridge[bridge->id].encoder_active_sts = false;
}
- DP_MST_DEBUG("mst bridge [%d] post disable complete\n", bridge->id);
+ DP_MST_INFO_LOG("mst bridge [%d] post disable complete\n",
+ bridge->id);
}
static void dp_mst_bridge_mode_set(struct drm_bridge *drm_bridge,
@@ -856,13 +876,21 @@
bridge = to_dp_mst_bridge(drm_bridge);
if (!bridge->connector) {
- pr_err("Invalid connector\n");
- return;
+ if (!bridge->old_connector) {
+ pr_err("Invalid connector\n");
+ return;
+ }
+ bridge->connector = bridge->old_connector;
+ bridge->old_connector = NULL;
}
if (!bridge->dp_panel) {
- pr_err("Invalid dp_panel\n");
- return;
+ if (!bridge->old_dp_panel) {
+ pr_err("Invalid dp_panel\n");
+ return;
+ }
+ bridge->dp_panel = bridge->old_dp_panel;
+ bridge->old_dp_panel = NULL;
}
dp = bridge->display;
@@ -877,6 +905,10 @@
/* DP MST Bridge APIs */
+static struct drm_connector *
+dp_mst_drm_fixed_connector_init(struct dp_display *dp_display,
+ struct drm_encoder *encoder);
+
static const struct drm_bridge_funcs dp_mst_bridge_ops = {
.attach = dp_mst_bridge_attach,
.mode_fixup = dp_mst_bridge_mode_fixup,
@@ -944,6 +976,23 @@
DP_MST_DEBUG("mst drm bridge init. bridge id:%d\n", i);
+ /*
+ * If a fixed topology port is defined, the connector is created
+ * immediately.
+ */
+ rc = display->mst_get_fixed_topology_port(display, bridge->id,
+ &bridge->fixed_port_num);
+ if (!rc) {
+ bridge->fixed_connector =
+ dp_mst_drm_fixed_connector_init(display,
+ bridge->encoder);
+ if (bridge->fixed_connector == NULL) {
+ pr_err("failed to create fixed connector\n");
+ rc = -ENOMEM;
+ goto end;
+ }
+ }
+
return 0;
end:
@@ -1136,7 +1185,8 @@
}
for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
- if (!mst->mst_bridge[i].encoder_active_sts) {
+ if (!mst->mst_bridge[i].encoder_active_sts &&
+ !mst->mst_bridge[i].fixed_connector) {
mst->mst_bridge[i].encoder_active_sts = true;
mst->mst_bridge[i].connector = connector;
mst->mst_bridge[i].dp_panel = conn->drv_panel;
@@ -1343,6 +1393,7 @@
if (!connector) {
pr_err("mst sde_connector_init failed\n");
+ drm_modeset_unlock_all(dev);
return connector;
}
@@ -1350,6 +1401,7 @@
if (rc) {
pr_err("mst connector install failed\n");
sde_connector_destroy(connector);
+ drm_modeset_unlock_all(dev);
return NULL;
}
@@ -1372,7 +1424,7 @@
/* unlock connector and make it accessible */
drm_modeset_unlock_all(dev);
- DP_MST_DEBUG("add mst connector:%d\n", connector->base.id);
+ DP_MST_INFO_LOG("add mst connector id:%d\n", connector->base.id);
return connector;
}
@@ -1383,7 +1435,8 @@
connector->status = connector->funcs->detect(connector, false);
- DP_MST_DEBUG("register mst connector:%d\n", connector->base.id);
+ DP_MST_INFO_LOG("register mst connector id:%d\n",
+ connector->base.id);
drm_connector_register(connector);
}
@@ -1392,12 +1445,297 @@
{
DP_MST_DEBUG("enter\n");
- DP_MST_DEBUG("destroy mst connector:%d\n", connector->base.id);
+ DP_MST_INFO_LOG("destroy mst connector id:%d\n", connector->base.id);
drm_connector_unregister(connector);
drm_connector_put(connector);
}
+static enum drm_connector_status
+dp_mst_fixed_connector_detect(struct drm_connector *connector, bool force,
+ void *display)
+{
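+ /*
+ * a fixed connector reports disconnected until its reserved MST
+ * port is actually added to the topology
+ */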
+ struct dp_display *dp_display = display;
+ struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
+ int i;
+
+ for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+ if (mst->mst_bridge[i].fixed_connector != connector)
+ continue;
+
+ if (!mst->mst_bridge[i].fixed_port_added)
+ break;
+
+ return dp_mst_connector_detect(connector, force, display);
+ }
+
+ return connector_status_disconnected;
+}
+
+static struct drm_encoder *
+dp_mst_fixed_atomic_best_encoder(struct drm_connector *connector,
+ void *display, struct drm_connector_state *state)
+{
+ struct dp_display *dp_display = display;
+ struct dp_mst_private *mst = dp_display->dp_mst_prv_info;
+ struct sde_connector *conn = to_sde_connector(connector);
+ struct drm_encoder *enc = NULL;
+ u32 i;
+
+ for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+ if (mst->mst_bridge[i].connector == connector) {
+ enc = mst->mst_bridge[i].encoder;
+ goto end;
+ }
+ }
+
+ for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+ if (mst->mst_bridge[i].fixed_connector == connector) {
+ mst->mst_bridge[i].encoder_active_sts = true;
+ mst->mst_bridge[i].connector = connector;
+ mst->mst_bridge[i].dp_panel = conn->drv_panel;
+ enc = mst->mst_bridge[i].encoder;
+ break;
+ }
+ }
+
+end:
+ if (enc)
+ DP_MST_DEBUG("mst connector:%d atomic best encoder:%d\n",
+ connector->base.id, i);
+ else
+ DP_MST_DEBUG("mst connector:%d atomic best encoder failed\n",
+ connector->base.id);
+
+ return enc;
+}
+
+static u32 dp_mst_find_fixed_port_num(struct drm_dp_mst_branch *mstb,
+ struct drm_dp_mst_port *target)
+{
+ struct drm_dp_mst_port *port;
+ u32 port_num = 0;
+
+ /*
+ * walk the ports in reverse of the order they were added so each
+ * port's number stays unique and stable once the topology is fixed
+ */
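+ /*
+ * Illustrative: if the reverse walk visits leaf ports P3, P2, P1,
+ * a target of P2 returns 2, i.e. its 1-based position in the walk.
+ */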
+ list_for_each_entry_reverse(port, &mstb->ports, next) {
+ if (port->mstb)
+ port_num += dp_mst_find_fixed_port_num(port->mstb,
+ target);
+ else if (!port->input) {
+ ++port_num;
+ if (port == target)
+ break;
+ }
+ }
+
+ return port_num;
+}
+
+static struct drm_connector *
+dp_mst_find_fixed_connector(struct dp_mst_private *dp_mst,
+ struct drm_dp_mst_port *port)
+{
+ struct dp_display *dp_display = dp_mst->dp_display;
+ struct drm_connector *connector = NULL;
+ struct sde_connector *c_conn;
+ u32 port_num;
+ int i;
+
+ mutex_lock(&port->mgr->lock);
+ port_num = dp_mst_find_fixed_port_num(port->mgr->mst_primary, port);
+ mutex_unlock(&port->mgr->lock);
+
+ if (!port_num)
+ return NULL;
+
+ for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+ if (dp_mst->mst_bridge[i].fixed_port_num == port_num) {
+ connector = dp_mst->mst_bridge[i].fixed_connector;
+ c_conn = to_sde_connector(connector);
+ c_conn->mst_port = port;
+ dp_display->mst_connector_update_link_info(dp_display,
+ connector);
+ dp_mst->mst_bridge[i].fixed_port_added = true;
+ DP_MST_DEBUG("found fixed connector %d\n",
+ DRMID(connector));
+ break;
+ }
+ }
+
+ return connector;
+}
+
+static int
+dp_mst_find_first_available_encoder_idx(struct dp_mst_private *dp_mst)
+{
+ int enc_idx = MAX_DP_MST_DRM_BRIDGES;
+ int i;
+
+ for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+ if (!dp_mst->mst_bridge[i].fixed_connector) {
+ enc_idx = i;
+ break;
+ }
+ }
+
+ return enc_idx;
+}
+
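+/*
+ * Ports reserved via the fixed topology list reuse their pre-created
+ * connector; any other port falls back to a normally added MST
+ * connector bound to the remaining encoders.
+ */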
+static struct drm_connector *
+dp_mst_add_fixed_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, const char *pathprop)
+{
+ struct dp_mst_private *dp_mst;
+ struct drm_device *dev;
+ struct dp_display *dp_display;
+ struct drm_connector *connector;
+ int i, enc_idx;
+
+ DP_MST_DEBUG("enter\n");
+
+ dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
+
+ dp_display = dp_mst->dp_display;
+ dev = dp_display->drm_dev;
+
+ if (port->input || port->mstb)
+ enc_idx = MAX_DP_MST_DRM_BRIDGES;
+ else {
+ /* if port is already reserved, return immediately */
+ connector = dp_mst_find_fixed_connector(dp_mst, port);
+ if (connector != NULL)
+ return connector;
+
+ /* first available bridge index for non-reserved port */
+ enc_idx = dp_mst_find_first_available_encoder_idx(dp_mst);
+ }
+
+ /* add normal connector */
+ connector = dp_mst_add_connector(mgr, port, pathprop);
+ if (!connector) {
+ DP_MST_DEBUG("failed to add connector\n");
+ return NULL;
+ }
+
+ drm_modeset_lock_all(dev);
+
+ /* clear encoder list */
+ for (i = 0; i < DRM_CONNECTOR_MAX_ENCODER; i++)
+ connector->encoder_ids[i] = 0;
+
+ /* re-attach encoders from first available encoders */
+ for (i = enc_idx; i < MAX_DP_MST_DRM_BRIDGES; i++)
+ drm_connector_attach_encoder(connector,
+ dp_mst->mst_bridge[i].encoder);
+
+ drm_modeset_unlock_all(dev);
+
+ DP_MST_DEBUG("add mst connector:%d\n", connector->base.id);
+
+ return connector;
+}
+
+static void dp_mst_register_fixed_connector(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct dp_display *dp_display = c_conn->display;
+ struct dp_mst_private *dp_mst = dp_display->dp_mst_prv_info;
+ int i;
+
+ DP_MST_DEBUG("enter\n");
+
+ /* skip connector registered for fixed topology ports */
+ for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+ if (dp_mst->mst_bridge[i].fixed_connector == connector) {
+ DP_MST_DEBUG("found fixed connector %d\n",
+ DRMID(connector));
+ return;
+ }
+ }
+
+ dp_mst_register_connector(connector);
+}
+
+static void dp_mst_destroy_fixed_connector(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_connector *connector)
+{
+ struct dp_mst_private *dp_mst;
+ int i;
+
+ DP_MST_DEBUG("enter\n");
+
+ dp_mst = container_of(mgr, struct dp_mst_private, mst_mgr);
+
+ /* skip connector destroy for fixed topology ports */
+ for (i = 0; i < MAX_DP_MST_DRM_BRIDGES; i++) {
+ if (dp_mst->mst_bridge[i].fixed_connector == connector) {
+ dp_mst->mst_bridge[i].fixed_port_added = false;
+ DP_MST_DEBUG("destroy fixed connector %d\n",
+ DRMID(connector));
+ return;
+ }
+ }
+
+ dp_mst_destroy_connector(mgr, connector);
+}
+
+static struct drm_connector *
+dp_mst_drm_fixed_connector_init(struct dp_display *dp_display,
+ struct drm_encoder *encoder)
+{
+ static const struct sde_connector_ops dp_mst_connector_ops = {
+ .post_init = NULL,
+ .detect = dp_mst_fixed_connector_detect,
+ .get_modes = dp_mst_connector_get_modes,
+ .mode_valid = dp_mst_connector_mode_valid,
+ .get_info = dp_mst_connector_get_info,
+ .get_mode_info = dp_mst_connector_get_mode_info,
+ .atomic_best_encoder = dp_mst_fixed_atomic_best_encoder,
+ .atomic_check = dp_mst_connector_atomic_check,
+ .config_hdr = dp_mst_connector_config_hdr,
+ .pre_destroy = dp_mst_connector_pre_destroy,
+ };
+ struct drm_device *dev;
+ struct drm_connector *connector;
+ int rc;
+
+ DP_MST_DEBUG("enter\n");
+
+ dev = dp_display->drm_dev;
+
+ connector = sde_connector_init(dev,
+ encoder,
+ NULL,
+ dp_display,
+ &dp_mst_connector_ops,
+ DRM_CONNECTOR_POLL_HPD,
+ DRM_MODE_CONNECTOR_DisplayPort);
+
+ if (!connector) {
+ pr_err("mst sde_connector_init failed\n");
+ return NULL;
+ }
+
+ rc = dp_display->mst_connector_install(dp_display, connector);
+ if (rc) {
+ pr_err("mst connector install failed\n");
+ sde_connector_destroy(connector);
+ return NULL;
+ }
+
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.path_property, 0);
+ drm_object_attach_property(&connector->base,
+ dev->mode_config.tile_property, 0);
+
+ DP_MST_DEBUG("add mst fixed connector:%d\n", connector->base.id);
+
+ return connector;
+}
+
static void dp_mst_hotplug(struct drm_dp_mst_topology_mgr *mgr)
{
struct dp_mst_private *mst = container_of(mgr, struct dp_mst_private,
@@ -1411,7 +1749,7 @@
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
- DP_MST_DEBUG("mst hot plug event\n");
+ DP_MST_INFO_LOG("mst hot plug event\n");
}
static void dp_mst_hpd_event_notify(struct dp_mst_private *mst, bool hpd_status)
@@ -1432,7 +1770,7 @@
kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE, envp);
- DP_MST_DEBUG("%s finished\n", __func__);
+ DP_MST_INFO_LOG("%s finished\n", __func__);
}
/* DP Driver Callback OPs */
@@ -1444,7 +1782,9 @@
struct dp_display *dp = dp_display;
struct dp_mst_private *mst = dp->dp_mst_prv_info;
- DP_MST_DEBUG("enter:\n");
+ mutex_lock(&mst->mst_lock);
+ mst->mst_session_state = hpd_status;
+ mutex_unlock(&mst->mst_lock);
if (!hpd_status)
rc = mst->mst_fw_cbs->topology_mgr_set_mst(&mst->mst_mgr,
@@ -1466,9 +1806,7 @@
dp_mst_hpd_event_notify(mst, hpd_status);
- DP_MST_DEBUG("mst display hpd:%d, rc:%d\n", hpd_status, rc);
-
- DP_MST_DEBUG("exit:\n");
+ DP_MST_INFO_LOG("mst display hpd:%d, rc:%d\n", hpd_status, rc);
}
static void dp_mst_display_hpd_irq(void *dp_display,
@@ -1477,26 +1815,29 @@
int rc;
struct dp_display *dp = dp_display;
struct dp_mst_private *mst = dp->dp_mst_prv_info;
- u8 esi[14], idx;
+ u8 esi[14];
unsigned int esi_res = DP_SINK_COUNT_ESI + 1;
bool handled;
- DP_MST_DEBUG("enter:\n");
-
if (info->mst_hpd_sim) {
dp_mst_hotplug(&mst->mst_mgr);
return;
}
+ if (!mst->mst_session_state) {
+ pr_err("mst_hpd_irq received before mst session start\n");
+ return;
+ }
+
rc = drm_dp_dpcd_read(mst->caps.drm_aux, DP_SINK_COUNT_ESI,
esi, 14);
if (rc != 14) {
- pr_err("dpcd sync status read failed, rlen=%d\n", rc);
- goto end;
+ pr_err("dpcd sink status read failed, rlen=%d\n", rc);
+ return;
}
- for (idx = 0; idx < 14; idx++)
- DP_MST_DEBUG("mst irq: esi[%d]: 0x%x\n", idx, esi[idx]);
+ DP_MST_DEBUG("mst irq: esi1[0x%x] esi2[0x%x] esi3[%x]\n",
+ esi[1], esi[2], esi[3]);
rc = drm_dp_mst_hpd_irq(&mst->mst_mgr, esi, &handled);
@@ -1509,9 +1850,6 @@
}
DP_MST_DEBUG("mst display hpd_irq handled:%d rc:%d\n", handled, rc);
-
-end:
- DP_MST_DEBUG("exit:\n");
}
static void dp_mst_set_state(void *dp_display, enum dp_drv_state mst_state)
@@ -1525,6 +1863,7 @@
}
mst->state = mst_state;
+ DP_MST_INFO_LOG("mst power state:%d\n", mst_state);
}
/* DP MST APIs */
@@ -1542,6 +1881,13 @@
.hotplug = dp_mst_hotplug,
};
+static const struct drm_dp_mst_topology_cbs dp_mst_fixed_drm_cbs = {
+ .add_connector = dp_mst_add_fixed_connector,
+ .register_connector = dp_mst_register_fixed_connector,
+ .destroy_connector = dp_mst_destroy_fixed_connector,
+ .hotplug = dp_mst_hotplug,
+};
+
static void dp_mst_sim_init(struct dp_mst_private *mst)
{
INIT_WORK(&mst->simulator.probe_work, dp_mst_sim_link_probe_work);
@@ -1606,7 +1952,11 @@
}
memset(&dp_mst_enc_cache, 0, sizeof(dp_mst_enc_cache));
- DP_MST_DEBUG("dp drm mst topology manager init completed\n");
+ /* choose fixed callback function if fixed topology is found */
+ if (!dp_display->mst_get_fixed_topology_port(dp_display, 0, NULL))
+ dp_mst.mst_mgr.cbs = &dp_mst_fixed_drm_cbs;
+
+ DP_MST_INFO_LOG("dp drm mst topology manager init completed\n");
return ret;
@@ -1637,6 +1987,6 @@
mutex_destroy(&mst->mst_lock);
- DP_MST_DEBUG("dp drm mst topology manager deinit completed\n");
+ DP_MST_INFO_LOG("dp drm mst topology manager deinit completed\n");
}
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.c b/drivers/gpu/drm/msm/dp/dp_panel.c
index eca7909..d98ebcf 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.c
+++ b/drivers/gpu/drm/msm/dp/dp_panel.c
@@ -1530,12 +1530,14 @@
struct dp_dsc_slices_per_line *rec;
int slice_width;
u32 ppr = dp_mode->timing.pixel_clk_khz/1000;
+ int max_slice_width;
comp_info->dsc_info.slice_per_pkt = 0;
for (i = 0; i < ARRAY_SIZE(slice_per_line_tbl); i++) {
rec = &slice_per_line_tbl[i];
if ((ppr > rec->min_ppr) && (ppr <= rec->max_ppr)) {
comp_info->dsc_info.slice_per_pkt = rec->num_slices;
+ i++;
break;
}
}
@@ -1543,9 +1545,21 @@
if (comp_info->dsc_info.slice_per_pkt == 0)
return -EINVAL;
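+ /*
+ * dsc_dpcd[12] presumably caches DPCD 0x06c (MAX_SLICE_WIDTH),
+ * which is expressed in units of 320 pixels
+ */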
+ max_slice_width = dp_panel->dsc_dpcd[12] * 320;
slice_width = (dp_mode->timing.h_active /
comp_info->dsc_info.slice_per_pkt);
+ while (slice_width >= max_slice_width) {
+ if (i == ARRAY_SIZE(slice_per_line_tbl))
+ return -EINVAL;
+
+ rec = &slice_per_line_tbl[i];
+ comp_info->dsc_info.slice_per_pkt = rec->num_slices;
+ slice_width = (dp_mode->timing.h_active /
+ comp_info->dsc_info.slice_per_pkt);
+ i++;
+ }
+
comp_info->dsc_info.block_pred_enable =
dp_panel->sink_dsc_caps.block_pred_en;
comp_info->dsc_info.vbr_enable = 0;
@@ -1657,8 +1671,8 @@
panel->minor = link_info->revision & 0x0f;
pr_debug("version: %d.%d\n", panel->major, panel->minor);
- link_info->rate =
- drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]);
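+ /* cap the sink-advertised link rate at the platform limit from DT */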
+ link_info->rate = min_t(unsigned long, panel->parser->max_lclk_khz,
+ drm_dp_bw_code_to_link_rate(dp_panel->dpcd[DP_MAX_LINK_RATE]));
pr_debug("link_rate=%d\n", link_info->rate);
link_info->num_lanes = dp_panel->dpcd[DP_MAX_LANE_COUNT] &
@@ -2305,13 +2319,14 @@
static int dp_panel_set_stream_info(struct dp_panel *dp_panel,
enum dp_stream_id stream_id, u32 ch_start_slot,
- u32 ch_tot_slots, u32 pbn)
+ u32 ch_tot_slots, u32 pbn, int vcpi)
{
if (!dp_panel || stream_id > DP_STREAM_MAX) {
pr_err("invalid input. stream_id: %d\n", stream_id);
return -EINVAL;
}
+ dp_panel->vcpi = vcpi;
dp_panel->stream_id = stream_id;
dp_panel->channel_start_slot = ch_start_slot;
dp_panel->channel_total_slots = ch_tot_slots;
@@ -2376,7 +2391,7 @@
if (!panel->custom_edid && dp_panel->edid_ctrl->edid)
sde_free_edid((void **)&dp_panel->edid_ctrl);
- dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0, 0);
+ dp_panel_set_stream_info(dp_panel, DP_STREAM_MAX, 0, 0, 0, 0);
memset(&dp_panel->pinfo, 0, sizeof(dp_panel->pinfo));
memset(&hdr->hdr_meta, 0, sizeof(hdr->hdr_meta));
panel->panel_on = false;
@@ -2931,6 +2946,8 @@
if (in->base_panel) {
memcpy(dp_panel->dpcd, in->base_panel->dpcd,
DP_RECEIVER_CAP_SIZE + 1);
+ memcpy(dp_panel->dsc_dpcd, in->base_panel->dsc_dpcd,
+ DP_RECEIVER_DSC_CAP_SIZE + 1);
memcpy(&dp_panel->link_info, &in->base_panel->link_info,
sizeof(dp_panel->link_info));
dp_panel->mst_state = in->base_panel->mst_state;
diff --git a/drivers/gpu/drm/msm/dp/dp_panel.h b/drivers/gpu/drm/msm/dp/dp_panel.h
index 90d5346..dc96090 100644
--- a/drivers/gpu/drm/msm/dp/dp_panel.h
+++ b/drivers/gpu/drm/msm/dp/dp_panel.h
@@ -110,6 +110,7 @@
* Client sets the stream id value using set_stream_id interface.
*/
enum dp_stream_id stream_id;
+ int vcpi;
u32 channel_start_slot;
u32 channel_total_slots;
@@ -154,7 +155,7 @@
int (*set_stream_info)(struct dp_panel *dp_panel,
enum dp_stream_id stream_id, u32 ch_start_slot,
- u32 ch_tot_slots, u32 pbn);
+ u32 ch_tot_slots, u32 pbn, int vcpi);
int (*read_sink_status)(struct dp_panel *dp_panel, u8 *sts, u32 size);
int (*update_edid)(struct dp_panel *dp_panel, struct edid *edid);
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.c b/drivers/gpu/drm/msm/dp/dp_parser.c
index b0a6d24..bc4369d 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.c
+++ b/drivers/gpu/drm/msm/dp/dp_parser.c
@@ -151,11 +151,22 @@
parser->l_map[i] = data[i];
}
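+ /* optional per-lane P/N swap map: bit i of l_pnswap covers lane i */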
+ data = of_get_property(of_node, "qcom,pn-swap-lane-map", &len);
+ if (data && (len == DP_MAX_PHY_LN)) {
+ for (i = 0; i < len; i++)
+ parser->l_pnswap |= (data[i] & 0x01) << i;
+ }
+
rc = of_property_read_u32(of_node,
"qcom,max-pclk-frequency-khz", &parser->max_pclk_khz);
if (rc)
parser->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
+ rc = of_property_read_u32(of_node,
+ "qcom,max-lclk-frequency-khz", &parser->max_lclk_khz);
+ if (rc)
+ parser->max_lclk_khz = DP_MAX_LINK_CLK_KHZ;
+
return 0;
}
@@ -692,6 +703,7 @@
static int dp_parser_mst(struct dp_parser *parser)
{
struct device *dev = &parser->pdev->dev;
+ int i;
parser->has_mst = of_property_read_bool(dev->of_node,
"qcom,mst-enable");
@@ -699,6 +711,12 @@
pr_debug("mst parsing successful. mst:%d\n", parser->has_mst);
+ for (i = 0; i < MAX_DP_MST_STREAMS; i++) {
+ of_property_read_u32_index(dev->of_node,
+ "qcom,mst-fixed-topology-ports", i,
+ &parser->mst_fixed_port[i]);
+ }
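+ /*
+ * Illustrative DT usage (values hypothetical):
+ * qcom,mst-fixed-topology-ports = <1 2>;
+ */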
+
return 0;
}
diff --git a/drivers/gpu/drm/msm/dp/dp_parser.h b/drivers/gpu/drm/msm/dp/dp_parser.h
index 7fb90c9..9caa1a7 100644
--- a/drivers/gpu/drm/msm/dp/dp_parser.h
+++ b/drivers/gpu/drm/msm/dp/dp_parser.h
@@ -11,6 +11,8 @@
#define DP_LABEL "MDSS DP DISPLAY"
#define AUX_CFG_LEN 10
#define DP_MAX_PIXEL_CLK_KHZ 675000
+#define DP_MAX_LINK_CLK_KHZ 810000
+#define MAX_DP_MST_STREAMS 2
enum dp_pm_type {
DP_CORE_PM,
@@ -181,6 +183,9 @@
* @mp: gpio, regulator and clock related data
* @pinctrl: pin-control related data
* @disp_data: controller's display related data
+ * @l_pnswap: P/N swap status on each lane
+ * @max_pclk_khz: maximum pixel clock supported for the platform
+ * @max_lclk_khz: maximum link clock supported for the platform
* @hw_cfg: DP HW specific settings
* @has_mst: MST feature enable status
* @has_mst_sideband: MST sideband feature enable status
@@ -191,6 +196,7 @@
* @max_dp_dsc_blks: maximum DSC blks for DP interface
* @max_dp_dsc_input_width_pixs: Maximum input width for DSC block
* @has_widebus: widebus (2PPC) feature enable status
* @mst_fixed_port: mst port numbers reserved for fixed topology
* @parse: function to be called by client to parse device tree.
* @get_io: function to be called by client to get io data.
* @get_io_buf: function to be called by client to get io buffers.
@@ -205,8 +211,10 @@
struct dp_display_data disp_data;
u8 l_map[4];
+ u8 l_pnswap;
struct dp_aux_cfg aux_cfg[AUX_CFG_LEN];
u32 max_pclk_khz;
+ u32 max_lclk_khz;
struct dp_hw_cfg hw_cfg;
bool has_mst;
bool has_mst_sideband;
@@ -218,6 +226,7 @@
u32 max_dp_dsc_blks;
u32 max_dp_dsc_input_width_pixs;
bool lphw_hpd;
+ u32 mst_fixed_port[MAX_DP_MST_STREAMS];
int (*parse)(struct dp_parser *parser);
struct dp_io_data *(*get_io)(struct dp_parser *parser, char *name);
diff --git a/drivers/gpu/drm/msm/dp/dp_reg.h b/drivers/gpu/drm/msm/dp/dp_reg.h
index 5089f0c..7f9391d 100644
--- a/drivers/gpu/drm/msm/dp/dp_reg.h
+++ b/drivers/gpu/drm/msm/dp/dp_reg.h
@@ -351,12 +351,14 @@
#define TXn_TX_EMP_POST1_LVL (0x000C)
#define TXn_TX_DRV_LVL (0x001C)
+#define TXn_TX_POL_INV (0x0064)
#define DP_PHY_AUX_INTERRUPT_MASK_V420 (0x0054)
#define DP_PHY_AUX_INTERRUPT_CLEAR_V420 (0x0058)
#define DP_PHY_AUX_INTERRUPT_STATUS_V420 (0x00D8)
#define DP_PHY_SPARE0_V420 (0x00C8)
#define TXn_TX_DRV_LVL_V420 (0x0014)
+#define TXn_TX_POL_INV_V420 (0x005C)
#define QSERDES_COM_BIAS_EN_CLKBUFLR_EN (0x004)
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
index ae2ce71..15ad347 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -172,7 +172,7 @@
{
struct dsi_display *dsi_display = display;
struct dsi_panel *panel;
- u32 bl_scale, bl_scale_ad;
+ u32 bl_scale, bl_scale_sv;
u64 bl_temp;
int rc = 0;
@@ -193,12 +193,11 @@
bl_scale = panel->bl_config.bl_scale;
bl_temp = bl_lvl * bl_scale / MAX_BL_SCALE_LEVEL;
- bl_scale_ad = panel->bl_config.bl_scale_ad;
- bl_temp = (u32)bl_temp * bl_scale_ad / MAX_AD_BL_SCALE_LEVEL;
+ bl_scale_sv = panel->bl_config.bl_scale_sv;
+ bl_temp = (u32)bl_temp * bl_scale_sv / MAX_SV_BL_SCALE_LEVEL;
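+ /*
+ * e.g. bl_lvl = 4096, bl_scale = 512/1024, bl_scale_sv = 65535/65535
+ * gives bl_temp = 2048: half brightness, sunlight scaling inactive
+ */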
- pr_debug("bl_scale = %u, bl_scale_ad = %u, bl_lvl = %u\n",
- bl_scale, bl_scale_ad, (u32)bl_temp);
-
+ pr_debug("bl_scale = %u, bl_scale_sv = %u, bl_lvl = %u\n",
+ bl_scale, bl_scale_sv, (u32)bl_temp);
rc = dsi_display_clk_ctrl(dsi_display->dsi_clk_handle,
DSI_CORE_CLK, DSI_CLK_ON);
if (rc) {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
index 5e9d3ac..730a2c2 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -2032,7 +2032,7 @@
}
panel->bl_config.bl_scale = MAX_BL_SCALE_LEVEL;
- panel->bl_config.bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+ panel->bl_config.bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
rc = utils->read_u32(utils->data, "qcom,mdss-dsi-bl-min-level", &val);
if (rc) {
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
index 8d9cfea..a2dcebb 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -23,7 +23,7 @@
#define MAX_BL_LEVEL 4096
#define MAX_BL_SCALE_LEVEL 1024
-#define MAX_AD_BL_SCALE_LEVEL 65535
+#define MAX_SV_BL_SCALE_LEVEL 65535
#define DSI_CMD_PPS_SIZE 135
#define DSI_MODE_MAX 5
@@ -90,7 +90,7 @@
u32 brightness_max_level;
u32 bl_level;
u32 bl_scale;
- u32 bl_scale_ad;
+ u32 bl_scale_sv;
int en_gpio;
/* PWM params */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index e8da71d..9a36012 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index ab26608..b36f62a 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -241,7 +241,8 @@
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&pdev->dev, "failed to get memory resource: %s\n", name);
+ dev_dbg(&pdev->dev, "failed to get memory resource: %s\n",
+ name);
return ERR_PTR(-EINVAL);
}
@@ -270,7 +271,7 @@
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res) {
- dev_err(&pdev->dev, "failed to get memory resource: %s\n",
+ dev_dbg(&pdev->dev, "failed to get memory resource: %s\n",
name);
return 0;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index d60eb34..b8de212 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -189,7 +189,7 @@
CONNECTOR_PROP_DST_H,
CONNECTOR_PROP_ROI_V1,
CONNECTOR_PROP_BL_SCALE,
- CONNECTOR_PROP_AD_BL_SCALE,
+ CONNECTOR_PROP_SV_BL_SCALE,
/* enum/bitmask properties */
CONNECTOR_PROP_TOPOLOGY_NAME,
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
index 48f97a0..b380481 100644
--- a/drivers/gpu/drm/msm/sde/sde_color_processing.c
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -2160,7 +2160,7 @@
static void sde_cp_notify_ad_event(struct drm_crtc *crtc_drm, void *arg)
{
uint32_t input_bl = 0, output_bl = 0;
- uint32_t scale = MAX_AD_BL_SCALE_LEVEL;
+ uint32_t scale = MAX_SV_BL_SCALE_LEVEL;
struct sde_hw_mixer *hw_lm = NULL;
struct sde_hw_dspp *hw_dspp = NULL;
u32 num_mixers;
@@ -2208,7 +2208,7 @@
if (!input_bl || input_bl < output_bl)
return;
- scale = (output_bl * MAX_AD_BL_SCALE_LEVEL) / input_bl;
+ scale = (output_bl * MAX_SV_BL_SCALE_LEVEL) / input_bl;
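+ /* e.g. output_bl = input_bl / 2 gives scale = 32767 (~65535 / 2) */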
event.length = sizeof(u32);
event.type = DRM_EVENT_AD_BACKLIGHT;
msm_mode_object_event_notify(&crtc_drm->base, crtc_drm->dev,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index c3be2fb..d87a981 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -539,13 +539,13 @@
else
bl_config->bl_scale = c_conn->bl_scale;
- if (c_conn->bl_scale_ad > MAX_AD_BL_SCALE_LEVEL)
- bl_config->bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+ if (c_conn->bl_scale_sv > MAX_SV_BL_SCALE_LEVEL)
+ bl_config->bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
else
- bl_config->bl_scale_ad = c_conn->bl_scale_ad;
+ bl_config->bl_scale_sv = c_conn->bl_scale_sv;
- SDE_DEBUG("bl_scale = %u, bl_scale_ad = %u, bl_level = %u\n",
- bl_config->bl_scale, bl_config->bl_scale_ad,
+ SDE_DEBUG("bl_scale = %u, bl_scale_sv = %u, bl_level = %u\n",
+ bl_config->bl_scale, bl_config->bl_scale_sv,
bl_config->bl_level);
rc = c_conn->ops.set_backlight(&c_conn->base,
dsi_display, bl_config->bl_level);
@@ -615,7 +615,7 @@
mutex_unlock(&c_conn->lock);
break;
case CONNECTOR_PROP_BL_SCALE:
- case CONNECTOR_PROP_AD_BL_SCALE:
+ case CONNECTOR_PROP_SV_BL_SCALE:
_sde_connector_update_bl_scale(c_conn);
break;
case CONNECTOR_PROP_HDR_METADATA:
@@ -1257,7 +1257,7 @@
if (rc)
SDE_ERROR_CONN(c_conn, "invalid roi_v1, rc: %d\n", rc);
break;
- /* CONNECTOR_PROP_BL_SCALE and CONNECTOR_PROP_AD_BL_SCALE are
+ /* CONNECTOR_PROP_BL_SCALE and CONNECTOR_PROP_SV_BL_SCALE are
* color-processing properties. These two properties require
* special handling since they don't quite fit the current standard
* atomic set property framework.
@@ -1266,8 +1266,8 @@
c_conn->bl_scale = val;
c_conn->bl_scale_dirty = true;
break;
- case CONNECTOR_PROP_AD_BL_SCALE:
- c_conn->bl_scale_ad = val;
+ case CONNECTOR_PROP_SV_BL_SCALE:
+ c_conn->bl_scale_sv = val;
c_conn->bl_scale_dirty = true;
break;
case CONNECTOR_PROP_HDR_METADATA:
@@ -2249,13 +2249,13 @@
0x0, 0, MAX_BL_SCALE_LEVEL, MAX_BL_SCALE_LEVEL,
CONNECTOR_PROP_BL_SCALE);
- msm_property_install_range(&c_conn->property_info, "ad_bl_scale",
- 0x0, 0, MAX_AD_BL_SCALE_LEVEL, MAX_AD_BL_SCALE_LEVEL,
- CONNECTOR_PROP_AD_BL_SCALE);
+ msm_property_install_range(&c_conn->property_info, "sv_bl_scale",
+ 0x0, 0, MAX_SV_BL_SCALE_LEVEL, MAX_SV_BL_SCALE_LEVEL,
+ CONNECTOR_PROP_SV_BL_SCALE);
c_conn->bl_scale_dirty = false;
c_conn->bl_scale = MAX_BL_SCALE_LEVEL;
- c_conn->bl_scale_ad = MAX_AD_BL_SCALE_LEVEL;
+ c_conn->bl_scale_sv = MAX_SV_BL_SCALE_LEVEL;
/* enum/bitmask properties */
msm_property_install_enum(&c_conn->property_info, "topology_name",
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index a1bd65e..0db872f 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -372,7 +372,7 @@
* @esd_status_check: Flag to indicate if ESD thread is scheduled or not
* @bl_scale_dirty: Flag to indicate PP BL scale value(s) is changed
* @bl_scale: BL scale value for ABA feature
- * @bl_scale_ad: BL scale value for AD feature
+ * @bl_scale_sv: BL scale value for sunlight visibility feature
* @unset_bl_level: BL level that needs to be set later
* @allow_bl_update: Flag to indicate if BL update is allowed currently or not
* @qsync_mode: Cached Qsync mode, 0=disabled, 1=continuous mode
@@ -423,7 +423,7 @@
bool bl_scale_dirty;
u32 bl_scale;
- u32 bl_scale_ad;
+ u32 bl_scale_sv;
u32 unset_bl_level;
bool allow_bl_update;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
index 2a029c8..f28a0a2 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -223,6 +223,7 @@
* @elevated_ahb_vote: increase AHB bus speed for the first frame
* after power collapse
* @pm_qos_cpu_req: pm_qos request for cpu frequency
+ * @mode_info: stores the current mode information
*/
struct sde_encoder_virt {
struct drm_encoder base;
@@ -282,6 +283,7 @@
bool recovery_events_enabled;
bool elevated_ahb_vote;
struct pm_qos_request pm_qos_cpu_req;
+ struct msm_mode_info mode_info;
};
#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
@@ -340,67 +342,16 @@
pm_qos_remove_request(&sde_enc->pm_qos_cpu_req);
}
-static struct drm_connector_state *_sde_encoder_get_conn_state(
- struct drm_encoder *drm_enc)
-{
- struct msm_drm_private *priv;
- struct sde_kms *sde_kms;
- struct list_head *connector_list;
- struct drm_connector *conn_iter;
-
- if (!drm_enc) {
- SDE_ERROR("invalid argument\n");
- return NULL;
- }
-
- priv = drm_enc->dev->dev_private;
- sde_kms = to_sde_kms(priv->kms);
- connector_list = &sde_kms->dev->mode_config.connector_list;
-
- list_for_each_entry(conn_iter, connector_list, head)
- if (conn_iter->encoder == drm_enc)
- return conn_iter->state;
-
- return NULL;
-}
-
-static int _sde_encoder_get_mode_info(struct drm_encoder *drm_enc,
- struct msm_mode_info *mode_info)
-{
- struct drm_connector_state *conn_state;
-
- if (!drm_enc || !mode_info) {
- SDE_ERROR("invalid arguments\n");
- return -EINVAL;
- }
-
- conn_state = _sde_encoder_get_conn_state(drm_enc);
- if (!conn_state) {
- SDE_ERROR("invalid connector state for the encoder: %d\n",
- drm_enc->base.id);
- return -EINVAL;
- }
-
- return sde_connector_get_mode_info(conn_state, mode_info);
-}
-
static bool _sde_encoder_is_dsc_enabled(struct drm_encoder *drm_enc)
{
+ struct sde_encoder_virt *sde_enc;
struct msm_compression_info *comp_info;
- struct msm_mode_info mode_info;
- int rc = 0;
if (!drm_enc)
return false;
- rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
- if (rc) {
- SDE_ERROR("failed to get mode info, enc: %d\n",
- drm_enc->base.id);
- return false;
- }
-
- comp_info = &mode_info.comp_info;
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ comp_info = &sde_enc->mode_info.comp_info;
return (comp_info->comp_type == MSM_DISPLAY_COMPRESSION_DSC);
}
@@ -712,8 +663,7 @@
struct drm_connector_state *conn_state)
{
struct sde_encoder_virt *sde_enc = NULL;
- struct msm_mode_info mode_info;
- int rc, i = 0;
+ int i = 0;
if (!hw_res || !drm_enc || !conn_state) {
SDE_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
@@ -735,18 +685,8 @@
phys->ops.get_hw_resources(phys, hw_res, conn_state);
}
- /**
- * NOTE: Do not use sde_encoder_get_mode_info here as this function is
- * called from atomic_check phase. Use the below API to get mode
- * information of the temporary conn_state passed.
- */
- rc = sde_connector_get_mode_info(conn_state, &mode_info);
- if (rc) {
- SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
- return;
- }
-
- hw_res->topology = mode_info.topology;
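+ /* cache the mode info so commit-path helpers skip connector lookups */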
+ sde_connector_get_mode_info(conn_state, &sde_enc->mode_info);
+ hw_res->topology = sde_enc->mode_info.topology;
hw_res->is_primary = sde_enc->disp_info.is_primary;
}
@@ -1334,27 +1274,20 @@
struct sde_hw_dsc *hw_dsc = sde_enc->hw_dsc[0];
struct sde_encoder_phys *enc_master = sde_enc->cur_master;
const struct sde_rect *roi = &sde_enc->cur_conn_roi;
- struct msm_mode_info mode_info;
struct msm_display_dsc_info *dsc = NULL;
struct sde_hw_ctl *hw_ctl;
struct sde_ctl_dsc_cfg cfg;
- int rc;
if (hw_dsc == NULL || hw_pp == NULL || !enc_master) {
SDE_ERROR_ENC(sde_enc, "invalid params for DSC\n");
return -EINVAL;
}
- rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
- if (rc) {
- SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
- return -EINVAL;
- }
hw_ctl = enc_master->hw_ctl;
memset(&cfg, 0, sizeof(cfg));
- dsc = &mode_info.comp_info.dsc_info;
+ dsc = &sde_enc->mode_info.comp_info.dsc_info;
_sde_encoder_dsc_update_pic_dim(dsc, roi->w, roi->h);
this_frame_slices = roi->w / dsc->slice_width;
@@ -1407,11 +1340,10 @@
struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
struct msm_display_dsc_info dsc[MAX_CHANNELS_PER_ENC];
- struct msm_mode_info mode_info;
bool half_panel_partial_update;
struct sde_hw_ctl *hw_ctl = enc_master->hw_ctl;
struct sde_ctl_dsc_cfg cfg;
- int i, rc;
+ int i;
memset(&cfg, 0, sizeof(cfg));
@@ -1426,12 +1358,6 @@
}
}
- rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
- if (rc) {
- SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
- return -EINVAL;
- }
-
half_panel_partial_update =
hweight_long(params->affected_displays) == 1;
@@ -1441,8 +1367,8 @@
if (enc_master->intf_mode == INTF_MODE_VIDEO)
dsc_common_mode |= DSC_MODE_VIDEO;
- memcpy(&dsc[0], &mode_info.comp_info.dsc_info, sizeof(dsc[0]));
- memcpy(&dsc[1], &mode_info.comp_info.dsc_info, sizeof(dsc[1]));
+ memcpy(&dsc[0], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[0]));
+ memcpy(&dsc[1], &sde_enc->mode_info.comp_info.dsc_info, sizeof(dsc[1]));
/*
* Since both DSC use same pic dimension, set same pic dimension
@@ -1530,11 +1456,10 @@
struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
struct sde_hw_pingpong *hw_dsc_pp[MAX_CHANNELS_PER_ENC];
struct msm_display_dsc_info *dsc = NULL;
- struct msm_mode_info mode_info;
bool half_panel_partial_update;
struct sde_hw_ctl *hw_ctl = enc_master->hw_ctl;
struct sde_ctl_dsc_cfg cfg;
- int i, rc;
+ int i;
memset(&cfg, 0, sizeof(cfg));
@@ -1549,13 +1474,7 @@
}
}
- rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
- if (rc) {
- SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
- return -EINVAL;
- }
-
- dsc = &mode_info.comp_info.dsc_info;
+ dsc = &sde_enc->mode_info.comp_info.dsc_info;
half_panel_partial_update =
hweight_long(params->affected_displays) == 1;
@@ -1720,9 +1639,8 @@
struct sde_kms *sde_kms;
struct sde_hw_mdp *hw_mdptop;
struct drm_encoder *drm_enc;
- struct msm_mode_info mode_info;
struct sde_encoder_virt *sde_enc;
- int i, rc = 0;
+ int i;
sde_enc = to_sde_encoder_virt(phys_enc->parent);
@@ -1752,18 +1670,12 @@
return;
}
- rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
- if (rc) {
- SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
- return;
- }
-
if (hw_mdptop->ops.setup_vsync_source) {
for (i = 0; i < sde_enc->num_phys_encs; i++)
vsync_cfg.ppnumber[i] = sde_enc->hw_pp[i]->idx;
vsync_cfg.pp_count = sde_enc->num_phys_encs;
- vsync_cfg.frame_rate = mode_info.frame_rate;
+ vsync_cfg.frame_rate = sde_enc->mode_info.frame_rate;
vsync_cfg.vsync_source = vsync_source;
vsync_cfg.is_dummy = is_dummy;
@@ -1955,9 +1867,8 @@
struct sde_rsc_cmd_config *rsc_config;
int ret, prefill_lines;
struct msm_display_info *disp_info;
- struct msm_mode_info mode_info;
+ struct msm_mode_info *mode_info;
int wait_vblank_crtc_id = SDE_RSC_INVALID_CRTC_ID;
- int rc = 0;
u32 qsync_mode = 0;
if (!drm_enc || !drm_enc->dev) {
@@ -1966,6 +1877,8 @@
}
sde_enc = to_sde_encoder_virt(drm_enc);
+ mode_info = &sde_enc->mode_info;
+
crtc = sde_enc->crtc;
if (!sde_enc->crtc) {
@@ -1980,12 +1893,6 @@
return 0;
}
- rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
- if (rc) {
- SDE_ERROR_ENC(sde_enc, "failed to mode info\n");
- return 0;
- }
-
/**
* only primary command mode panel without Qsync can request CMD state.
* all other panels/displays can request for VID state including
@@ -2006,19 +1913,19 @@
SDE_EVT32(rsc_state, qsync_mode);
- prefill_lines = mode_info.prefill_lines;
+ prefill_lines = mode_info->prefill_lines;
/* compare specific items and reconfigure the rsc */
- if ((rsc_config->fps != mode_info.frame_rate) ||
- (rsc_config->vtotal != mode_info.vtotal) ||
+ if ((rsc_config->fps != mode_info->frame_rate) ||
+ (rsc_config->vtotal != mode_info->vtotal) ||
(rsc_config->prefill_lines != prefill_lines) ||
- (rsc_config->jitter_numer != mode_info.jitter_numer) ||
- (rsc_config->jitter_denom != mode_info.jitter_denom)) {
- rsc_config->fps = mode_info.frame_rate;
- rsc_config->vtotal = mode_info.vtotal;
+ (rsc_config->jitter_numer != mode_info->jitter_numer) ||
+ (rsc_config->jitter_denom != mode_info->jitter_denom)) {
+ rsc_config->fps = mode_info->frame_rate;
+ rsc_config->vtotal = mode_info->vtotal;
rsc_config->prefill_lines = prefill_lines;
- rsc_config->jitter_numer = mode_info.jitter_numer;
- rsc_config->jitter_denom = mode_info.jitter_denom;
+ rsc_config->jitter_numer = mode_info->jitter_numer;
+ rsc_config->jitter_denom = mode_info->jitter_denom;
sde_enc->rsc_state_init = false;
}
@@ -3166,7 +3073,6 @@
int i, ret = 0;
struct msm_compression_info *comp_info = NULL;
struct drm_display_mode *cur_mode = NULL;
- struct msm_mode_info mode_info;
struct msm_display_info *disp_info;
if (!drm_enc) {
@@ -3181,16 +3087,10 @@
return;
}
- ret = _sde_encoder_get_mode_info(drm_enc, &mode_info);
- if (ret) {
- SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
- return;
- }
-
if (drm_enc->crtc && !sde_enc->crtc)
sde_enc->crtc = drm_enc->crtc;
- comp_info = &mode_info.comp_info;
+ comp_info = &sde_enc->mode_info.comp_info;
cur_mode = &sde_enc->base.crtc->state->adjusted_mode;
SDE_DEBUG_ENC(sde_enc, "\n");
@@ -3244,7 +3144,7 @@
phys->comp_type = comp_info->comp_type;
phys->comp_ratio = comp_info->comp_ratio;
- phys->wide_bus_en = mode_info.wide_bus_en;
+ phys->wide_bus_en = sde_enc->mode_info.wide_bus_en;
phys->frame_trigger_mode = sde_enc->frame_trigger_mode;
if (phys->comp_type == MSM_DISPLAY_COMPRESSION_DSC) {
phys->dsc_extra_pclk_cycle_cnt =
@@ -3372,6 +3272,7 @@
* outstanding events and timers have been completed
*/
sde_enc->crtc = NULL;
+ memset(&sde_enc->mode_info, 0, sizeof(sde_enc->mode_info));
SDE_DEBUG_ENC(sde_enc, "encoder disabled\n");
@@ -4219,11 +4120,10 @@
static void _sde_encoder_setup_dither(struct sde_encoder_phys *phys)
{
void *dither_cfg;
- int ret = 0, rc, i = 0;
+ int ret = 0, i = 0;
size_t len = 0;
enum sde_rm_topology_name topology;
struct drm_encoder *drm_enc;
- struct msm_mode_info mode_info;
struct msm_display_dsc_info *dsc = NULL;
struct sde_encoder_virt *sde_enc;
struct sde_hw_pingpong *hw_pp;
@@ -4239,13 +4139,7 @@
drm_enc = phys->parent;
sde_enc = to_sde_encoder_virt(drm_enc);
- rc = _sde_encoder_get_mode_info(&sde_enc->base, &mode_info);
- if (rc) {
- SDE_ERROR_ENC(sde_enc, "failed to get mode info\n");
- return;
- }
-
- dsc = &mode_info.comp_info.dsc_info;
+ dsc = &sde_enc->mode_info.comp_info.dsc_info;
/* disable dither for 10 bpp or 10bpc dsc config */
if (dsc->bpp == 10 || dsc->bpc == 10) {
phys->hw_pp->ops.setup_dither(phys->hw_pp, NULL, 0);
@@ -5596,22 +5490,16 @@
u32 sde_encoder_get_fps(struct drm_encoder *drm_enc)
{
- struct msm_mode_info mode_info;
- int rc;
+ struct sde_encoder_virt *sde_enc;
if (!drm_enc) {
SDE_ERROR("invalid encoder\n");
return 0;
}
- rc = _sde_encoder_get_mode_info(drm_enc, &mode_info);
- if (rc) {
- SDE_ERROR_ENC(to_sde_encoder_virt(drm_enc),
- "failed to get mode info\n");
- return 0;
- }
+ sde_enc = to_sde_encoder_virt(drm_enc);
- return mode_info.frame_rate;
+ return sde_enc->mode_info.frame_rate;
}
enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder)
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index e0bb15d8..738ecbb 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -3970,8 +3970,10 @@
kfree(sde_cfg->vbif[i].qos_tbl[j].priority_lvl);
}
- for (i = 0; i < SDE_QOS_LUT_USAGE_MAX; i++)
+ for (i = 0; i < SDE_QOS_LUT_USAGE_MAX; i++) {
+ kfree(sde_cfg->perf.sfe_lut_tbl[i].entries);
kfree(sde_cfg->perf.qos_lut_tbl[i].entries);
+ }
kfree(sde_cfg->dma_formats);
kfree(sde_cfg->cursor_formats);
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index 06628d4..219ee07 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -2967,7 +2967,7 @@
node = of_find_node_by_name(parent, node_name);
if (!node) {
- SDE_ERROR("failed to find node %s\n", node_name);
+ SDE_DEBUG("failed to find node %s\n", node_name);
return -EINVAL;
}
@@ -2987,7 +2987,7 @@
data->num_splash_displays = num_displays;
- pr_info("splash mem num_regions:%d\n", num_regions);
+ SDE_DEBUG("splash mem num_regions:%d\n", num_regions);
if (num_displays > num_regions) {
share_splash_mem = true;
pr_info(":%d displays share same splash buf\n", num_displays);
@@ -3020,7 +3020,7 @@
data->splash_display[i].splash = &data->splash_mem[0];
}
- pr_info("splash mem for disp:%d add:%lx size:%x\n", (i + 1),
+ SDE_DEBUG("splash mem for disp:%d add:%lx size:%x\n", (i + 1),
splash_display->splash->splash_buf_base,
splash_display->splash->splash_buf_size);
}
diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h
index 03178ca..5c1dc4a 100644
--- a/drivers/gpu/drm/msm/sde_hdcp.h
+++ b/drivers/gpu/drm/msm/sde_hdcp.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2012, 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012, 2014-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __SDE_HDCP_H__
@@ -13,12 +13,15 @@
#include <linux/debugfs.h>
#include <linux/of_device.h>
#include <linux/i2c.h>
+#include <linux/list.h>
#include <drm/drmP.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
#include <linux/hdcp_qseecom.h>
#include "sde_kms.h"
+#define MAX_STREAM_COUNT 2
+
enum sde_hdcp_client_id {
HDCP_CLIENT_HDMI,
HDCP_CLIENT_DP,
@@ -38,6 +41,18 @@
HDCP_VERSION_MAX = BIT(2),
};
+struct stream_info {
+ u8 stream_id;
+ u8 virtual_channel;
+};
+
+struct sde_hdcp_stream {
+ struct list_head list;
+ u8 stream_id;
+ u8 virtual_channel;
+ u32 stream_handle;
+};
+
struct sde_hdcp_init_data {
struct device *msm_hdcp_dev;
struct dss_io_data *core_io;
@@ -67,7 +82,13 @@
bool (*feature_supported)(void *input);
void (*force_encryption)(void *input, bool enable);
bool (*sink_support)(void *input);
+ int (*set_mode)(void *input, bool mst_enabled);
+ int (*on)(void *input);
void (*off)(void *hdcp_ctrl);
+ int (*register_streams)(void *input, u8 num_streams,
+ struct stream_info *streams);
+ int (*deregister_streams)(void *input, u8 num_streams,
+ struct stream_info *streams);
};
static inline const char *sde_hdcp_state_name(enum sde_hdcp_state hdcp_state)
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.c b/drivers/gpu/drm/msm/sde_hdcp_2x.c
index c72ab1b..f578e09 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.c
+++ b/drivers/gpu/drm/msm/sde_hdcp_2x.c
@@ -64,6 +64,10 @@
atomic_t hdcp_off;
enum sde_hdcp_2x_device_type device_type;
u8 min_enc_level;
+ struct list_head stream_handles;
+ u8 stream_count;
+ struct stream_info *streams;
+ u8 num_streams;
struct task_struct *thread;
struct completion response_completion;
@@ -315,6 +319,8 @@
static void sde_hdcp_2x_clean(struct sde_hdcp_2x_ctrl *hdcp)
{
+ struct list_head *element;
+ struct sde_hdcp_stream *stream_entry;
struct hdcp_transport_wakeup_data cdata = {HDCP_TRANSPORT_CMD_INVALID};
hdcp->authenticated = false;
@@ -322,10 +328,20 @@
cdata.context = hdcp->client_data;
cdata.cmd = HDCP_TRANSPORT_CMD_STATUS_FAILED;
- if (!atomic_read(&hdcp->hdcp_off))
- sde_hdcp_2x_wakeup_client(hdcp, &cdata);
+ while (!list_empty(&hdcp->stream_handles)) {
+ element = hdcp->stream_handles.next;
+ list_del(element);
- atomic_set(&hdcp->hdcp_off, 1);
+ stream_entry = list_entry(element, struct sde_hdcp_stream,
+ list);
+ hdcp2_close_stream(hdcp->hdcp2_ctx,
+ stream_entry->stream_handle);
+ kzfree(stream_entry);
+ hdcp->stream_count--;
+ }
+
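+ /* atomic_xchg ensures the failure notification is sent only once */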
+ if (!atomic_xchg(&hdcp->hdcp_off, 1))
+ sde_hdcp_2x_wakeup_client(hdcp, &cdata);
hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_STOP, &hdcp->app_data);
}
@@ -477,19 +493,26 @@
static void sde_hdcp_2x_init(struct sde_hdcp_2x_ctrl *hdcp)
{
int rc;
-
rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_START, &hdcp->app_data);
if (rc)
- goto exit;
+ sde_hdcp_2x_clean(hdcp);
+}
- pr_debug("[tz]: %s\n", sde_hdcp_2x_message_name(
- hdcp->app_data.response.data[0]));
+static void sde_hdcp_2x_start_auth(struct sde_hdcp_2x_ctrl *hdcp)
+{
+ int rc;
+
+ rc = hdcp2_app_comm(hdcp->hdcp2_ctx, HDCP2_CMD_START_AUTH,
+ &hdcp->app_data);
+ if (rc) {
+ sde_hdcp_2x_clean(hdcp);
+ return;
+ }
+
+ pr_debug("message received from TZ: %s\n",
+ sde_hdcp_2x_message_name(hdcp->app_data.response.data[0]));
sde_hdcp_2x_send_message(hdcp);
-
- return;
-exit:
- sde_hdcp_2x_clean(hdcp);
}
static void sde_hdcp_2x_timeout(struct sde_hdcp_2x_ctrl *hdcp)
@@ -539,7 +562,8 @@
goto exit;
}
- if (hdcp->device_type == HDCP_TXMTR_DP) {
+ if (hdcp->device_type == HDCP_TXMTR_DP ||
+ hdcp->device_type == HDCP_TXMTR_DP_MST) {
msg[0] = hdcp->last_msg;
message_id_bytes = 1;
}
@@ -625,6 +649,147 @@
sde_hdcp_2x_clean(hdcp);
}
+static struct list_head *sde_hdcp_2x_stream_present(
+ struct sde_hdcp_2x_ctrl *hdcp, u8 stream_id, u8 virtual_channel)
+{
+ struct sde_hdcp_stream *stream_entry;
+ struct list_head *entry;
+ bool present = false;
+
+ list_for_each(entry, &hdcp->stream_handles) {
+ stream_entry = list_entry(entry,
+ struct sde_hdcp_stream, list);
+ if (stream_entry->virtual_channel == virtual_channel &&
+ stream_entry->stream_id == stream_id) {
+ present = true;
+ break;
+ }
+ }
+
+ if (!present)
+ entry = NULL;
+ return entry;
+}
+
+static void sde_hdcp_2x_open_stream(struct sde_hdcp_2x_ctrl *hdcp)
+{
+ int rc;
+ size_t iterations, i;
+ u8 stream_id;
+ u8 virtual_channel;
+ u32 stream_handle = 0;
+ bool query_streams = false;
+
+ if (!hdcp->streams) {
+ pr_err("Array of streams to register is NULL\n");
+ return;
+ }
+
+ iterations = min(hdcp->num_streams, (u8)(MAX_STREAM_COUNT));
+
+ for (i = 0; i < iterations; i++) {
+ if (hdcp->stream_count == MAX_STREAM_COUNT) {
+ pr_debug("Registered the maximum amount of streams\n");
+ break;
+ }
+
+ stream_id = hdcp->streams[i].stream_id;
+ virtual_channel = hdcp->streams[i].virtual_channel;
+
+ pr_debug("Opening stream %d, virtual channel %d\n",
+ stream_id, virtual_channel);
+
+ if (sde_hdcp_2x_stream_present(hdcp, stream_id,
+ virtual_channel)) {
+ pr_debug("Stream %d, virtual channel %d already open\n",
+ stream_id, virtual_channel);
+ continue;
+ }
+
+ rc = hdcp2_open_stream(hdcp->hdcp2_ctx, virtual_channel,
+ stream_id, &stream_handle);
+ if (rc) {
+ pr_err("Unable to open stream %d, virtual channel %d\n",
+ stream_id, virtual_channel);
+ } else {
+ struct sde_hdcp_stream *stream =
+ kzalloc(sizeof(struct sde_hdcp_stream),
+ GFP_KERNEL);
+ if (!stream)
+ break;
+
+ INIT_LIST_HEAD(&stream->list);
+ stream->stream_handle = stream_handle;
+ stream->stream_id = stream_id;
+ stream->virtual_channel = virtual_channel;
+
+ list_add(&stream->list, &hdcp->stream_handles);
+ hdcp->stream_count++;
+
+ query_streams = true;
+ }
+ }
+
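+ /* newly opened streams need a stream-type query while authenticated */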
+ if (query_streams && hdcp->authenticated)
+ sde_hdcp_2x_query_stream(hdcp);
+}
+
+static void sde_hdcp_2x_close_stream(struct sde_hdcp_2x_ctrl *hdcp)
+{
+ int rc;
+ size_t iterations, i;
+ u8 stream_id;
+ u8 virtual_channel;
+ struct list_head *entry;
+ struct sde_hdcp_stream *stream_entry;
+ bool query_streams = false;
+
+ if (!hdcp->streams) {
+ pr_err("Array of streams to register is NULL\n");
+ return;
+ }
+
+ iterations = min(hdcp->num_streams, (u8)(MAX_STREAM_COUNT));
+
+ for (i = 0; i < iterations; i++) {
+ if (hdcp->stream_count == 0) {
+ pr_debug("No streams are currently registered\n");
+ return;
+ }
+
+ stream_id = hdcp->streams[i].stream_id;
+ virtual_channel = hdcp->streams[i].virtual_channel;
+
+ pr_debug("Closing stream %d, virtual channel %d\n",
+ stream_id, virtual_channel);
+
+ entry = sde_hdcp_2x_stream_present(hdcp, stream_id,
+ virtual_channel);
+
+ if (!entry) {
+ pr_err("Unable to find stream %d, virtual channel %d\n"
+ , stream_id, virtual_channel);
+ continue;
+ }
+
+ stream_entry = list_entry(entry, struct sde_hdcp_stream,
+ list);
+
+ rc = hdcp2_close_stream(hdcp->hdcp2_ctx,
+ stream_entry->stream_handle);
+ if (rc)
+ pr_err("Unable to close stream %d, virtual channel %d\n"
+ , stream_id, virtual_channel);
+ hdcp->stream_count--;
+ list_del(entry);
+ kzfree(stream_entry);
+ query_streams = true;
+ }
+
+ if (query_streams && hdcp->authenticated)
+ sde_hdcp_2x_query_stream(hdcp);
+}
+
/** sde_hdcp_2x_wakeup() - wakeup the module to execute a requested command
* @data: data required for executing corresponding command.
*
@@ -648,6 +813,8 @@
hdcp->timeout_left = data->timeout;
hdcp->total_message_length = data->total_message_length;
hdcp->min_enc_level = data->min_enc_level;
+ hdcp->streams = data->streams;
+ hdcp->num_streams = data->num_streams;
if (!completion_done(&hdcp->response_completion))
complete_all(&hdcp->response_completion);
@@ -709,6 +876,9 @@
case HDCP_2X_CMD_STOP:
sde_hdcp_2x_clean(hdcp);
break;
+ case HDCP_2X_CMD_START_AUTH:
+ sde_hdcp_2x_start_auth(hdcp);
+ break;
case HDCP_2X_CMD_MSG_SEND_SUCCESS:
sde_hdcp_2x_msg_sent(hdcp);
break;
@@ -733,6 +903,12 @@
}
sde_hdcp_2x_query_stream(hdcp);
break;
+ case HDCP_2X_CMD_OPEN_STREAMS:
+ sde_hdcp_2x_open_stream(hdcp);
+ break;
+ case HDCP_2X_CMD_CLOSE_STREAMS:
+ sde_hdcp_2x_close_stream(hdcp);
+ break;
default:
break;
}
@@ -777,16 +953,14 @@
goto unlock;
}
+ INIT_LIST_HEAD(&hdcp->stream_handles);
hdcp->client_data = data->client_data;
hdcp->client_ops = data->client_ops;
- hdcp->device_type = data->device_type;
-
- hdcp->hdcp2_ctx = hdcp2_init(hdcp->device_type);
INIT_KFIFO(hdcp->cmd_q);
init_waitqueue_head(&hdcp->wait_q);
- atomic_set(&hdcp->hdcp_off, 0);
+ atomic_set(&hdcp->hdcp_off, 1);
init_completion(&hdcp->response_completion);
@@ -811,6 +985,40 @@
return rc;
}
+int sde_hdcp_2x_enable(void *data, enum sde_hdcp_2x_device_type device_type)
+{
+ int rc = 0;
+ struct sde_hdcp_2x_ctrl *hdcp = data;
+
+ if (!hdcp)
+ return -EINVAL;
+
+ if (hdcp->hdcp2_ctx) {
+ pr_debug("HDCP library context already acquired\n");
+ return 0;
+ }
+
+ hdcp->device_type = device_type;
+ hdcp->hdcp2_ctx = hdcp2_init(hdcp->device_type);
+ if (!hdcp->hdcp2_ctx) {
+ pr_err("Unable to acquire HDCP library handle\n");
+ return -ENOMEM;
+ }
+
+ return rc;
+}
+
+void sde_hdcp_2x_disable(void *data)
+{
+ struct sde_hdcp_2x_ctrl *hdcp = data;
+
+ if (!hdcp || !hdcp->hdcp2_ctx)
+ return;
+
+ hdcp2_deinit(hdcp->hdcp2_ctx);
+ hdcp->hdcp2_ctx = NULL;
+}
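
With the library handle now acquired in sde_hdcp_2x_enable() and released in sde_hdcp_2x_disable(), a client is expected to bracket a session roughly as below. A hedged sketch; my_transport_ops and my_ctx are hypothetical placeholders, and the assumption is that register() fills in *hdcp_data and the ops table:

void *hdcp_data = NULL;
struct sde_hdcp_2x_register_data reg = {
	.client_ops = &my_transport_ops,	/* hypothetical */
	.ops = &hdcp_ops,			/* assumed filled by register() */
	.client_data = my_ctx,			/* hypothetical */
	.hdcp_data = &hdcp_data,
};

if (!sde_hdcp_2x_register(&reg) &&
    !sde_hdcp_2x_enable(hdcp_data, HDCP_TXMTR_DP_MST)) {
	/* ... wake the state machine, authenticate, stream ... */
	sde_hdcp_2x_disable(hdcp_data);
}
sde_hdcp_2x_deregister(hdcp_data);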
+
void sde_hdcp_2x_deregister(void *data)
{
struct sde_hdcp_2x_ctrl *hdcp = data;
@@ -818,7 +1026,7 @@
if (!hdcp)
return;
+ sde_hdcp_2x_disable(data);
kthread_stop(hdcp->thread);
- hdcp2_deinit(hdcp->hdcp2_ctx);
kzfree(hdcp);
}
diff --git a/drivers/gpu/drm/msm/sde_hdcp_2x.h b/drivers/gpu/drm/msm/sde_hdcp_2x.h
index 47247e4..cfcd7ce 100644
--- a/drivers/gpu/drm/msm/sde_hdcp_2x.h
+++ b/drivers/gpu/drm/msm/sde_hdcp_2x.h
@@ -15,8 +15,9 @@
/**
* enum sde_hdcp_2x_wakeup_cmd - commands for interacting with HDCP driver
* @HDCP_2X_CMD_INVALID: initialization value
- * @HDCP_2X_CMD_START: start authentication
- * @HDCP_2X_CMD_STOP: stop authentication
+ * @HDCP_2X_CMD_START: start HDCP driver
+ * @HDCP_2X_CMD_START_AUTH: start authentication
+ * @HDCP_2X_CMD_STOP: stop HDCP driver
* @HDCP_2X_CMD_MSG_SEND_SUCCESS: sending message to sink succeeded
* @HDCP_2X_CMD_MSG_SEND_FAILED: sending message to sink failed
* @HDCP_2X_CMD_MSG_SEND_TIMEOUT: sending message to sink timed out
@@ -26,10 +27,13 @@
* @HDCP_2X_CMD_QUERY_STREAM_TYPE: start content stream processing
* @HDCP_2X_CMD_LINK_FAILED: link failure notification
* @HDCP_2X_CMD_MIN_ENC_LEVEL: trigger minimum encryption level change
+ * @HDCP_2X_CMD_OPEN_STREAMS: open streams on one or more virtual channels
+ * @HDCP_2X_CMD_CLOSE_STREAMS: close streams on one or more virtual channels
*/
enum sde_hdcp_2x_wakeup_cmd {
HDCP_2X_CMD_INVALID,
HDCP_2X_CMD_START,
+ HDCP_2X_CMD_START_AUTH,
HDCP_2X_CMD_STOP,
HDCP_2X_CMD_MSG_SEND_SUCCESS,
HDCP_2X_CMD_MSG_SEND_FAILED,
@@ -40,6 +44,8 @@
HDCP_2X_CMD_QUERY_STREAM_TYPE,
HDCP_2X_CMD_LINK_FAILED,
HDCP_2X_CMD_MIN_ENC_LEVEL,
+ HDCP_2X_CMD_OPEN_STREAMS,
+ HDCP_2X_CMD_CLOSE_STREAMS,
};
/**
@@ -66,16 +72,19 @@
enum sde_hdcp_2x_device_type {
HDCP_TXMTR_HDMI = 0x8001,
- HDCP_TXMTR_DP = 0x8002
+ HDCP_TXMTR_DP = 0x8002,
+ HDCP_TXMTR_DP_MST = 0x8003
};
/**
* struct sde_hdcp_2x_wakeup_data - command and data sent to the HDCP driver
- * @cmd: command type
- * @context: void pointer to the HDCP driver instance
- * @buf: message received from the sink
- * @buf_len: length of message received from the sink
- * @timeout: time out value for timed transactions
+ * @cmd: command type
+ * @context: void pointer to the HDCP driver instance
+ * @buf: message received from the sink
+ * @buf_len: length of message received from the sink
+ * @timeout: time out value for timed transactions
+ * @streams: list indicating which streams need adjustment
+ * @num_streams: number of entries in streams
*/
struct sde_hdcp_2x_wakeup_data {
enum sde_hdcp_2x_wakeup_cmd cmd;
@@ -83,6 +92,8 @@
uint32_t total_message_length;
uint32_t timeout;
u8 min_enc_level;
+ struct stream_info *streams;
+ u8 num_streams;
};
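
The new streams/num_streams fields carry the per-stream ids into the state machine. A sketch of how a DP MST client might request stream registration; the stream_info field names are inferred from the usage in sde_hdcp_2x_open_stream(), and the entry point is the sde_hdcp_2x_wakeup() documented in the .c file:

struct stream_info streams[] = {
	{ .stream_id = 0, .virtual_channel = 1 },	/* inferred fields */
	{ .stream_id = 1, .virtual_channel = 2 },
};
struct sde_hdcp_2x_wakeup_data data = {
	.cmd = HDCP_2X_CMD_OPEN_STREAMS,
	.context = hdcp_data,			/* from registration */
	.streams = streams,
	.num_streams = ARRAY_SIZE(streams),	/* capped at MAX_STREAM_COUNT */
};

sde_hdcp_2x_wakeup(&data);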
/**
@@ -151,6 +162,10 @@
return TO_STR(HDCP_2X_CMD_MSG_RECV_TIMEOUT);
case HDCP_2X_CMD_QUERY_STREAM_TYPE:
return TO_STR(HDCP_2X_CMD_QUERY_STREAM_TYPE);
+ case HDCP_2X_CMD_OPEN_STREAMS:
+ return TO_STR(HDCP_2X_CMD_OPEN_STREAMS);
+ case HDCP_2X_CMD_CLOSE_STREAMS:
+ return TO_STR(HDCP_2X_CMD_CLOSE_STREAMS);
default:
return "UNKNOWN";
}
@@ -190,12 +205,13 @@
struct sde_hdcp_2x_register_data {
struct hdcp_transport_ops *client_ops;
struct sde_hdcp_2x_ops *ops;
- enum sde_hdcp_2x_device_type device_type;
void *client_data;
void **hdcp_data;
};
/* functions for the HDCP 2.2 state machine module */
int sde_hdcp_2x_register(struct sde_hdcp_2x_register_data *data);
+int sde_hdcp_2x_enable(void *data, enum sde_hdcp_2x_device_type device_type);
+void sde_hdcp_2x_disable(void *data);
void sde_hdcp_2x_deregister(void *data);
#endif
diff --git a/drivers/gpu/drm/msm/sde_rsc.c b/drivers/gpu/drm/msm/sde_rsc.c
index 725d34f..cf218c6 100644
--- a/drivers/gpu/drm/msm/sde_rsc.c
+++ b/drivers/gpu/drm/msm/sde_rsc.c
@@ -94,7 +94,7 @@
pr_err("invalid rsc index\n");
return ERR_PTR(-EINVAL);
} else if (!rsc_prv_list[rsc_index]) {
- pr_err("rsc not probed yet or not available\n");
+ pr_debug("rsc not probed yet or not available\n");
return NULL;
}
@@ -250,7 +250,7 @@
pr_err("invalid rsc index:%d\n", rsc_index);
return false;
} else if (!rsc_prv_list[rsc_index]) {
- pr_err("rsc idx:%d not probed yet or not available\n",
+ pr_debug("rsc idx:%d not probed yet or not available\n",
rsc_index);
return false;
}
diff --git a/drivers/gpu/drm/radeon/ci_dpm.c b/drivers/gpu/drm/radeon/ci_dpm.c
index d587779..a97294a 100644
--- a/drivers/gpu/drm/radeon/ci_dpm.c
+++ b/drivers/gpu/drm/radeon/ci_dpm.c
@@ -5676,7 +5676,7 @@
u16 data_offset, size;
u8 frev, crev;
struct ci_power_info *pi;
- enum pci_bus_speed speed_cap;
+ enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
struct pci_dev *root = rdev->pdev->bus->self;
int ret;
@@ -5685,7 +5685,8 @@
return -ENOMEM;
rdev->pm.dpm.priv = pi;
- speed_cap = pcie_get_speed_cap(root);
+ if (!pci_is_root_bus(rdev->pdev->bus))
+ speed_cap = pcie_get_speed_cap(root);
if (speed_cap == PCI_SPEED_UNKNOWN) {
pi->sys_pcie_mask = 0;
} else {
diff --git a/drivers/gpu/drm/radeon/si_dpm.c b/drivers/gpu/drm/radeon/si_dpm.c
index 8fb60b3..0a785ef 100644
--- a/drivers/gpu/drm/radeon/si_dpm.c
+++ b/drivers/gpu/drm/radeon/si_dpm.c
@@ -6899,7 +6899,7 @@
struct ni_power_info *ni_pi;
struct si_power_info *si_pi;
struct atom_clock_dividers dividers;
- enum pci_bus_speed speed_cap;
+ enum pci_bus_speed speed_cap = PCI_SPEED_UNKNOWN;
struct pci_dev *root = rdev->pdev->bus->self;
int ret;
@@ -6911,7 +6911,8 @@
eg_pi = &ni_pi->eg;
pi = &eg_pi->rv7xx;
- speed_cap = pcie_get_speed_cap(root);
+ if (!pci_is_root_bus(rdev->pdev->bus))
+ speed_cap = pcie_get_speed_cap(root);
if (speed_cap == PCI_SPEED_UNKNOWN) {
si_pi->sys_pcie_mask = 0;
} else {
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 3fb084f..8c31c9a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -672,6 +672,7 @@
return PTR_ERR(tcon->sclk0);
}
}
+ clk_prepare_enable(tcon->sclk0);
if (tcon->quirks->has_channel_1) {
tcon->sclk1 = devm_clk_get(dev, "tcon-ch1");
@@ -686,6 +687,7 @@
static void sun4i_tcon_free_clocks(struct sun4i_tcon *tcon)
{
+ clk_disable_unprepare(tcon->sclk0);
clk_disable_unprepare(tcon->clk);
}
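
The sclk0 fix pairs clk_prepare_enable() in the setup path with clk_disable_unprepare() in the teardown path. clk_prepare_enable() can fail, so a more defensive variant would check the return value (a sketch, not what the patch itself does):

ret = clk_prepare_enable(tcon->sclk0);
if (ret) {
	dev_err(dev, "Couldn't enable the TCON channel 0 clock\n");
	return ret;
}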
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index c2f7a04..9cc556a 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -452,7 +452,8 @@
.major = 5,
.minor = 0,
.patchid = 0,
- .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU,
+ .features = ADRENO_64BIT | ADRENO_RPMH | ADRENO_GPMU |
+ ADRENO_IOCOHERENT,
.sqefw_name = "a650_sqe.fw",
.zap_name = "a650_zap",
.gpudev = &adreno_a6xx_gpudev,
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index e123112..293142d 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -167,8 +167,9 @@
{
struct stp_master *master;
size_t size;
+ unsigned long align = sizeof(unsigned long);
- size = ALIGN(stm->data->sw_nchannels, 8) / 8;
+ size = ALIGN(stm->data->sw_nchannels, align) / align;
size += sizeof(struct stp_master);
master = kzalloc(size, GFP_ATOMIC);
if (!master)
@@ -218,8 +219,8 @@
bitmap_release_region(&master->chan_map[0], output->channel,
ilog2(output->nr_chans));
- output->nr_chans = 0;
master->nr_free += output->nr_chans;
+ output->nr_chans = 0;
}
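
The stm/core.c reorder matters because the old sequence zeroed output->nr_chans before crediting it back to master->nr_free, so the master never regained its channels. A standalone illustration of the ordering bug:

#include <stdio.h>

int main(void)
{
	unsigned int nr_chans = 4;	/* channels held by the output */
	unsigned int nr_free = 28;	/* free channels on the master */

	/* Old order: nr_chans = 0; nr_free += nr_chans;  => still 28 */
	/* New order: credit first, then clear.           => 32      */
	nr_free += nr_chans;
	nr_chans = 0;

	printf("nr_free = %u\n", nr_free);	/* prints 32 */
	return 0;
}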
/*
diff --git a/drivers/i2c/busses/i2c-omap.c b/drivers/i2c/busses/i2c-omap.c
index 65d06a8..2ac8609 100644
--- a/drivers/i2c/busses/i2c-omap.c
+++ b/drivers/i2c/busses/i2c-omap.c
@@ -1498,8 +1498,7 @@
return 0;
}
-#ifdef CONFIG_PM
-static int omap_i2c_runtime_suspend(struct device *dev)
+static int __maybe_unused omap_i2c_runtime_suspend(struct device *dev)
{
struct omap_i2c_dev *omap = dev_get_drvdata(dev);
@@ -1525,7 +1524,7 @@
return 0;
}
-static int omap_i2c_runtime_resume(struct device *dev)
+static int __maybe_unused omap_i2c_runtime_resume(struct device *dev)
{
struct omap_i2c_dev *omap = dev_get_drvdata(dev);
@@ -1540,20 +1539,18 @@
}
static const struct dev_pm_ops omap_i2c_pm_ops = {
+ SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(omap_i2c_runtime_suspend,
omap_i2c_runtime_resume, NULL)
};
-#define OMAP_I2C_PM_OPS (&omap_i2c_pm_ops)
-#else
-#define OMAP_I2C_PM_OPS NULL
-#endif /* CONFIG_PM */
static struct platform_driver omap_i2c_driver = {
.probe = omap_i2c_probe,
.remove = omap_i2c_remove,
.driver = {
.name = "omap_i2c",
- .pm = OMAP_I2C_PM_OPS,
+ .pm = &omap_i2c_pm_ops,
.of_match_table = of_match_ptr(omap_i2c_of_match),
},
};
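
Dropping the #ifdef CONFIG_PM block in favor of __maybe_unused is the common modern pattern: the dev_pm_ops table is always defined, and the compiler silently discards the callbacks when no PM configuration references them. The shape of the idiom, reduced to a sketch with hypothetical foo_* names:

static int __maybe_unused foo_runtime_suspend(struct device *dev)
{
	/* save state, gate clocks, ... */
	return 0;
}

static int __maybe_unused foo_runtime_resume(struct device *dev)
{
	/* ungate clocks, restore state, ... */
	return 0;
}

static const struct dev_pm_ops foo_pm_ops = {
	SET_NOIRQ_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
				      pm_runtime_force_resume)
	SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
};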
diff --git a/drivers/infiniband/hw/hfi1/ud.c b/drivers/infiniband/hw/hfi1/ud.c
index 70d39fc..54eb695 100644
--- a/drivers/infiniband/hw/hfi1/ud.c
+++ b/drivers/infiniband/hw/hfi1/ud.c
@@ -980,7 +980,6 @@
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
wc.ex.imm_data = packet->ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
- tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;
diff --git a/drivers/infiniband/hw/qib/qib_ud.c b/drivers/infiniband/hw/qib/qib_ud.c
index f8d029a..bce2b5c 100644
--- a/drivers/infiniband/hw/qib/qib_ud.c
+++ b/drivers/infiniband/hw/qib/qib_ud.c
@@ -513,7 +513,6 @@
opcode == IB_OPCODE_UD_SEND_ONLY_WITH_IMMEDIATE) {
wc.ex.imm_data = ohdr->u.ud.imm_data;
wc.wc_flags = IB_WC_WITH_IMM;
- tlen -= sizeof(u32);
} else if (opcode == IB_OPCODE_UD_SEND_ONLY) {
wc.ex.imm_data = 0;
wc.wc_flags = 0;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h
index 1abe3c6..b22d02c 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib.h
+++ b/drivers/infiniband/ulp/ipoib/ipoib.h
@@ -248,7 +248,6 @@
struct list_head list;
struct net_device *dev;
struct ipoib_neigh *neigh;
- struct ipoib_path *path;
struct ipoib_tx_buf *tx_ring;
unsigned int tx_head;
unsigned int tx_tail;
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_cm.c b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
index 0428e01e..aa9dcfc 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_cm.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_cm.c
@@ -1312,7 +1312,6 @@
neigh->cm = tx;
tx->neigh = neigh;
- tx->path = path;
tx->dev = dev;
list_add(&tx->list, &priv->cm.start_list);
set_bit(IPOIB_FLAG_INITIALIZED, &tx->flags);
@@ -1371,7 +1370,7 @@
neigh->daddr + QPN_AND_OPTIONS_OFFSET);
goto free_neigh;
}
- memcpy(&pathrec, &p->path->pathrec, sizeof(pathrec));
+ memcpy(&pathrec, &path->pathrec, sizeof(pathrec));
spin_unlock_irqrestore(&priv->lock, flags);
netif_tx_unlock_bh(dev);
diff --git a/drivers/input/mouse/elan_i2c_core.c b/drivers/input/mouse/elan_i2c_core.c
index 225ae69..628ef61 100644
--- a/drivers/input/mouse/elan_i2c_core.c
+++ b/drivers/input/mouse/elan_i2c_core.c
@@ -1337,6 +1337,7 @@
{ "ELAN0000", 0 },
{ "ELAN0100", 0 },
{ "ELAN0600", 0 },
+ { "ELAN0601", 0 },
{ "ELAN0602", 0 },
{ "ELAN0605", 0 },
{ "ELAN0608", 0 },
diff --git a/drivers/input/tablet/wacom_serial4.c b/drivers/input/tablet/wacom_serial4.c
index 38bfaca..150f9ee 100644
--- a/drivers/input/tablet/wacom_serial4.c
+++ b/drivers/input/tablet/wacom_serial4.c
@@ -187,6 +187,7 @@
MODEL_DIGITIZER_II = 0x5544, /* UD */
MODEL_GRAPHIRE = 0x4554, /* ET */
MODEL_PENPARTNER = 0x4354, /* CT */
+ MODEL_ARTPAD_II = 0x4B54, /* KT */
};
static void wacom_handle_model_response(struct wacom *wacom)
@@ -245,6 +246,7 @@
wacom->flags = F_HAS_STYLUS2 | F_HAS_SCROLLWHEEL;
break;
+ case MODEL_ARTPAD_II:
case MODEL_DIGITIZER_II:
wacom->dev->name = "Wacom Digitizer II";
wacom->dev->id.version = MODEL_DIGITIZER_II;
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 34c9aa7..27500ab 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -1929,16 +1929,13 @@
static void do_detach(struct iommu_dev_data *dev_data)
{
+ struct protection_domain *domain = dev_data->domain;
struct amd_iommu *iommu;
u16 alias;
iommu = amd_iommu_rlookup_table[dev_data->devid];
alias = dev_data->alias;
- /* decrease reference counters */
- dev_data->domain->dev_iommu[iommu->index] -= 1;
- dev_data->domain->dev_cnt -= 1;
-
/* Update data structures */
dev_data->domain = NULL;
list_del(&dev_data->list);
@@ -1948,6 +1945,16 @@
/* Flush the DTE entry */
device_flush_dte(dev_data);
+
+ /* Flush IOTLB */
+ domain_flush_tlb_pde(domain);
+
+ /* Wait for the flushes to finish */
+ domain_flush_complete(domain);
+
+ /* decrease reference counters - needs to happen after the flushes */
+ domain->dev_iommu[iommu->index] -= 1;
+ domain->dev_cnt -= 1;
}
/*
@@ -2555,13 +2562,13 @@
bus_addr = address + s->dma_address + (j << PAGE_SHIFT);
iommu_unmap_page(domain, bus_addr, PAGE_SIZE);
- if (--mapped_pages)
+ if (--mapped_pages == 0)
goto out_free_iova;
}
}
out_free_iova:
- free_iova_fast(&dma_dom->iovad, address, npages);
+ free_iova_fast(&dma_dom->iovad, address >> PAGE_SHIFT, npages);
out_err:
return 0;
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index da86788..4378f2c 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -240,7 +240,7 @@
static struct iommu_debug_attr secure_attr = {
.dma_type = 0,
- .vmid = VMID_CP_CAMERA,
+ .vmid = VMID_CP_PIXEL,
};
static int iommu_debug_set_attrs(struct iommu_debug_device *ddev,
diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c
index 4c2246f..15579cb 100644
--- a/drivers/irqchip/irq-gic-v3-its.c
+++ b/drivers/irqchip/irq-gic-v3-its.c
@@ -1581,6 +1581,9 @@
nr_irqs /= 2;
} while (nr_irqs > 0);
+ if (!nr_irqs)
+ err = -ENOSPC;
+
if (err)
goto out;
@@ -1951,6 +1954,29 @@
get_order(max_t(u32, LPI_PENDBASE_SZ, SZ_64K)));
}
+static u64 its_clear_vpend_valid(void __iomem *vlpi_base)
+{
+ u32 count = 1000000; /* 1s! */
+ bool clean;
+ u64 val;
+
+ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+ val &= ~GICR_VPENDBASER_Valid;
+ gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+
+ do {
+ val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
+ clean = !(val & GICR_VPENDBASER_Dirty);
+ if (!clean) {
+ count--;
+ cpu_relax();
+ udelay(1);
+ }
+ } while (!clean && count);
+
+ return val;
+}
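
its_clear_vpend_valid() bounds the wait for the Dirty bit at roughly one second (10^6 polls with a 1us delay each). The pattern, reduced to a runnable userspace model; the DIRTY bit position and the register read are stand-ins, not the real GIC layout:

#include <stdbool.h>
#include <stdio.h>

#define DIRTY (1ULL << 60)	/* stand-in for GICR_VPENDBASER_Dirty */

/* Pretend hardware: dirty for the first three reads. */
static unsigned long long read_vpendbaser(void)
{
	static int busy = 3;
	return busy-- > 0 ? DIRTY : 0;
}

int main(void)
{
	unsigned int count = 1000000;	/* ~1s at 1us per poll */
	unsigned long long val;
	bool clean;

	do {
		val = read_vpendbaser();
		clean = !(val & DIRTY);
		if (!clean)
			count--;	/* kernel also does cpu_relax(); udelay(1); */
	} while (!clean && count);

	printf("%s\n", clean ? "clean" : "timed out");
	return 0;
}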
+
static void its_cpu_init_lpis(void)
{
void __iomem *rbase = gic_data_rdist_rd_base();
@@ -2024,6 +2050,30 @@
val |= GICR_CTLR_ENABLE_LPIS;
writel_relaxed(val, rbase + GICR_CTLR);
+ if (gic_rdists->has_vlpis) {
+ void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
+
+ /*
+ * It's possible for a CPU to receive VLPIs before it is
+ * scheduled as a vPE, especially for the first CPU, and a
+ * VLPI with an INTID larger than 2^(IDbits+1) will be
+ * considered out of range and dropped by the GIC.
+ * So we initialize IDbits to a known value to avoid VLPI drops.
+ */
+ val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
+ pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
+ smp_processor_id(), val);
+ gits_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
+
+ /*
+ * Also clear the Valid bit of GICR_VPENDBASER, in case stale
+ * programming was left behind and could corrupt memory.
+ */
+ val = its_clear_vpend_valid(vlpi_base);
+ WARN_ON(val & GICR_VPENDBASER_Dirty);
+ }
+
/* Make sure the GIC has seen the above */
dsb(sy);
}
@@ -2644,26 +2694,11 @@
static void its_vpe_deschedule(struct its_vpe *vpe)
{
void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
- u32 count = 1000000; /* 1s! */
- bool clean;
u64 val;
- /* We're being scheduled out */
- val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
- val &= ~GICR_VPENDBASER_Valid;
- gits_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
+ val = its_clear_vpend_valid(vlpi_base);
- do {
- val = gits_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
- clean = !(val & GICR_VPENDBASER_Dirty);
- if (!clean) {
- count--;
- cpu_relax();
- udelay(1);
- }
- } while (!clean && count);
-
- if (unlikely(!clean && !count)) {
+ if (unlikely(val & GICR_VPENDBASER_Dirty)) {
pr_err_ratelimited("ITS virtual pending table not cleaning\n");
vpe->idai = false;
vpe->pending_last = true;
diff --git a/drivers/irqchip/irq-mmp.c b/drivers/irqchip/irq-mmp.c
index 25f32e1..3496b61 100644
--- a/drivers/irqchip/irq-mmp.c
+++ b/drivers/irqchip/irq-mmp.c
@@ -34,6 +34,9 @@
#define SEL_INT_PENDING (1 << 6)
#define SEL_INT_NUM_MASK 0x3f
+#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
+#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)
+
struct icu_chip_data {
int nr_irqs;
unsigned int virq_base;
@@ -190,7 +193,8 @@
static const struct mmp_intc_conf mmp2_conf = {
.conf_enable = 0x20,
.conf_disable = 0x0,
- .conf_mask = 0x7f,
+ .conf_mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
+ MMP2_ICU_INT_ROUTE_PJ4_FIQ,
};
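
The new conf_mask keeps only the two PJ4 route bits instead of the whole low byte; a quick standalone check of the constants:

#include <stdio.h>

#define MMP2_ICU_INT_ROUTE_PJ4_IRQ (1 << 5)
#define MMP2_ICU_INT_ROUTE_PJ4_FIQ (1 << 6)

int main(void)
{
	unsigned int mask = MMP2_ICU_INT_ROUTE_PJ4_IRQ |
			    MMP2_ICU_INT_ROUTE_PJ4_FIQ;

	printf("conf_mask = 0x%02x (previously 0x7f)\n", mask);	/* 0x60 */
	return 0;
}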
static void __exception_irq_entry mmp_handle_irq(struct pt_regs *regs)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 7033a28..9df1334 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4630,7 +4630,6 @@
atomic_inc(&r10_bio->remaining);
read_bio->bi_next = NULL;
generic_make_request(read_bio);
- sector_nr += nr_sectors;
sectors_done += nr_sectors;
if (sector_nr <= last)
goto read_more;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
index f5e612c..3b3eb8e 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/cam_ife_hw_mgr.c
@@ -36,7 +36,7 @@
(CAM_ISP_PACKET_META_GENERIC_BLOB_COMMON + 1)
#define CAM_ISP_GENERIC_BLOB_TYPE_MAX \
- (CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V2 + 1)
+ (CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG + 1)
static uint32_t blob_type_hw_cmd_map[CAM_ISP_GENERIC_BLOB_TYPE_MAX] = {
CAM_ISP_HW_CMD_GET_HFR_UPDATE,
@@ -3600,6 +3600,54 @@
return rc;
}
+static int cam_isp_blob_core_cfg_update(
+ uint32_t blob_type,
+ struct cam_isp_generic_blob_info *blob_info,
+ struct cam_isp_core_config *core_config,
+ struct cam_hw_prepare_update_args *prepare)
+{
+ struct cam_ife_hw_mgr_ctx *ctx = NULL;
+ struct cam_ife_hw_mgr_res *hw_mgr_res;
+ struct cam_hw_intf *hw_intf;
+ int rc = -EINVAL, i;
+ struct cam_vfe_core_config_args vfe_core_config;
+
+ ctx = prepare->ctxt_to_hw_map;
+
+ list_for_each_entry(hw_mgr_res, &ctx->res_list_ife_src, list) {
+ for (i = 0; i < CAM_ISP_HW_SPLIT_MAX; i++) {
+ if (!hw_mgr_res->hw_res[i] ||
+ hw_mgr_res->res_id != CAM_ISP_HW_VFE_IN_CAMIF)
+ continue;
+
+ hw_intf = hw_mgr_res->hw_res[i]->hw_intf;
+ if (hw_intf && hw_intf->hw_ops.process_cmd) {
+ vfe_core_config.node_res =
+ hw_mgr_res->hw_res[i];
+
+ memcpy(&vfe_core_config.core_config,
+ core_config,
+ sizeof(struct cam_isp_core_config));
+
+ rc = hw_intf->hw_ops.process_cmd(
+ hw_intf->hw_priv,
+ CAM_ISP_HW_CMD_CORE_CONFIG,
+ &vfe_core_config,
+ sizeof(
+ struct cam_vfe_core_config_args));
+ if (rc)
+ CAM_ERR(CAM_ISP, "Core cfg parse fail");
+ } else {
+ CAM_WARN(CAM_ISP, "NULL hw_intf!");
+ }
+ }
+ }
+
+ return rc;
+}
+
static int cam_isp_blob_clock_update(
uint32_t blob_type,
struct cam_isp_generic_blob_info *blob_info,
@@ -3939,6 +3987,16 @@
CAM_ERR(CAM_ISP, "FS Update Failed rc: %d", rc);
}
break;
+ case CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG: {
+ struct cam_isp_core_config *core_config =
+ (struct cam_isp_core_config *)blob_data;
+
+ rc = cam_isp_blob_core_cfg_update(blob_type, blob_info,
+ core_config, prepare);
+ if (rc)
+ CAM_ERR(CAM_ISP, "Core cfg update fail: %d", rc);
+ }
+ break;
default:
CAM_WARN(CAM_ISP, "Invalid blob type %d", blob_type);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
index 3bdd4fb..a3321d6 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_isp_hw.h
@@ -98,6 +98,7 @@
CAM_ISP_HW_CMD_FE_UPDATE_IN_RD,
CAM_ISP_HW_CMD_FE_UPDATE_BUS_RD,
CAM_ISP_HW_CMD_UBWC_UPDATE_V2,
+ CAM_ISP_HW_CMD_CORE_CONFIG,
CAM_ISP_HW_CMD_MAX,
};
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
index c331a55..1c1f867 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/include/cam_vfe_hw_intf.h
@@ -183,6 +183,17 @@
};
/*
+ * struct cam_vfe_core_config_args:
+ *
+ * @node_res: Resource to which the core config applies
+ * @core_config: Core config for IFE
+ */
+struct cam_vfe_core_config_args {
+ struct cam_isp_resource_node *node_res;
+ struct cam_isp_core_config core_config;
+};
+
+/*
* struct cam_vfe_bw_update_args:
*
* @node_res: Resource to get the BW
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
index 485da8b..3c8a7e2 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/cam_vfe_core.c
@@ -581,6 +581,7 @@
case CAM_ISP_HW_CMD_CLOCK_UPDATE:
case CAM_ISP_HW_CMD_BW_UPDATE:
case CAM_ISP_HW_CMD_BW_CONTROL:
+ case CAM_ISP_HW_CMD_CORE_CONFIG:
rc = core_info->vfe_top->hw_ops.process_cmd(
core_info->vfe_top->top_priv, cmd_type, cmd_args,
arg_size);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
index 20ad20f..6d5a514 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_bus/cam_vfe_bus_ver3.c
@@ -155,6 +155,9 @@
uint32_t dual_hw_alternate_vfe_id;
struct list_head vfe_out_list;
+ uint32_t is_master;
+ uint32_t is_dual;
+
uint32_t format;
uint32_t max_width;
uint32_t max_height;
@@ -1663,25 +1666,34 @@
if (rsrc_data->is_master) {
val = cam_io_r_mb(common_data->mem_base +
common_data->common_reg->comp_cfg_0);
+
val |= (0x1 << (rsrc_data->comp_grp_type + 14));
+
cam_io_w_mb(val, common_data->mem_base +
common_data->common_reg->comp_cfg_0);
val = cam_io_r_mb(common_data->mem_base +
common_data->common_reg->comp_cfg_1);
+
val |= (0x1 << rsrc_data->comp_grp_type);
+
cam_io_w_mb(val, common_data->mem_base +
common_data->common_reg->comp_cfg_1);
} else {
val = cam_io_r_mb(common_data->mem_base +
common_data->common_reg->comp_cfg_0);
+
val |= (0x1 << rsrc_data->comp_grp_type);
+ val |= (0x1 << (rsrc_data->comp_grp_type + 14));
+
cam_io_w_mb(val, common_data->mem_base +
common_data->common_reg->comp_cfg_0);
val = cam_io_r_mb(common_data->mem_base +
common_data->common_reg->comp_cfg_1);
+
val |= (0x1 << rsrc_data->comp_grp_type);
+
cam_io_w_mb(val, common_data->mem_base +
common_data->common_reg->comp_cfg_1);
}
@@ -1967,6 +1979,9 @@
return rc;
}
+ rsrc_data->is_dual = out_acquire_args->is_dual;
+ rsrc_data->is_master = out_acquire_args->is_master;
+
cam_vfe_bus_ver3_add_wm_to_comp_grp(rsrc_data->comp_grp,
client_done_mask);
@@ -2087,6 +2102,9 @@
rc = cam_vfe_bus_ver3_start_comp_grp(rsrc_data->comp_grp,
bus_irq_reg_mask);
+ if (rsrc_data->is_dual && !rsrc_data->is_master)
+ goto end;
+
vfe_out->irq_handle = cam_irq_controller_subscribe_irq(
common_data->bus_irq_controller,
CAM_IRQ_PRIORITY_1,
@@ -2130,6 +2148,7 @@
}
}
+end:
vfe_out->res_state = CAM_ISP_RESOURCE_STATE_STREAMING;
return rc;
}
@@ -3105,9 +3124,9 @@
CAM_ERR(CAM_ISP, "num_wm %d h_init 0x%x",
vfe_out_data->num_wm,
ubwc_generic_plane_cfg->h_init);
- if ((!wm_data->is_dual) && ((wm_data->ubwc_meta_cfg !=
- ubwc_generic_plane_cfg->meta_stride) ||
- !wm_data->init_cfg_done)) {
+ if (wm_data->ubwc_meta_cfg !=
+ ubwc_generic_plane_cfg->meta_stride ||
+ !wm_data->init_cfg_done) {
wm_data->ubwc_meta_cfg =
ubwc_generic_plane_cfg->meta_stride;
wm_data->ubwc_updated = true;
@@ -3166,45 +3185,6 @@
return rc;
}
-static uint32_t cam_vfe_bus_ver3_convert_bytes_to_pixels(uint32_t packer_fmt,
- uint32_t width)
-{
- int pixels = 0;
-
- switch (packer_fmt) {
- case PACKER_FMT_VER3_PLAIN_128:
- pixels = width / 16;
- break;
- case PACKER_FMT_VER3_PLAIN_8:
- case PACKER_FMT_VER3_PLAIN_8_ODD_EVEN:
- case PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10:
- case PACKER_FMT_VER3_PLAIN_8_LSB_MSB_10_ODD_EVEN:
- pixels = width;
- break;
- case PACKER_FMT_VER3_PLAIN_16_10BPP:
- case PACKER_FMT_VER3_PLAIN_16_12BPP:
- case PACKER_FMT_VER3_PLAIN_16_14BPP:
- case PACKER_FMT_VER3_PLAIN_16_16BPP:
- pixels = width / 2;
- break;
- case PACKER_FMT_VER3_PLAIN_32:
- pixels = width / 4;
- break;
- case PACKER_FMT_VER3_PLAIN_64:
- pixels = width / 8;
- break;
- case PACKER_FMT_VER3_TP_10:
- pixels = width * 3 / 4;
- break;
- case PACKER_FMT_VER3_MAX:
- default:
- CAM_ERR(CAM_ISP, "Invalid packer cfg 0x%x", packer_fmt);
- break;
- }
-
- return pixels;
-}
-
static int cam_vfe_bus_ver3_update_stripe_cfg(void *priv, void *cmd_args,
uint32_t arg_size)
{
@@ -3238,8 +3218,7 @@
wm_data = vfe_out_data->wm_res[i]->res_priv;
stripe_config = (struct cam_isp_dual_stripe_config *)
&stripe_args->dual_cfg->stripes[ports_plane_idx + i];
- wm_data->width = cam_vfe_bus_ver3_convert_bytes_to_pixels(
- wm_data->pack_fmt, stripe_config->width);
+ wm_data->width = stripe_config->width;
wm_data->offset = stripe_config->offset;
CAM_DBG(CAM_ISP, "id:%x WM:%d width:0x%x offset:%x",
stripe_args->res->res_id, wm_data->index,
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
index bc1a4f8..7dc1f83 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_lite_ver3.c
@@ -26,6 +26,7 @@
struct cam_vfe_camif_lite_ver3_reg_data *reg_data;
struct cam_hw_soc_info *soc_info;
enum cam_isp_hw_sync_mode sync_mode;
+ struct cam_vfe_camif_common_cfg cam_common_cfg;
cam_hw_mgr_event_cb_func event_cb;
void *priv;
@@ -266,8 +267,11 @@
rsrc_data->sync_mode == CAM_ISP_HW_SYNC_SLAVE)
val |= (1 << rsrc_data->reg_data->extern_reg_update_shift);
- if (camif_lite_res->res_id == CAM_ISP_HW_VFE_IN_PDLIB)
+ if (camif_lite_res->res_id == CAM_ISP_HW_VFE_IN_PDLIB) {
val |= (1 << rsrc_data->reg_data->operating_mode_shift);
+ val |= (rsrc_data->cam_common_cfg.input_mux_sel_pdaf & 0x1) <<
+ CAM_SHIFT_TOP_CORE_CFG_MUXSEL_PDAF;
+ }
cam_io_w_mb(val, rsrc_data->mem_base +
rsrc_data->common_reg->core_cfg_0);
@@ -383,6 +387,39 @@
return rc;
}
+static int cam_vfe_camif_lite_ver3_core_config(
+ struct cam_isp_resource_node *rsrc_node, void *cmd_args)
+{
+ struct cam_vfe_mux_camif_lite_data *camif_lite_priv;
+ struct cam_vfe_core_config_args *vfe_core_cfg =
+ (struct cam_vfe_core_config_args *)cmd_args;
+
+ camif_lite_priv =
+ (struct cam_vfe_mux_camif_lite_data *)rsrc_node->res_priv;
+ camif_lite_priv->cam_common_cfg.vid_ds16_r2pd =
+ vfe_core_cfg->core_config.vid_ds16_r2pd;
+ camif_lite_priv->cam_common_cfg.vid_ds4_r2pd =
+ vfe_core_cfg->core_config.vid_ds4_r2pd;
+ camif_lite_priv->cam_common_cfg.disp_ds16_r2pd =
+ vfe_core_cfg->core_config.disp_ds16_r2pd;
+ camif_lite_priv->cam_common_cfg.disp_ds4_r2pd =
+ vfe_core_cfg->core_config.disp_ds4_r2pd;
+ camif_lite_priv->cam_common_cfg.dsp_streaming_tap_point =
+ vfe_core_cfg->core_config.dsp_streaming_tap_point;
+ camif_lite_priv->cam_common_cfg.ihist_src_sel =
+ vfe_core_cfg->core_config.ihist_src_sel;
+ camif_lite_priv->cam_common_cfg.hdr_be_src_sel =
+ vfe_core_cfg->core_config.hdr_be_src_sel;
+ camif_lite_priv->cam_common_cfg.hdr_bhist_src_sel =
+ vfe_core_cfg->core_config.hdr_bhist_src_sel;
+ camif_lite_priv->cam_common_cfg.input_mux_sel_pdaf =
+ vfe_core_cfg->core_config.input_mux_sel_pdaf;
+ camif_lite_priv->cam_common_cfg.input_mux_sel_pp =
+ vfe_core_cfg->core_config.input_mux_sel_pp;
+
+ return 0;
+}
+
static int cam_vfe_camif_lite_process_cmd(
struct cam_isp_resource_node *rsrc_node,
uint32_t cmd_type, void *cmd_args, uint32_t arg_size)
@@ -399,6 +436,9 @@
rc = cam_vfe_camif_lite_get_reg_update(rsrc_node, cmd_args,
arg_size);
break;
+ case CAM_ISP_HW_CMD_CORE_CONFIG:
+ rc = cam_vfe_camif_lite_ver3_core_config(rsrc_node, cmd_args);
+ break;
default:
CAM_ERR(CAM_ISP,
"unsupported process command:%d", cmd_type);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
index f8cd120..058482b 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_camif_ver3.c
@@ -20,7 +20,6 @@
#include "cam_cpas_api.h"
#define CAM_VFE_CAMIF_IRQ_SOF_DEBUG_CNT_MAX 2
-#define CAM_VFE_CAMIF_VER3_CORE_CFG_0_DEFAULT 0x78002800
struct cam_vfe_mux_camif_ver3_data {
void __iomem *mem_base;
@@ -29,6 +28,7 @@
struct cam_vfe_top_ver3_reg_offset_common *common_reg;
struct cam_vfe_camif_ver3_reg_data *reg_data;
struct cam_hw_soc_info *soc_info;
+ struct cam_vfe_camif_common_cfg cam_common_cfg;
cam_hw_mgr_event_cb_func event_cb;
void *priv;
@@ -394,11 +394,6 @@
val = cam_io_r_mb(rsrc_data->mem_base +
rsrc_data->common_reg->core_cfg_0);
- /* Programming to default value must be removed once uapis have been
- * updated to receive this programming from userspace.
- */
- val |= CAM_VFE_CAMIF_VER3_CORE_CFG_0_DEFAULT;
-
/* AF stitching by hw disabled by default
* PP CAMIF currently operates only in offline mode
*/
@@ -419,6 +414,25 @@
(rsrc_data->sync_mode == CAM_ISP_HW_SYNC_MASTER))
val |= (1 << rsrc_data->reg_data->dual_ife_pix_en_shift);
+ val |= (~rsrc_data->cam_common_cfg.vid_ds16_r2pd & 0x1) <<
+ CAM_SHIFT_TOP_CORE_CFG_VID_DS16_R2PD;
+ val |= (~rsrc_data->cam_common_cfg.vid_ds4_r2pd & 0x1) <<
+ CAM_SHIFT_TOP_CORE_CFG_VID_DS4_R2PD;
+ val |= (~rsrc_data->cam_common_cfg.disp_ds16_r2pd & 0x1) <<
+ CAM_SHIFT_TOP_CORE_CFG_DISP_DS16_R2PD;
+ val |= (~rsrc_data->cam_common_cfg.disp_ds4_r2pd & 0x1) <<
+ CAM_SHIFT_TOP_CORE_CFG_DISP_DS4_R2PD;
+ val |= (rsrc_data->cam_common_cfg.dsp_streaming_tap_point & 0x3) <<
+ CAM_SHIFT_TOP_CORE_CFG_DSP_STREAMING;
+ val |= (rsrc_data->cam_common_cfg.ihist_src_sel & 0x1) <<
+ CAM_SHIFT_TOP_CORE_CFG_STATS_IHIST;
+ val |= (rsrc_data->cam_common_cfg.hdr_be_src_sel & 0x1) <<
+ CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BE;
+ val |= (rsrc_data->cam_common_cfg.hdr_bhist_src_sel & 0x1) <<
+ CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BHIST;
+ val |= (rsrc_data->cam_common_cfg.input_mux_sel_pp & 0x3) <<
+ CAM_SHIFT_TOP_CORE_CFG_INPUTMUX_PP;
+
cam_io_w_mb(val, rsrc_data->mem_base +
rsrc_data->common_reg->core_cfg_0);
@@ -676,6 +690,39 @@
return rc;
}
+static int cam_vfe_camif_ver3_core_config(
+ struct cam_isp_resource_node *rsrc_node, void *cmd_args)
+{
+ struct cam_vfe_mux_camif_ver3_data *camif_priv;
+ struct cam_vfe_core_config_args *vfe_core_cfg =
+ (struct cam_vfe_core_config_args *)cmd_args;
+
+ camif_priv =
+ (struct cam_vfe_mux_camif_ver3_data *)rsrc_node->res_priv;
+ camif_priv->cam_common_cfg.vid_ds16_r2pd =
+ vfe_core_cfg->core_config.vid_ds16_r2pd;
+ camif_priv->cam_common_cfg.vid_ds4_r2pd =
+ vfe_core_cfg->core_config.vid_ds4_r2pd;
+ camif_priv->cam_common_cfg.disp_ds16_r2pd =
+ vfe_core_cfg->core_config.disp_ds16_r2pd;
+ camif_priv->cam_common_cfg.disp_ds4_r2pd =
+ vfe_core_cfg->core_config.disp_ds4_r2pd;
+ camif_priv->cam_common_cfg.dsp_streaming_tap_point =
+ vfe_core_cfg->core_config.dsp_streaming_tap_point;
+ camif_priv->cam_common_cfg.ihist_src_sel =
+ vfe_core_cfg->core_config.ihist_src_sel;
+ camif_priv->cam_common_cfg.hdr_be_src_sel =
+ vfe_core_cfg->core_config.hdr_be_src_sel;
+ camif_priv->cam_common_cfg.hdr_bhist_src_sel =
+ vfe_core_cfg->core_config.hdr_bhist_src_sel;
+ camif_priv->cam_common_cfg.input_mux_sel_pdaf =
+ vfe_core_cfg->core_config.input_mux_sel_pdaf;
+ camif_priv->cam_common_cfg.input_mux_sel_pp =
+ vfe_core_cfg->core_config.input_mux_sel_pp;
+
+ return 0;
+}
+
static int cam_vfe_camif_ver3_sof_irq_debug(
struct cam_isp_resource_node *rsrc_node, void *cmd_args)
{
@@ -718,6 +765,9 @@
case CAM_ISP_HW_CMD_SOF_IRQ_DEBUG:
rc = cam_vfe_camif_ver3_sof_irq_debug(rsrc_node, cmd_args);
break;
+ case CAM_ISP_HW_CMD_CORE_CONFIG:
+ rc = cam_vfe_camif_ver3_core_config(rsrc_node, cmd_args);
+ break;
case CAM_ISP_HW_CMD_SET_CAMIF_DEBUG:
camif_priv = (struct cam_vfe_mux_camif_ver3_data *)
rsrc_node->res_priv;
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
index 3f0799e..955cbf0 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.c
@@ -350,6 +350,19 @@
return rc;
}
+static int cam_vfe_core_config_control(
+ struct cam_vfe_top_ver3_priv *top_priv,
+ void *cmd_args, uint32_t arg_size)
+{
+ struct cam_vfe_core_config_args *core_config = cmd_args;
+
+ if (core_config->node_res->process_cmd)
+ return core_config->node_res->process_cmd(core_config->node_res,
+ CAM_ISP_HW_CMD_CORE_CONFIG, cmd_args, arg_size);
+
+ return -EINVAL;
+}
+
static int cam_vfe_top_ver3_bw_control(
struct cam_vfe_top_ver3_priv *top_priv,
void *cmd_args, uint32_t arg_size)
@@ -713,6 +726,9 @@
case CAM_ISP_HW_CMD_BW_CONTROL:
rc = cam_vfe_top_ver3_bw_control(top_priv, cmd_args, arg_size);
break;
+ case CAM_ISP_HW_CMD_CORE_CONFIG:
+ rc = cam_vfe_core_config_control(top_priv, cmd_args, arg_size);
+ break;
default:
rc = -EINVAL;
CAM_ERR(CAM_ISP, "Error, Invalid cmd:%d", cmd_type);
diff --git a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
index 08ad0eb..1ae8e5d 100644
--- a/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
+++ b/drivers/media/platform/msm/camera/cam_isp/isp_hw_mgr/isp_hw/vfe_hw/vfe_top/cam_vfe_top_ver3.h
@@ -12,6 +12,17 @@
#define CAM_VFE_TOP_VER3_MUX_MAX 6
+#define CAM_SHIFT_TOP_CORE_CFG_MUXSEL_PDAF 31
+#define CAM_SHIFT_TOP_CORE_CFG_VID_DS16_R2PD 30
+#define CAM_SHIFT_TOP_CORE_CFG_VID_DS4_R2PD 29
+#define CAM_SHIFT_TOP_CORE_CFG_DISP_DS16_R2PD 28
+#define CAM_SHIFT_TOP_CORE_CFG_DISP_DS4_R2PD 27
+#define CAM_SHIFT_TOP_CORE_CFG_DSP_STREAMING 25
+#define CAM_SHIFT_TOP_CORE_CFG_STATS_IHIST 10
+#define CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BE 9
+#define CAM_SHIFT_TOP_CORE_CFG_STATS_HDR_BHIST 8
+#define CAM_SHIFT_TOP_CORE_CFG_INPUTMUX_PP 5
+
struct cam_vfe_top_ver3_reg_offset_common {
uint32_t hw_version;
uint32_t titan_version;
@@ -39,6 +50,19 @@
uint32_t bus_overflow_status;
};
+struct cam_vfe_camif_common_cfg {
+ uint32_t vid_ds16_r2pd;
+ uint32_t vid_ds4_r2pd;
+ uint32_t disp_ds16_r2pd;
+ uint32_t disp_ds4_r2pd;
+ uint32_t dsp_streaming_tap_point;
+ uint32_t ihist_src_sel;
+ uint32_t hdr_be_src_sel;
+ uint32_t hdr_bhist_src_sel;
+ uint32_t input_mux_sel_pdaf;
+ uint32_t input_mux_sel_pp;
+};
+
struct cam_vfe_top_ver3_hw_info {
struct cam_vfe_top_ver3_reg_offset_common *common_reg;
struct cam_vfe_camif_ver3_hw_info camif_hw_info;
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
index 981d7bba..864b37e 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.c
@@ -41,6 +41,7 @@
link->last_flush_id = 0;
link->initial_sync_req = -1;
link->in_msync_mode = false;
+ link->retry_cnt = 0;
}
void cam_req_mgr_handle_core_shutdown(void)
@@ -167,6 +168,46 @@
}
/**
+ * __cam_req_mgr_notify_error_on_link()
+ *
+ * @brief : Notify userspace when the max retry attempts to
+ * apply the same req are exceeded
+ * @link : link on which the req could not be applied
+ *
+ */
+static int __cam_req_mgr_notify_error_on_link(
+ struct cam_req_mgr_core_link *link)
+{
+ struct cam_req_mgr_core_session *session = NULL;
+ struct cam_req_mgr_message msg;
+ int rc = 0;
+
+ session = (struct cam_req_mgr_core_session *)link->parent;
+
+ CAM_ERR(CAM_CRM,
+ "Notifying userspace to trigger recovery on link 0x%x for session %d",
+ link->link_hdl, session->session_hdl);
+
+ memset(&msg, 0, sizeof(msg));
+
+ msg.session_hdl = session->session_hdl;
+ msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
+ msg.u.err_msg.request_id = 0;
+ msg.u.err_msg.link_hdl = link->link_hdl;
+
+ rc = cam_req_mgr_notify_message(&msg,
+ V4L_EVENT_CAM_REQ_MGR_ERROR,
+ V4L_EVENT_CAM_REQ_MGR_EVENT);
+
+ if (rc)
+ CAM_ERR(CAM_CRM,
+ "Error in notifying recovery for session %d link 0x%x rc %d",
+ session->session_hdl, link->link_hdl, rc);
+
+ return rc;
+}
+
+/**
* __cam_req_mgr_traverse()
*
* @brief : Traverse through pd tables, it will internally cover all linked
@@ -1092,7 +1133,20 @@
if (rc < 0) {
/* Apply req failed retry at next sof */
slot->status = CRM_SLOT_STATUS_REQ_PENDING;
+
+ link->retry_cnt++;
+ if (link->retry_cnt == MAXIMUM_RETRY_ATTEMPTS) {
+ CAM_DBG(CAM_CRM,
+ "Max retry attempts reached on link[0x%x] for req [%lld]",
+ link->link_hdl,
+ in_q->slot[in_q->rd_idx].req_id);
+ __cam_req_mgr_notify_error_on_link(link);
+ link->retry_cnt = 0;
+ }
} else {
+ if (link->retry_cnt)
+ link->retry_cnt = 0;
+
link->trigger_mask |= trigger;
CAM_DBG(CAM_CRM, "Applied req[%lld] on link[%x] success",
@@ -1342,7 +1396,7 @@
memset(&msg, 0, sizeof(msg));
msg.session_hdl = session->session_hdl;
- msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_DEVICE;
+ msg.u.err_msg.error_type = CAM_REQ_MGR_ERROR_TYPE_RECOVERY;
msg.u.err_msg.request_id = 0;
msg.u.err_msg.link_hdl = link->link_hdl;
@@ -1586,6 +1640,7 @@
link->req.in_q = NULL;
i = link - g_links;
CAM_DBG(CAM_CRM, "free link index %d", i);
+ cam_req_mgr_core_link_reset(link);
atomic_set(&g_links[i].is_used, 0);
}
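
The retry accounting above increments link->retry_cnt on every failed apply, notifies userspace once MAXIMUM_RETRY_ATTEMPTS is reached, and clears the counter on the next successful apply. A standalone model of that flow:

#include <stdio.h>

#define MAXIMUM_RETRY_ATTEMPTS 3

int main(void)
{
	int results[] = { -1, -1, -1, 0 };	/* three failures, then success */
	unsigned int retry_cnt = 0;

	for (int i = 0; i < 4; i++) {
		if (results[i] < 0) {
			if (++retry_cnt == MAXIMUM_RETRY_ATTEMPTS) {
				printf("notify error on link\n");
				retry_cnt = 0;
			}
		} else {
			retry_cnt = 0;	/* success clears the streak */
		}
	}
	return 0;
}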
diff --git a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
index 8f07b3b..9a6acbc 100644
--- a/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
+++ b/drivers/media/platform/msm/camera/cam_req_mgr/cam_req_mgr_core.h
@@ -29,6 +29,8 @@
#define MAXIMUM_LINKS_PER_SESSION 4
+#define MAXIMUM_RETRY_ATTEMPTS 3
+
/**
* enum crm_workq_task_type
* @codes: to identify which type of task is present
@@ -310,6 +312,8 @@
* @in_msync_mode : Flag to determine if a link is in master-slave mode
* @initial_sync_req : The initial req which is required to sync with the
* other link
+ * @retry_cnt : Counter that tracks number of attempts to apply
+ * the same req
*/
struct cam_req_mgr_core_link {
int32_t link_hdl;
@@ -336,6 +340,7 @@
bool initial_skip;
bool in_msync_mode;
int64_t initial_sync_req;
+ uint32_t retry_cnt;
};
/**
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index b4c3cf1..23b422e 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -28,11 +28,10 @@
#define QP_ENABLE_P 0x2
#define QP_ENABLE_B 0x4
#define MIN_QP 0
-#define MAX_QP 0x33
-#define MAX_QP_PACKED 0x333333
-#define DEFAULT_MIN_QP 0xA
-#define DEFAULT_MIN_QP_PACKED 0xA0A0A
-#define DEFAULT_MAX_QP_PACKED 0x2C2C2C
+#define MAX_QP 0x7F
+#define MAX_QP_PACKED 0x7F7F7F
+#define DEFAULT_QP 0xA
+#define DEFAULT_QP_PACKED 0xA0A0A
#define MAX_INTRA_REFRESH_MBS ((7680 * 4320) >> 8)
#define MAX_LTR_FRAME_COUNT 10
#define MAX_NUM_B_FRAMES 1
@@ -99,7 +98,7 @@
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = MIN_QP,
.maximum = MAX_QP,
- .default_value = DEFAULT_MIN_QP,
+ .default_value = DEFAULT_QP,
.step = 1,
.qmenu = NULL,
},
@@ -109,7 +108,7 @@
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = MIN_QP,
.maximum = MAX_QP,
- .default_value = DEFAULT_MIN_QP,
+ .default_value = DEFAULT_QP,
.step = 1,
.qmenu = NULL,
},
@@ -119,7 +118,7 @@
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = MIN_QP,
.maximum = MAX_QP,
- .default_value = DEFAULT_MIN_QP,
+ .default_value = DEFAULT_QP,
.step = 1,
.qmenu = NULL,
},
@@ -129,7 +128,7 @@
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = MIN_QP,
.maximum = MAX_QP_PACKED,
- .default_value = DEFAULT_MIN_QP_PACKED,
+ .default_value = DEFAULT_QP_PACKED,
.step = 1,
.qmenu = NULL,
},
@@ -139,7 +138,7 @@
.type = V4L2_CTRL_TYPE_INTEGER,
.minimum = MIN_QP,
.maximum = MAX_QP_PACKED,
- .default_value = DEFAULT_MAX_QP_PACKED,
+ .default_value = DEFAULT_QP_PACKED,
.step = 1,
.qmenu = NULL,
},
@@ -888,10 +887,10 @@
},
{
.id = V4L2_CID_MPEG_VIDC_VENC_HDR_INFO,
- .name = "Enable/Disable HDR INFO",
- .type = V4L2_CTRL_TYPE_U32,
- .minimum = 0,
- .maximum = UINT_MAX,
+ .name = "HDR PQ information",
+ .type = V4L2_CTRL_TYPE_INTEGER,
+ .minimum = INT_MIN,
+ .maximum = INT_MAX,
.default_value = 0,
.step = 1,
},
@@ -1365,9 +1364,7 @@
struct msm_vidc_mastering_display_colour_sei_payload *mdisp_sei = NULL;
struct msm_vidc_content_light_level_sei_payload *cll_sei = NULL;
struct hal_buffer_requirements *buff_req_buffer = NULL;
- struct v4l2_ctrl *i_qp = NULL;
- struct v4l2_ctrl *p_qp = NULL;
- struct v4l2_ctrl *b_qp = NULL;
+ u32 i_qp_min, i_qp_max, p_qp_min, p_qp_max, b_qp_min, b_qp_max;
if (!inst || !inst->core || !inst->core->device || !ctrl) {
dprintk(VIDC_ERR, "%s invalid parameters\n", __func__);
@@ -1592,15 +1589,18 @@
break;
case V4L2_CID_MPEG_VIDEO_HEVC_MIN_QP:
case V4L2_CID_MPEG_VIDEO_HEVC_MAX_QP:
- i_qp = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP);
- p_qp = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP);
- b_qp = get_ctrl(inst, V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP);
- if ((ctrl->val & 0xff) < i_qp->minimum ||
- ((ctrl->val >> 8) & 0xff) < p_qp->minimum ||
- ((ctrl->val >> 16) & 0xff) < b_qp->minimum ||
- (ctrl->val & 0xff) > i_qp->maximum ||
- ((ctrl->val >> 8) & 0xff) > p_qp->maximum ||
- ((ctrl->val >> 16) & 0xff) > b_qp->maximum) {
+ i_qp_min = inst->capability.cap[CAP_I_FRAME_QP].min;
+ i_qp_max = inst->capability.cap[CAP_I_FRAME_QP].max;
+ p_qp_min = inst->capability.cap[CAP_P_FRAME_QP].min;
+ p_qp_max = inst->capability.cap[CAP_P_FRAME_QP].max;
+ b_qp_min = inst->capability.cap[CAP_B_FRAME_QP].min;
+ b_qp_max = inst->capability.cap[CAP_B_FRAME_QP].max;
+ if ((ctrl->val & 0xff) < i_qp_min ||
+ ((ctrl->val >> 8) & 0xff) < p_qp_min ||
+ ((ctrl->val >> 16) & 0xff) < b_qp_min ||
+ (ctrl->val & 0xff) > i_qp_max ||
+ ((ctrl->val >> 8) & 0xff) > p_qp_max ||
+ ((ctrl->val >> 16) & 0xff) > b_qp_max) {
dprintk(VIDC_ERR, "Invalid QP %#x\n", ctrl->val);
return -EINVAL;
}
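
The MIN/MAX_QP control packs the I, P and B frame QPs into the low three bytes, which is exactly what the shifted-mask checks above unpack. A standalone demonstration of the layout, matching DEFAULT_QP_PACKED = 0xA0A0A:

#include <stdio.h>

int main(void)
{
	unsigned int i_qp = 0x0A, p_qp = 0x0A, b_qp = 0x0A;
	unsigned int packed = i_qp | (p_qp << 8) | (b_qp << 16);

	printf("packed = %#x\n", packed);	/* 0xa0a0a */
	printf("I=%u P=%u B=%u\n",
	       packed & 0xff, (packed >> 8) & 0xff, (packed >> 16) & 0xff);
	return 0;
}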
@@ -1610,6 +1610,12 @@
inst->client_set_ctrls |= CLIENT_SET_MAX_QP;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP:
+ i_qp_min = inst->capability.cap[CAP_I_FRAME_QP].min;
+ i_qp_max = inst->capability.cap[CAP_I_FRAME_QP].max;
+ if (ctrl->val < i_qp_min || ctrl->val > i_qp_max) {
+ dprintk(VIDC_ERR, "Invalid I QP %#x\n", ctrl->val);
+ return -EINVAL;
+ }
inst->client_set_ctrls |= CLIENT_SET_I_QP;
if (inst->state == MSM_VIDC_START_DONE) {
rc = msm_venc_set_dyn_qp(inst, ctrl);
@@ -1620,9 +1626,21 @@
}
break;
case V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP:
+ p_qp_min = inst->capability.cap[CAP_P_FRAME_QP].min;
+ p_qp_max = inst->capability.cap[CAP_P_FRAME_QP].max;
+ if (ctrl->val < p_qp_min || ctrl->val > p_qp_max) {
+ dprintk(VIDC_ERR, "Invalid P QP %#x\n", ctrl->val);
+ return -EINVAL;
+ }
inst->client_set_ctrls |= CLIENT_SET_P_QP;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP:
+ b_qp_min = inst->capability.cap[CAP_B_FRAME_QP].min;
+ b_qp_max = inst->capability.cap[CAP_B_FRAME_QP].max;
+ if (ctrl->val < b_qp_min || ctrl->val > b_qp_max) {
+ dprintk(VIDC_ERR, "Invalid B QP %#x\n", ctrl->val);
+ return -EINVAL;
+ }
inst->client_set_ctrls |= CLIENT_SET_B_QP;
break;
case V4L2_CID_MPEG_VIDEO_HEVC_HIER_CODING_LAYER:
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
index 10b96b4..e862a23 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_buffer_calculations.c
@@ -255,6 +255,7 @@
#define NUM_MBS_4k (((4096 + 15) >> 4) * ((2304 + 15) >> 4))
#define MB_SIZE_IN_PIXEL (16 * 16)
#define HDR10PLUS_PAYLOAD_SIZE 1024
+#define HDR10_HIST_EXTRADATA_SIZE 4096
static inline u32 calculate_h264d_scratch_size(struct msm_vidc_inst *inst,
u32 width, u32 height, bool is_interlaced);
@@ -1304,7 +1305,8 @@
if (split_mode_enabled)
vpss_lb_size = size_vpss_lb(width, height);
- size = co_mv_size + nonco_mv_size + vpss_lb_size;
+ size = co_mv_size + nonco_mv_size + vpss_lb_size +
+ HDR10_HIST_EXTRADATA_SIZE;
return size;
}
@@ -1369,7 +1371,7 @@
if (split_mode_enabled)
vpss_lb_size = size_vpss_lb(width, height);
- size += vpss_lb_size;
+ size += vpss_lb_size + HDR10_HIST_EXTRADATA_SIZE;
return size;
}
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 985f21a..7b0edfc 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1431,15 +1431,6 @@
msm_vidc_comm_update_ctrl(inst, V4L2_CID_MPEG_VIDEO_BITRATE,
&inst->capability.cap[CAP_BITRATE]);
msm_vidc_comm_update_ctrl(inst,
- V4L2_CID_MPEG_VIDEO_HEVC_I_FRAME_QP,
- &inst->capability.cap[CAP_I_FRAME_QP]);
- msm_vidc_comm_update_ctrl(inst,
- V4L2_CID_MPEG_VIDEO_HEVC_P_FRAME_QP,
- &inst->capability.cap[CAP_P_FRAME_QP]);
- msm_vidc_comm_update_ctrl(inst,
- V4L2_CID_MPEG_VIDEO_HEVC_B_FRAME_QP,
- &inst->capability.cap[CAP_B_FRAME_QP]);
- msm_vidc_comm_update_ctrl(inst,
V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MAX_BYTES,
&inst->capability.cap[CAP_SLICE_BYTE]);
msm_vidc_comm_update_ctrl(inst,
@@ -3560,7 +3551,7 @@
int rc = 0;
struct internal_buf *binfo = NULL;
u32 smem_flags = SMEM_UNCACHED, buffer_size, num_buffers, hfi_fmt;
- struct hal_buffer_requirements *output_buf, *extradata_buf;
+ struct hal_buffer_requirements *output_buf;
unsigned int i;
struct hfi_device *hdev;
struct hfi_buffer_size_minimum b;
@@ -3575,6 +3566,17 @@
return 0;
}
+ /* Set DPB buffer count to firmware */
+ rc = msm_comm_set_buffer_count(inst,
+ output_buf->buffer_count_min,
+ output_buf->buffer_count_min,
+ HAL_BUFFER_OUTPUT);
+ if (rc) {
+ dprintk(VIDC_ERR, "%s: failed to set bufreqs(%#x)\n",
+ __func__, buffer_type);
+ return -EINVAL;
+ }
+
/* For DPB buffers, Always use FW count */
num_buffers = output_buf->buffer_count_min;
hfi_fmt = msm_comm_convert_color_fmt(inst->clk_data.dpb_fourcc);
@@ -3594,12 +3596,15 @@
inst->session, HFI_PROPERTY_PARAM_BUFFER_SIZE_MINIMUM,
&b, sizeof(b));
- extradata_buf = get_buff_req_buffer(inst, HAL_BUFFER_EXTRADATA_OUTPUT);
- if (extradata_buf) {
+ if (inst->bufq[CAPTURE_PORT].num_planes == 1 ||
+ !inst->bufq[CAPTURE_PORT].plane_sizes[1]) {
dprintk(VIDC_DBG,
- "extradata: num = %d, size = %d\n",
- extradata_buf->buffer_count_actual,
- extradata_buf->buffer_size);
+ "This extradata buffer not required, buffer_type: %x\n",
+ buffer_type);
+ } else {
+ dprintk(VIDC_DBG,
+ "extradata: num = 1, size = %d\n",
+ inst->bufq[CAPTURE_PORT].plane_sizes[1]);
inst->dpb_extra_binfo = NULL;
inst->dpb_extra_binfo = kzalloc(sizeof(*binfo), GFP_KERNEL);
if (!inst->dpb_extra_binfo) {
@@ -3608,17 +3613,13 @@
goto fail_kzalloc;
}
rc = msm_comm_smem_alloc(inst,
- extradata_buf->buffer_size, 1, smem_flags,
+ inst->bufq[CAPTURE_PORT].plane_sizes[1], 1, smem_flags,
buffer_type, 0, &inst->dpb_extra_binfo->smem);
if (rc) {
dprintk(VIDC_ERR,
"Failed to allocate output memory\n");
goto err_no_mem;
}
- } else {
- dprintk(VIDC_DBG,
- "This extradata buffer not required, buffer_type: %x\n",
- buffer_type);
}
if (inst->flags & VIDC_SECURE)
@@ -4509,7 +4510,7 @@
}
dprintk(VIDC_DBG, "Buffer requirements :\n");
- dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s\n",
+ dprintk(VIDC_DBG, "%15s %8s %8s %8s %8s %8s\n",
"buffer type", "count", "mincount_host", "mincount_fw", "size",
"alignment");
for (i = 0; i < HAL_BUFFER_MAX; i++) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_platform.c b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
index 50a19f0..c14348f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_platform.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_platform.c
@@ -236,7 +236,7 @@
},
{
.key = "qcom,sw-power-collapse",
- .value = 0,
+ .value = 1,
},
{
.key = "qcom,domain-attr-non-fatal-faults",
@@ -280,11 +280,11 @@
},
{
.key = "qcom,power-collapse-delay",
- .value = 15000,
+ .value = 1500,
},
{
.key = "qcom,hw-resp-timeout",
- .value = 10000,
+ .value = 1000,
},
{
.key = "qcom,debug-timeout",
diff --git a/drivers/media/usb/uvc/uvc_driver.c b/drivers/media/usb/uvc/uvc_driver.c
index 361abbc..6f1fd40 100644
--- a/drivers/media/usb/uvc/uvc_driver.c
+++ b/drivers/media/usb/uvc/uvc_driver.c
@@ -1065,11 +1065,19 @@
return -EINVAL;
}
- /* Make sure the terminal type MSB is not null, otherwise it
- * could be confused with a unit.
+ /*
+ * Reject invalid terminal types that would cause issues:
+ *
+ * - The high byte must be non-zero, otherwise it would be
+ * confused with a unit.
+ *
+ * - Bit 15 must be 0, as we use it internally as a terminal
+ * direction flag.
+ *
+ * Other unknown types are accepted.
*/
type = get_unaligned_le16(&buffer[4]);
- if ((type & 0xff00) == 0) {
+ if ((type & 0x7f00) == 0 || (type & 0x8000) != 0) {
uvc_trace(UVC_TRACE_DESCR, "device %d videocontrol "
"interface %d INPUT_TERMINAL %d has invalid "
"type 0x%04x, skipping\n", udev->devnum,
diff --git a/drivers/misc/hdcp_qseecom.c b/drivers/misc/hdcp_qseecom.c
index 53947b5..6de29bc 100644
--- a/drivers/misc/hdcp_qseecom.c
+++ b/drivers/misc/hdcp_qseecom.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "[hdcp-qseecom] %s: " fmt, __func__
@@ -1044,11 +1044,7 @@
}
rc = handle->tx_init(handle);
- if (rc)
- goto error;
- if (!handle->legacy_app)
- rc = hdcp2_app_start_auth(handle);
error:
return rc;
}
@@ -1188,6 +1184,7 @@
pr_err("failed, rc=%d\n", rc);
return rc;
}
+EXPORT_SYMBOL(hdcp2_force_encryption);
static int hdcp2_app_query_stream(struct hdcp2_handle *handle)
{
@@ -1236,6 +1233,9 @@
case HDCP2_CMD_START:
rc = hdcp2_app_start(handle);
break;
+ case HDCP2_CMD_START_AUTH:
+ rc = hdcp2_app_start_auth(handle);
+ break;
case HDCP2_CMD_PROCESS_MSG:
rc = hdcp2_app_process_msg(handle);
break;
@@ -1268,6 +1268,7 @@
error:
return rc;
}
+EXPORT_SYMBOL(hdcp2_app_comm);
static int hdcp2_open_stream_helper(struct hdcp2_handle *handle,
uint8_t vc_payload_id,
@@ -1322,6 +1323,7 @@
return hdcp2_open_stream_helper(handle, vc_payload_id, stream_number,
stream_id);
}
+EXPORT_SYMBOL(hdcp2_open_stream);
static int hdcp2_close_stream_helper(struct hdcp2_handle *handle,
uint32_t stream_id)
@@ -1368,6 +1370,7 @@
return hdcp2_close_stream_helper(handle, stream_id);
}
+EXPORT_SYMBOL(hdcp2_close_stream);
void *hdcp2_init(u32 device_type)
{
@@ -1382,11 +1385,13 @@
error:
return handle;
}
+EXPORT_SYMBOL(hdcp2_init);
void hdcp2_deinit(void *ctx)
{
kzfree(ctx);
}
+EXPORT_SYMBOL(hdcp2_deinit);
void *hdcp1_init(void)
{
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index a6fcc5c..b2c42ca 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -1171,29 +1171,22 @@
}
}
- /* Link-local multicast packets should be passed to the
- * stack on the link they arrive as well as pass them to the
- * bond-master device. These packets are mostly usable when
- * stack receives it with the link on which they arrive
- * (e.g. LLDP) they also must be available on master. Some of
- * the use cases include (but are not limited to): LLDP agents
- * that must be able to operate both on enslaved interfaces as
- * well as on bonds themselves; linux bridges that must be able
- * to process/pass BPDUs from attached bonds when any kind of
- * STP version is enabled on the network.
+ /*
+ * For packets that bond_should_deliver_exact_match() determines should
+ * be suppressed, we make an exception for link-local packets.
+ * This is necessary for e.g. LLDP daemons to be able to monitor
+ * inactive slave links without being forced to bind to them
+ * explicitly.
+ *
+ * At the same time, packets that are passed to the bonding master
+ * (including link-local ones) can have their originating interface
+ * determined via PACKET_ORIGDEV socket option.
*/
- if (is_link_local_ether_addr(eth_hdr(skb)->h_dest)) {
- struct sk_buff *nskb = skb_clone(skb, GFP_ATOMIC);
-
- if (nskb) {
- nskb->dev = bond->dev;
- nskb->queue_mapping = 0;
- netif_rx(nskb);
- }
- return RX_HANDLER_PASS;
- }
- if (bond_should_deliver_exact_match(skb, slave, bond))
+ if (bond_should_deliver_exact_match(skb, slave, bond)) {
+ if (is_link_local_ether_addr(eth_hdr(skb)->h_dest))
+ return RX_HANDLER_PASS;
return RX_HANDLER_EXACT;
+ }
skb->dev = bond->dev;
diff --git a/drivers/net/dsa/mv88e6xxx/chip.c b/drivers/net/dsa/mv88e6xxx/chip.c
index 9f697a5..c078c79 100644
--- a/drivers/net/dsa/mv88e6xxx/chip.c
+++ b/drivers/net/dsa/mv88e6xxx/chip.c
@@ -884,7 +884,7 @@
default:
return U64_MAX;
}
- value = (((u64)high) << 16) | low;
+ value = (((u64)high) << 32) | low;
return value;
}
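
The mv88e6xxx fix shifts the high snapshot word by 32 bits; the old 16-bit shift overlapped the two halves, effectively discarding the high word whenever the low word had its upper bits set. A standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t high = 0x1, low = 0x23456789;

	uint64_t old = (((uint64_t)high) << 16) | low;	/* halves overlap */
	uint64_t new = (((uint64_t)high) << 32) | low;

	printf("old = 0x%llx\n", (unsigned long long)old);	/* 0x23456789 */
	printf("new = 0x%llx\n", (unsigned long long)new);	/* 0x123456789 */
	return 0;
}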
@@ -3070,7 +3070,7 @@
.port_disable_pri_override = mv88e6xxx_port_disable_pri_override,
.port_link_state = mv88e6352_port_link_state,
.port_get_cmode = mv88e6185_port_get_cmode,
- .stats_snapshot = mv88e6320_g1_stats_snapshot,
+ .stats_snapshot = mv88e6xxx_g1_stats_snapshot,
.stats_set_histogram = mv88e6095_g1_stats_set_histogram,
.stats_get_sset_count = mv88e6095_stats_get_sset_count,
.stats_get_strings = mv88e6095_stats_get_strings,
@@ -4188,7 +4188,7 @@
.name = "Marvell 88E6190",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 11,
+ .num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
@@ -4211,7 +4211,7 @@
.name = "Marvell 88E6190X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 11,
+ .num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
@@ -4234,7 +4234,7 @@
.name = "Marvell 88E6191",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 11,
+ .num_internal_phys = 9,
.max_vid = 8191,
.port_base_addr = 0x0,
.phy_base_addr = 0x0,
@@ -4281,7 +4281,7 @@
.name = "Marvell 88E6290",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 11,
+ .num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
@@ -4443,7 +4443,7 @@
.name = "Marvell 88E6390",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 11,
+ .num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
@@ -4466,7 +4466,7 @@
.name = "Marvell 88E6390X",
.num_databases = 4096,
.num_ports = 11, /* 10 + Z80 */
- .num_internal_phys = 11,
+ .num_internal_phys = 9,
.num_gpio = 16,
.max_vid = 8191,
.port_base_addr = 0x0,
@@ -4561,6 +4561,14 @@
return 0;
}
+static void mv88e6xxx_ports_cmode_init(struct mv88e6xxx_chip *chip)
+{
+ int i;
+
+ for (i = 0; i < mv88e6xxx_num_ports(chip); i++)
+ chip->ports[i].cmode = MV88E6XXX_PORT_STS_CMODE_INVALID;
+}
+
static enum dsa_tag_protocol mv88e6xxx_get_tag_protocol(struct dsa_switch *ds,
int port)
{
@@ -4597,6 +4605,8 @@
if (err)
goto free;
+ mv88e6xxx_ports_cmode_init(chip);
+
mutex_lock(&chip->reg_lock);
err = mv88e6xxx_switch_reset(chip);
mutex_unlock(&chip->reg_lock);
diff --git a/drivers/net/dsa/mv88e6xxx/port.c b/drivers/net/dsa/mv88e6xxx/port.c
index 9294584..7fffce7 100644
--- a/drivers/net/dsa/mv88e6xxx/port.c
+++ b/drivers/net/dsa/mv88e6xxx/port.c
@@ -190,7 +190,7 @@
/* normal duplex detection */
break;
default:
- return -EINVAL;
+ return -EOPNOTSUPP;
}
err = mv88e6xxx_port_write(chip, port, MV88E6XXX_PORT_MAC_CTL, reg);
@@ -374,6 +374,10 @@
cmode = 0;
}
+ /* cmode doesn't change, nothing to do for us */
+ if (cmode == chip->ports[port].cmode)
+ return 0;
+
lane = mv88e6390x_serdes_get_lane(chip, port);
if (lane < 0)
return lane;
@@ -384,7 +388,7 @@
return err;
}
- err = mv88e6390_serdes_power(chip, port, false);
+ err = mv88e6390x_serdes_power(chip, port, false);
if (err)
return err;
@@ -400,7 +404,7 @@
if (err)
return err;
- err = mv88e6390_serdes_power(chip, port, true);
+ err = mv88e6390x_serdes_power(chip, port, true);
if (err)
return err;
diff --git a/drivers/net/dsa/mv88e6xxx/port.h b/drivers/net/dsa/mv88e6xxx/port.h
index b319100..95b59f5 100644
--- a/drivers/net/dsa/mv88e6xxx/port.h
+++ b/drivers/net/dsa/mv88e6xxx/port.h
@@ -52,6 +52,7 @@
#define MV88E6185_PORT_STS_CMODE_1000BASE_X 0x0005
#define MV88E6185_PORT_STS_CMODE_PHY 0x0006
#define MV88E6185_PORT_STS_CMODE_DISABLED 0x0007
+#define MV88E6XXX_PORT_STS_CMODE_INVALID 0xff
/* Offset 0x01: MAC (or PCS or Physical) Control Register */
#define MV88E6XXX_PORT_MAC_CTL 0x01
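The new MV88E6XXX_PORT_STS_CMODE_INVALID sentinel exists because port.c now returns early when the requested cmode equals the cached one; a zero-initialized cache could alias a real cmode value and skip the first serdes programming. A minimal model of the idea (names illustrative):

#include <stdint.h>
#include <stdio.h>

#define CMODE_INVALID 0xff

static uint8_t cached_cmode = CMODE_INVALID; /* as done in ports_cmode_init() */

static int set_cmode(uint8_t cmode)
{
    if (cmode == cached_cmode)
        return 0;   /* nothing to do */
    /* ... serdes/lane reprogramming would happen here ... */
    cached_cmode = cmode;
    return 1;       /* hardware touched */
}

int main(void)
{
    printf("first set to 0:  %s\n", set_cmode(0) ? "programmed" : "skipped");
    printf("second set to 0: %s\n", set_cmode(0) ? "programmed" : "skipped");
    return 0;
}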
diff --git a/drivers/net/ethernet/altera/altera_msgdma.c b/drivers/net/ethernet/altera/altera_msgdma.c
index 0fb986b..0ae723f 100644
--- a/drivers/net/ethernet/altera/altera_msgdma.c
+++ b/drivers/net/ethernet/altera/altera_msgdma.c
@@ -145,7 +145,8 @@
& 0xffff;
if (inuse) { /* Tx FIFO is not empty */
- ready = priv->tx_prod - priv->tx_cons - inuse - 1;
+ ready = max_t(int,
+ priv->tx_prod - priv->tx_cons - inuse - 1, 0);
} else {
/* Check for buffered last packet */
status = csrrd32(priv->tx_dma_csr, msgdma_csroffs(status));
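The clamp matters because the producer/consumer arithmetic can transiently go negative while descriptors are still in flight, and an unclamped negative would be read back as a huge ready count. A standalone model of the corrected calculation:

#include <stdio.h>

static int tx_ready(int prod, int cons, int inuse)
{
    int ready = prod - cons - inuse - 1;

    return ready > 0 ? ready : 0; /* max_t(int, ..., 0) */
}

int main(void)
{
    /* prod == cons, but hardware still reports two descriptors in use */
    printf("ready = %d\n", tx_ready(8, 8, 2)); /* 0, not -3 */
    return 0;
}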
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 034f575..1fdaf86b 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -463,6 +463,12 @@
}
length >>= 9;
+ if (unlikely(length >= ARRAY_SIZE(bnxt_lhint_arr))) {
+ dev_warn_ratelimited(&pdev->dev, "Dropped oversize %d bytes TX packet.\n",
+ skb->len);
+ i = 0;
+ goto tx_dma_error;
+ }
flags |= bnxt_lhint_arr[length];
txbd->tx_bd_len_flags_type = cpu_to_le32(flags);
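The added check is a plain bounds guard: a frame longer than expected would otherwise index past the end of bnxt_lhint_arr. The generic shape of the guard, with a made-up table:

#include <stdio.h>

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

static const unsigned int lhint_arr[4] = { 0, 1, 2, 3 };

static int lookup_lhint(unsigned int len_512b, unsigned int *flags)
{
    if (len_512b >= ARRAY_SIZE(lhint_arr))
        return -1;  /* drop instead of reading out of bounds */
    *flags = lhint_arr[len_512b];
    return 0;
}

int main(void)
{
    unsigned int flags;

    printf("len 2:  %d\n", lookup_lhint(2, &flags));
    printf("len 99: %d\n", lookup_lhint(99, &flags));
    return 0;
}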
diff --git a/drivers/net/ethernet/cadence/macb.h b/drivers/net/ethernet/cadence/macb.h
index 3d45f4c..9bbaad9 100644
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -643,6 +643,7 @@
#define MACB_CAPS_JUMBO 0x00000020
#define MACB_CAPS_GEM_HAS_PTP 0x00000040
#define MACB_CAPS_BD_RD_PREFETCH 0x00000080
+#define MACB_CAPS_NEEDS_RSTONUBR 0x00000100
#define MACB_CAPS_FIFO_MODE 0x10000000
#define MACB_CAPS_GIGABIT_MODE_AVAILABLE 0x20000000
#define MACB_CAPS_SG_DISABLED 0x40000000
@@ -1214,6 +1215,8 @@
int rx_bd_rd_prefetch;
int tx_bd_rd_prefetch;
+
+ u32 rx_intr_mask;
};
#ifdef CONFIG_MACB_USE_HWSTAMP
diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c
index 8f4b2f9..8abea1c 100644
--- a/drivers/net/ethernet/cadence/macb_main.c
+++ b/drivers/net/ethernet/cadence/macb_main.c
@@ -56,8 +56,7 @@
/* level of occupied TX descriptors under which we wake up TX process */
#define MACB_TX_WAKEUP_THRESH(bp) (3 * (bp)->tx_ring_size / 4)
-#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
- | MACB_BIT(ISR_ROVR))
+#define MACB_RX_INT_FLAGS (MACB_BIT(RCOMP) | MACB_BIT(ISR_ROVR))
#define MACB_TX_ERR_FLAGS (MACB_BIT(ISR_TUND) \
| MACB_BIT(ISR_RLE) \
| MACB_BIT(TXERR))
@@ -1271,7 +1270,7 @@
queue_writel(queue, ISR, MACB_BIT(RCOMP));
napi_reschedule(napi);
} else {
- queue_writel(queue, IER, MACB_RX_INT_FLAGS);
+ queue_writel(queue, IER, bp->rx_intr_mask);
}
}
@@ -1289,7 +1288,7 @@
u32 ctrl;
for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
- queue_writel(queue, IDR, MACB_RX_INT_FLAGS |
+ queue_writel(queue, IDR, bp->rx_intr_mask |
MACB_TX_INT_FLAGS |
MACB_BIT(HRESP));
}
@@ -1319,7 +1318,7 @@
/* Enable interrupts */
queue_writel(queue, IER,
- MACB_RX_INT_FLAGS |
+ bp->rx_intr_mask |
MACB_TX_INT_FLAGS |
MACB_BIT(HRESP));
}
@@ -1373,14 +1372,14 @@
(unsigned int)(queue - bp->queues),
(unsigned long)status);
- if (status & MACB_RX_INT_FLAGS) {
+ if (status & bp->rx_intr_mask) {
/* There's no point taking any more interrupts
* until we have processed the buffers. The
* scheduling call may fail if the poll routine
* is already scheduled, so disable interrupts
* now.
*/
- queue_writel(queue, IDR, MACB_RX_INT_FLAGS);
+ queue_writel(queue, IDR, bp->rx_intr_mask);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, MACB_BIT(RCOMP));
@@ -1413,8 +1412,9 @@
/* There is a hardware issue under heavy load where DMA can
* stop, this causes endless "used buffer descriptor read"
* interrupts but it can be cleared by re-enabling RX. See
- * the at91 manual, section 41.3.1 or the Zynq manual
- * section 16.7.4 for details.
+ * the at91rm9200 manual, section 41.3.1 or the Zynq manual
+ * section 16.7.4 for details. RXUBR is only enabled for
+ * these two versions.
*/
if (status & MACB_BIT(RXUBR)) {
ctrl = macb_readl(bp, NCR);
@@ -2264,7 +2264,7 @@
/* Enable interrupts */
queue_writel(queue, IER,
- MACB_RX_INT_FLAGS |
+ bp->rx_intr_mask |
MACB_TX_INT_FLAGS |
MACB_BIT(HRESP));
}
@@ -3912,6 +3912,7 @@
};
static const struct macb_config emac_config = {
+ .caps = MACB_CAPS_NEEDS_RSTONUBR,
.clk_init = at91ether_clk_init,
.init = at91ether_init,
};
@@ -3933,7 +3934,8 @@
};
static const struct macb_config zynq_config = {
- .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF,
+ .caps = MACB_CAPS_GIGABIT_MODE_AVAILABLE | MACB_CAPS_NO_GIGABIT_HALF |
+ MACB_CAPS_NEEDS_RSTONUBR,
.dma_burst_length = 16,
.clk_init = macb_clk_init,
.init = macb_init,
@@ -4088,6 +4090,10 @@
macb_dma_desc_get_size(bp);
}
+ bp->rx_intr_mask = MACB_RX_INT_FLAGS;
+ if (bp->caps & MACB_CAPS_NEEDS_RSTONUBR)
+ bp->rx_intr_mask |= MACB_BIT(RXUBR);
+
mac = of_get_mac_address(np);
if (mac) {
ether_addr_copy(bp->dev->dev_addr, mac);
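The refactor replaces the fixed MACB_RX_INT_FLAGS constant with a per-device mask so RXUBR is only enabled on the two revisions that need the reset-on-UBR workaround. A sketch of the mask construction with illustrative bit positions (the real MACB register layout differs):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)              (1u << (n))
#define F_RCOMP             BIT(1)   /* illustrative */
#define F_RXUBR             BIT(2)   /* illustrative */
#define F_ROVR              BIT(10)  /* illustrative */
#define CAPS_NEEDS_RSTONUBR BIT(8)

static uint32_t build_rx_intr_mask(uint32_t caps)
{
    uint32_t mask = F_RCOMP | F_ROVR;

    if (caps & CAPS_NEEDS_RSTONUBR)
        mask |= F_RXUBR;
    return mask;
}

int main(void)
{
    printf("generic MACB: 0x%x\n", build_rx_intr_mask(0));
    printf("zynq/at91:    0x%x\n", build_rx_intr_mask(CAPS_NEEDS_RSTONUBR));
    return 0;
}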
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 6242249..b043370 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -2419,6 +2419,8 @@
out_notify_fail:
(void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
+ /* safe for ACPI FW */
+ of_node_put(to_of_node(priv->fwnode));
free_netdev(ndev);
return ret;
}
@@ -2448,6 +2450,9 @@
set_bit(NIC_STATE_REMOVING, &priv->state);
(void)cancel_work_sync(&priv->service_task);
+ /* safe for ACPI FW */
+ of_node_put(to_of_node(priv->fwnode));
+
free_netdev(ndev);
return 0;
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 774beda..e2710ff 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -1157,16 +1157,18 @@
*/
static int hns_nic_nway_reset(struct net_device *netdev)
{
- int ret = 0;
struct phy_device *phy = netdev->phydev;
- if (netif_running(netdev)) {
- /* if autoneg is disabled, don't restart auto-negotiation */
- if (phy && phy->autoneg == AUTONEG_ENABLE)
- ret = genphy_restart_aneg(phy);
- }
+ if (!netif_running(netdev))
+ return 0;
- return ret;
+ if (!phy)
+ return -EOPNOTSUPP;
+
+ if (phy->autoneg != AUTONEG_ENABLE)
+ return -EINVAL;
+
+ return genphy_restart_aneg(phy);
}
static u32
diff --git a/drivers/net/ethernet/hisilicon/hns_mdio.c b/drivers/net/ethernet/hisilicon/hns_mdio.c
index 017e084..baf5cc2 100644
--- a/drivers/net/ethernet/hisilicon/hns_mdio.c
+++ b/drivers/net/ethernet/hisilicon/hns_mdio.c
@@ -321,7 +321,7 @@
}
hns_mdio_cmd_write(mdio_dev, is_c45,
- MDIO_C45_WRITE_ADDR, phy_id, devad);
+ MDIO_C45_READ, phy_id, devad);
}
/* Step 5: waiting for MDIO_COMMAND_REG's mdio_start==0 */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index 41fa22c..f81ad0a 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -424,9 +424,9 @@
struct rtnl_link_stats64 *stats)
{
struct i40e_netdev_priv *np = netdev_priv(netdev);
- struct i40e_ring *tx_ring, *rx_ring;
struct i40e_vsi *vsi = np->vsi;
struct rtnl_link_stats64 *vsi_stats = i40e_get_vsi_stats_struct(vsi);
+ struct i40e_ring *ring;
int i;
if (test_bit(__I40E_VSI_DOWN, vsi->state))
@@ -440,24 +440,26 @@
u64 bytes, packets;
unsigned int start;
- tx_ring = READ_ONCE(vsi->tx_rings[i]);
- if (!tx_ring)
+ ring = READ_ONCE(vsi->tx_rings[i]);
+ if (!ring)
continue;
- i40e_get_netdev_stats_struct_tx(tx_ring, stats);
+ i40e_get_netdev_stats_struct_tx(ring, stats);
- rx_ring = &tx_ring[1];
+ if (i40e_enabled_xdp_vsi(vsi)) {
+ ring++;
+ i40e_get_netdev_stats_struct_tx(ring, stats);
+ }
+ ring++;
do {
- start = u64_stats_fetch_begin_irq(&rx_ring->syncp);
- packets = rx_ring->stats.packets;
- bytes = rx_ring->stats.bytes;
- } while (u64_stats_fetch_retry_irq(&rx_ring->syncp, start));
+ start = u64_stats_fetch_begin_irq(&ring->syncp);
+ packets = ring->stats.packets;
+ bytes = ring->stats.bytes;
+ } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
stats->rx_packets += packets;
stats->rx_bytes += bytes;
- if (i40e_enabled_xdp_vsi(vsi))
- i40e_get_netdev_stats_struct_tx(&rx_ring[1], stats);
}
rcu_read_unlock();
diff --git a/drivers/net/ethernet/marvell/sky2.c b/drivers/net/ethernet/marvell/sky2.c
index ae2f350..1485f66 100644
--- a/drivers/net/ethernet/marvell/sky2.c
+++ b/drivers/net/ethernet/marvell/sky2.c
@@ -46,6 +46,7 @@
#include <linux/mii.h>
#include <linux/of_device.h>
#include <linux/of_net.h>
+#include <linux/dmi.h>
#include <asm/irq.h>
@@ -93,7 +94,7 @@
module_param(copybreak, int, 0);
MODULE_PARM_DESC(copybreak, "Receive copy threshold");
-static int disable_msi = 0;
+static int disable_msi = -1;
module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
@@ -4931,6 +4932,24 @@
return buf;
}
+static const struct dmi_system_id msi_blacklist[] = {
+ {
+ .ident = "Dell Inspiron 1545",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+ DMI_MATCH(DMI_PRODUCT_NAME, "Inspiron 1545"),
+ },
+ },
+ {
+ .ident = "Gateway P-79",
+ .matches = {
+ DMI_MATCH(DMI_SYS_VENDOR, "Gateway"),
+ DMI_MATCH(DMI_PRODUCT_NAME, "P-79"),
+ },
+ },
+ {}
+};
+
static int sky2_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
struct net_device *dev, *dev1;
@@ -5042,6 +5061,9 @@
goto err_out_free_pci;
}
+ if (disable_msi == -1)
+ disable_msi = !!dmi_check_system(msi_blacklist);
+
if (!disable_msi && pci_enable_msi(pdev) == 0) {
err = sky2_test_msi(hw);
if (err) {
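Changing the default from 0 to -1 turns disable_msi into a tristate: -1 means auto-detect from the DMI blacklist at probe time, while an explicit 0 or 1 from the user still wins. A standalone model of the resolution (dmi_hit stands in for dmi_check_system()):

#include <stdbool.h>
#include <stdio.h>

static int resolve_disable_msi(int param, bool dmi_hit)
{
    if (param == -1)            /* auto */
        return dmi_hit ? 1 : 0;
    return param;               /* explicit user choice wins */
}

int main(void)
{
    printf("auto, blacklisted host: %d\n", resolve_disable_msi(-1, true));
    printf("auto, healthy host:     %d\n", resolve_disable_msi(-1, false));
    printf("user forces MSI on:     %d\n", resolve_disable_msi(0, true));
    return 0;
}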
diff --git a/drivers/net/ethernet/mellanox/mlx4/cmd.c b/drivers/net/ethernet/mellanox/mlx4/cmd.c
index e65bc3c..857588e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx4/cmd.c
@@ -2645,6 +2645,8 @@
if (!priv->cmd.context)
return -ENOMEM;
+ if (mlx4_is_mfunc(dev))
+ mutex_lock(&priv->cmd.slave_cmd_mutex);
down_write(&priv->cmd.switch_sem);
for (i = 0; i < priv->cmd.max_cmds; ++i) {
priv->cmd.context[i].token = i;
@@ -2670,6 +2672,8 @@
down(&priv->cmd.poll_sem);
priv->cmd.use_events = 1;
up_write(&priv->cmd.switch_sem);
+ if (mlx4_is_mfunc(dev))
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
return err;
}
@@ -2682,6 +2686,8 @@
struct mlx4_priv *priv = mlx4_priv(dev);
int i;
+ if (mlx4_is_mfunc(dev))
+ mutex_lock(&priv->cmd.slave_cmd_mutex);
down_write(&priv->cmd.switch_sem);
priv->cmd.use_events = 0;
@@ -2689,9 +2695,12 @@
down(&priv->cmd.event_sem);
kfree(priv->cmd.context);
+ priv->cmd.context = NULL;
up(&priv->cmd.poll_sem);
up_write(&priv->cmd.switch_sem);
+ if (mlx4_is_mfunc(dev))
+ mutex_unlock(&priv->cmd.slave_cmd_mutex);
}
struct mlx4_cmd_mailbox *mlx4_alloc_cmd_mailbox(struct mlx4_dev *dev)
diff --git a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
index 31bd567..676428a 100644
--- a/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
+++ b/drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
@@ -2719,13 +2719,13 @@
int total_pages;
int total_mem;
int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
+ int tot;
sq_size = 1 << (log_sq_size + log_sq_sride + 4);
rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
total_mem = sq_size + rq_size;
- total_pages =
- roundup_pow_of_two((total_mem + (page_offset << 6)) >>
- page_shift);
+ tot = (total_mem + (page_offset << 6)) >> page_shift;
+ total_pages = !tot ? 1 : roundup_pow_of_two(tot);
return total_pages;
}
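The rewrite guards the degenerate case: for a tiny QP the shifted sum can be zero, and the kernel's roundup_pow_of_two() is undefined for 0. A standalone model (the helper below is a plain C stand-in for the kernel macro and, like it, assumes a non-zero argument):

#include <stdio.h>

static unsigned int roundup_pow_of_two(unsigned int n)
{
    unsigned int p = 1;

    while (p < n)
        p <<= 1;
    return p;   /* callers must ensure n != 0 */
}

static unsigned int total_pages(unsigned int total_mem,
                                unsigned int offset_bytes,
                                unsigned int page_shift)
{
    unsigned int tot = (total_mem + offset_bytes) >> page_shift;

    return !tot ? 1 : roundup_pow_of_two(tot);
}

int main(void)
{
    printf("empty QP, 4K pages: %u\n", total_pages(0, 0, 12));
    printf("24KB QP,  4K pages: %u\n", total_pages(24576, 0, 12));
    return 0;
}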
diff --git a/drivers/net/ethernet/microchip/lan743x_main.c b/drivers/net/ethernet/microchip/lan743x_main.c
index 42f5bfa..2083415 100644
--- a/drivers/net/ethernet/microchip/lan743x_main.c
+++ b/drivers/net/ethernet/microchip/lan743x_main.c
@@ -585,8 +585,7 @@
if (adapter->csr.flags &
LAN743X_CSR_FLAG_SUPPORTS_INTR_AUTO_SET_CLR) {
- flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR |
- LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
+ flags = LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_SET |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_SET |
LAN743X_VECTOR_FLAG_SOURCE_ENABLE_AUTO_CLEAR |
LAN743X_VECTOR_FLAG_SOURCE_STATUS_AUTO_CLEAR;
@@ -599,12 +598,6 @@
/* map TX interrupt to vector */
int_vec_map1 |= INT_VEC_MAP1_TX_VEC_(index, vector);
lan743x_csr_write(adapter, INT_VEC_MAP1, int_vec_map1);
- if (flags &
- LAN743X_VECTOR_FLAG_VECTOR_ENABLE_AUTO_CLEAR) {
- int_vec_en_auto_clr |= INT_VEC_EN_(vector);
- lan743x_csr_write(adapter, INT_VEC_EN_AUTO_CLR,
- int_vec_en_auto_clr);
- }
/* Remove TX interrupt from shared mask */
intr->vector_list[0].int_mask &= ~int_bit;
@@ -1403,7 +1396,8 @@
}
static void lan743x_tx_frame_add_lso(struct lan743x_tx *tx,
- unsigned int frame_length)
+ unsigned int frame_length,
+ int nr_frags)
{
/* called only from within lan743x_tx_xmit_frame.
* assuming tx->ring_lock has already been acquired.
@@ -1413,6 +1407,10 @@
/* wrap up previous descriptor */
tx->frame_data0 |= TX_DESC_DATA0_EXT_;
+ if (nr_frags <= 0) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ }
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
tx_descriptor->data0 = tx->frame_data0;
@@ -1517,8 +1515,11 @@
u32 tx_tail_flags = 0;
/* wrap up previous descriptor */
- tx->frame_data0 |= TX_DESC_DATA0_LS_;
- tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ if ((tx->frame_data0 & TX_DESC_DATA0_DTYPE_MASK_) ==
+ TX_DESC_DATA0_DTYPE_DATA_) {
+ tx->frame_data0 |= TX_DESC_DATA0_LS_;
+ tx->frame_data0 |= TX_DESC_DATA0_IOC_;
+ }
tx_descriptor = &tx->ring_cpu_ptr[tx->frame_tail];
buffer_info = &tx->buffer_info[tx->frame_tail];
@@ -1603,7 +1604,7 @@
}
if (gso)
- lan743x_tx_frame_add_lso(tx, frame_length);
+ lan743x_tx_frame_add_lso(tx, frame_length, nr_frags);
if (nr_frags <= 0)
goto finish;
@@ -1897,7 +1898,17 @@
return ((++index) % rx->ring_size);
}
-static int lan743x_rx_allocate_ring_element(struct lan743x_rx *rx, int index)
+static struct sk_buff *lan743x_rx_allocate_skb(struct lan743x_rx *rx)
+{
+ int length = 0;
+
+ length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
+ return __netdev_alloc_skb(rx->adapter->netdev,
+ length, GFP_ATOMIC | GFP_DMA);
+}
+
+static int lan743x_rx_init_ring_element(struct lan743x_rx *rx, int index,
+ struct sk_buff *skb)
{
struct lan743x_rx_buffer_info *buffer_info;
struct lan743x_rx_descriptor *descriptor;
@@ -1906,9 +1917,7 @@
length = (LAN743X_MAX_FRAME_SIZE + ETH_HLEN + 4 + RX_HEAD_PADDING);
descriptor = &rx->ring_cpu_ptr[index];
buffer_info = &rx->buffer_info[index];
- buffer_info->skb = __netdev_alloc_skb(rx->adapter->netdev,
- length,
- GFP_ATOMIC | GFP_DMA);
+ buffer_info->skb = skb;
if (!(buffer_info->skb))
return -ENOMEM;
buffer_info->dma_ptr = dma_map_single(&rx->adapter->pdev->dev,
@@ -2055,8 +2064,19 @@
/* packet is available */
if (first_index == last_index) {
/* single buffer packet */
+ struct sk_buff *new_skb = NULL;
int packet_length;
+ new_skb = lan743x_rx_allocate_skb(rx);
+ if (!new_skb) {
+ /* failed to allocate next skb.
+ * Memory is very low.
+ * Drop this packet and reuse buffer.
+ */
+ lan743x_rx_reuse_ring_element(rx, first_index);
+ goto process_extension;
+ }
+
buffer_info = &rx->buffer_info[first_index];
skb = buffer_info->skb;
descriptor = &rx->ring_cpu_ptr[first_index];
@@ -2076,7 +2096,7 @@
skb_put(skb, packet_length - 4);
skb->protocol = eth_type_trans(skb,
rx->adapter->netdev);
- lan743x_rx_allocate_ring_element(rx, first_index);
+ lan743x_rx_init_ring_element(rx, first_index, new_skb);
} else {
int index = first_index;
@@ -2089,26 +2109,23 @@
if (first_index <= last_index) {
while ((index >= first_index) &&
(index <= last_index)) {
- lan743x_rx_release_ring_element(rx,
- index);
- lan743x_rx_allocate_ring_element(rx,
- index);
+ lan743x_rx_reuse_ring_element(rx,
+ index);
index = lan743x_rx_next_index(rx,
index);
}
} else {
while ((index >= first_index) ||
(index <= last_index)) {
- lan743x_rx_release_ring_element(rx,
- index);
- lan743x_rx_allocate_ring_element(rx,
- index);
+ lan743x_rx_reuse_ring_element(rx,
+ index);
index = lan743x_rx_next_index(rx,
index);
}
}
}
+process_extension:
if (extension_index >= 0) {
descriptor = &rx->ring_cpu_ptr[extension_index];
buffer_info = &rx->buffer_info[extension_index];
@@ -2285,7 +2302,9 @@
rx->last_head = 0;
for (index = 0; index < rx->ring_size; index++) {
- ret = lan743x_rx_allocate_ring_element(rx, index);
+ struct sk_buff *new_skb = lan743x_rx_allocate_skb(rx);
+
+ ret = lan743x_rx_init_ring_element(rx, index, new_skb);
if (ret)
goto cleanup;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c
index 2f69ee9..4dd82a1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_dev.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c
@@ -473,19 +473,19 @@
/* get pq index according to PQ_FLAGS */
static u16 *qed_init_qm_get_idx_from_flags(struct qed_hwfn *p_hwfn,
- u32 pq_flags)
+ unsigned long pq_flags)
{
struct qed_qm_info *qm_info = &p_hwfn->qm_info;
/* Can't have multiple flags set here */
- if (bitmap_weight((unsigned long *)&pq_flags,
+ if (bitmap_weight(&pq_flags,
sizeof(pq_flags) * BITS_PER_BYTE) > 1) {
- DP_ERR(p_hwfn, "requested multiple pq flags 0x%x\n", pq_flags);
+ DP_ERR(p_hwfn, "requested multiple pq flags 0x%lx\n", pq_flags);
goto err;
}
if (!(qed_get_pq_flags(p_hwfn) & pq_flags)) {
- DP_ERR(p_hwfn, "pq flag 0x%x is not set\n", pq_flags);
+ DP_ERR(p_hwfn, "pq flag 0x%lx is not set\n", pq_flags);
goto err;
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c
index 67c02ea93..64ac95c 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c
@@ -609,6 +609,10 @@
(!!(accept_filter & QED_ACCEPT_MCAST_MATCHED) &&
!!(accept_filter & QED_ACCEPT_MCAST_UNMATCHED)));
+ SET_FIELD(state, ETH_VPORT_TX_MODE_UCAST_ACCEPT_ALL,
+ (!!(accept_filter & QED_ACCEPT_UCAST_MATCHED) &&
+ !!(accept_filter & QED_ACCEPT_UCAST_UNMATCHED)));
+
SET_FIELD(state, ETH_VPORT_TX_MODE_BCAST_ACCEPT_ALL,
!!(accept_filter & QED_ACCEPT_BCAST));
@@ -744,6 +748,11 @@
return rc;
}
+ if (p_params->update_ctl_frame_check) {
+ p_cmn->ctl_frame_mac_check_en = p_params->mac_chk_en;
+ p_cmn->ctl_frame_ethtype_check_en = p_params->ethtype_chk_en;
+ }
+
/* Update mcast bins for VFs, PF doesn't use this functionality */
qed_sp_update_mcast_bin(p_hwfn, p_ramrod, p_params);
@@ -2207,7 +2216,7 @@
u16 num_queues = 0;
/* Since the feature controls only queue-zones,
- * make sure we have the contexts [rx, tx, xdp] to
+ * make sure we have the contexts [rx, xdp, tcs] to
* match.
*/
for_each_hwfn(cdev, i) {
@@ -2217,7 +2226,8 @@
u16 cids;
cids = hwfn->pf_params.eth_pf_params.num_cons;
- num_queues += min_t(u16, l2_queues, cids / 3);
+ cids /= (2 + info->num_tc);
+ num_queues += min_t(u16, l2_queues, cids);
}
/* queues might theoretically be >256, but interrupts'
@@ -2688,7 +2698,8 @@
if (type == QED_FILTER_RX_MODE_TYPE_PROMISC) {
accept_flags.rx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
QED_ACCEPT_MCAST_UNMATCHED;
- accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
+ accept_flags.tx_accept_filter |= QED_ACCEPT_UCAST_UNMATCHED |
+ QED_ACCEPT_MCAST_UNMATCHED;
} else if (type == QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC) {
accept_flags.rx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
accept_flags.tx_accept_filter |= QED_ACCEPT_MCAST_UNMATCHED;
diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.h b/drivers/net/ethernet/qlogic/qed/qed_l2.h
index 8d80f10..7127d5a 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_l2.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_l2.h
@@ -219,6 +219,9 @@
struct qed_rss_params *rss_params;
struct qed_filter_accept_flags accept_flags;
struct qed_sge_tpa_params *sge_tpa_params;
+ u8 update_ctl_frame_check;
+ u8 mac_chk_en;
+ u8 ethtype_chk_en;
};
int qed_sp_vport_update(struct qed_hwfn *p_hwfn,
diff --git a/drivers/net/ethernet/qlogic/qed/qed_ll2.c b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
index 92cd8ab..015de1e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_ll2.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_ll2.c
@@ -2430,19 +2430,24 @@
{
struct qed_ll2_tx_pkt_info pkt;
const skb_frag_t *frag;
+ u8 flags = 0, nr_frags;
int rc = -EINVAL, i;
dma_addr_t mapping;
u16 vlan = 0;
- u8 flags = 0;
if (unlikely(skb->ip_summed != CHECKSUM_NONE)) {
DP_INFO(cdev, "Cannot transmit a checksummed packet\n");
return -EINVAL;
}
- if (1 + skb_shinfo(skb)->nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
+ /* Cache number of fragments from SKB since SKB may be freed by
+ * the completion routine after calling qed_ll2_prepare_tx_packet()
+ */
+ nr_frags = skb_shinfo(skb)->nr_frags;
+
+ if (1 + nr_frags > CORE_LL2_TX_MAX_BDS_PER_PACKET) {
DP_ERR(cdev, "Cannot transmit a packet with %d fragments\n",
- 1 + skb_shinfo(skb)->nr_frags);
+ 1 + nr_frags);
return -EINVAL;
}
@@ -2464,7 +2469,7 @@
}
memset(&pkt, 0, sizeof(pkt));
- pkt.num_of_bds = 1 + skb_shinfo(skb)->nr_frags;
+ pkt.num_of_bds = 1 + nr_frags;
pkt.vlan = vlan;
pkt.bd_flags = flags;
pkt.tx_dest = QED_LL2_TX_DEST_NW;
@@ -2475,12 +2480,17 @@
test_bit(QED_LL2_XMIT_FLAGS_FIP_DISCOVERY, &xmit_flags))
pkt.remove_stag = true;
+ /* qed_ll2_prepare_tx_packet() may actually send the packet if
+ * there are no fragments in the skb and subsequently the completion
+ * routine may run and free the SKB, so the SKB must not be
+ * dereferenced beyond this point unless it has fragments.
+ */
rc = qed_ll2_prepare_tx_packet(&cdev->hwfns[0], cdev->ll2->handle,
&pkt, 1);
if (rc)
goto err;
- for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+ for (i = 0; i < nr_frags; i++) {
frag = &skb_shinfo(skb)->frags[i];
mapping = skb_frag_dma_map(&cdev->pdev->dev, frag, 0,
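The pattern being fixed is a classic lifetime bug: once a submit call can free the skb (its completion may run before the call returns), every later loop bound has to come from a value cached beforehand. A stripped-down standalone model:

#include <stdio.h>
#include <stdlib.h>

struct pkt { int nr_frags; };

/* Models a submit whose completion may free the packet before return. */
static void submit(struct pkt *p)
{
    free(p);
}

int main(void)
{
    struct pkt *p = malloc(sizeof(*p));

    if (!p)
        return 1;
    p->nr_frags = 3;

    int nr_frags = p->nr_frags; /* cached BEFORE submit() */
    submit(p);

    for (int i = 0; i < nr_frags; i++)  /* p->nr_frags here would be use-after-free */
        printf("map fragment %d\n", i);
    return 0;
}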
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sp.h b/drivers/net/ethernet/qlogic/qed/qed_sp.h
index 3157c0d..dae2896e 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sp.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_sp.h
@@ -380,6 +380,7 @@
* @param p_hwfn
*/
void qed_consq_free(struct qed_hwfn *p_hwfn);
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn);
/**
* @file
diff --git a/drivers/net/ethernet/qlogic/qed/qed_spq.c b/drivers/net/ethernet/qlogic/qed/qed_spq.c
index 7106ad1..a0ee847 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_spq.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_spq.c
@@ -402,6 +402,11 @@
qed_eq_prod_update(p_hwfn, qed_chain_get_prod_idx(p_chain));
+ /* Attempt to post pending requests */
+ spin_lock_bh(&p_hwfn->p_spq->lock);
+ rc = qed_spq_pend_post(p_hwfn);
+ spin_unlock_bh(&p_hwfn->p_spq->lock);
+
return rc;
}
@@ -745,7 +750,7 @@
return 0;
}
-static int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
+int qed_spq_pend_post(struct qed_hwfn *p_hwfn)
{
struct qed_spq *p_spq = p_hwfn->p_spq;
struct qed_spq_entry *p_ent = NULL;
@@ -883,7 +888,6 @@
struct qed_spq_entry *p_ent = NULL;
struct qed_spq_entry *tmp;
struct qed_spq_entry *found = NULL;
- int rc;
if (!p_hwfn)
return -EINVAL;
@@ -941,12 +945,7 @@
*/
qed_spq_return_entry(p_hwfn, found);
- /* Attempt to post pending requests */
- spin_lock_bh(&p_spq->lock);
- rc = qed_spq_pend_post(p_hwfn);
- spin_unlock_bh(&p_spq->lock);
-
- return rc;
+ return 0;
}
int qed_consq_alloc(struct qed_hwfn *p_hwfn)
diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
index ca6290f..71a7af1 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c
@@ -1969,7 +1969,9 @@
params.vport_id = vf->vport_id;
params.max_buffers_per_cqe = start->max_buffers_per_cqe;
params.mtu = vf->mtu;
- params.check_mac = true;
+
+ /* Non trusted VFs should enable control frame filtering */
+ params.check_mac = !vf->p_vf_info.is_trusted_configured;
rc = qed_sp_eth_vport_start(p_hwfn, &params);
if (rc) {
@@ -5130,6 +5132,9 @@
params.opaque_fid = vf->opaque_fid;
params.vport_id = vf->vport_id;
+ params.update_ctl_frame_check = 1;
+ params.mac_chk_en = !vf_info->is_trusted_configured;
+
if (vf_info->rx_accept_mode & mask) {
flags->update_rx_mode_config = 1;
flags->rx_accept_filter = vf_info->rx_accept_mode;
@@ -5147,7 +5152,8 @@
}
if (flags->update_rx_mode_config ||
- flags->update_tx_mode_config)
+ flags->update_tx_mode_config ||
+ params.update_ctl_frame_check)
qed_sp_vport_update(hwfn, &params,
QED_SPQ_MODE_EBLOCK, NULL);
}
diff --git a/drivers/net/ethernet/qlogic/qed/qed_vf.c b/drivers/net/ethernet/qlogic/qed/qed_vf.c
index be118d0..6ab3fb0 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_vf.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_vf.c
@@ -261,6 +261,7 @@
struct pfvf_acquire_resp_tlv *resp = &p_iov->pf2vf_reply->acquire_resp;
struct pf_vf_pfdev_info *pfdev_info = &resp->pfdev_info;
struct vf_pf_resc_request *p_resc;
+ u8 retry_cnt = VF_ACQUIRE_THRESH;
bool resources_acquired = false;
struct vfpf_acquire_tlv *req;
int rc = 0, attempts = 0;
@@ -314,6 +315,15 @@
/* send acquire request */
rc = qed_send_msg2pf(p_hwfn, &resp->hdr.status, sizeof(*resp));
+
+ /* Re-try acquire in case of vf-pf hw channel timeout */
+ if (retry_cnt && rc == -EBUSY) {
+ DP_VERBOSE(p_hwfn, QED_MSG_IOV,
+ "VF retrying to acquire due to VPC timeout\n");
+ retry_cnt--;
+ continue;
+ }
+
if (rc)
goto exit;
diff --git a/drivers/net/ethernet/qlogic/qede/qede.h b/drivers/net/ethernet/qlogic/qede/qede.h
index 6a4d266..d242a57 100644
--- a/drivers/net/ethernet/qlogic/qede/qede.h
+++ b/drivers/net/ethernet/qlogic/qede/qede.h
@@ -489,6 +489,9 @@
/* Datapath functions definition */
netdev_tx_t qede_start_xmit(struct sk_buff *skb, struct net_device *ndev);
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback);
netdev_features_t qede_features_check(struct sk_buff *skb,
struct net_device *dev,
netdev_features_t features);
diff --git a/drivers/net/ethernet/qlogic/qede/qede_fp.c b/drivers/net/ethernet/qlogic/qede/qede_fp.c
index 1a78027..a96da16 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_fp.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_fp.c
@@ -1695,6 +1695,19 @@
return NETDEV_TX_OK;
}
+u16 qede_select_queue(struct net_device *dev, struct sk_buff *skb,
+ struct net_device *sb_dev,
+ select_queue_fallback_t fallback)
+{
+ struct qede_dev *edev = netdev_priv(dev);
+ int total_txq;
+
+ total_txq = QEDE_TSS_COUNT(edev) * edev->dev_info.num_tc;
+
+ return QEDE_TSS_COUNT(edev) ?
+ fallback(dev, skb, NULL) % total_txq : 0;
+}
+
/* 8B udp header + 8B base tunnel header + 32B option length */
#define QEDE_MAX_TUN_HDR_LEN 48
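With multiple traffic classes the TX queue space is QEDE_TSS_COUNT * num_tc wide, so the fallback hash has to be reduced modulo the full width rather than the per-TC count. The arithmetic in isolation, with illustrative numbers:

#include <stdio.h>

static unsigned int select_queue(unsigned int hash,
                                 unsigned int tss_count,
                                 unsigned int num_tc)
{
    unsigned int total_txq = tss_count * num_tc;

    return tss_count ? hash % total_txq : 0;
}

int main(void)
{
    /* 8 RSS queues x 4 TCs = 32 TX queues; hash 45 -> queue 13 */
    printf("queue = %u\n", select_queue(45, 8, 4));
    return 0;
}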
diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c
index 46d0f2e..f3d9c40 100644
--- a/drivers/net/ethernet/qlogic/qede/qede_main.c
+++ b/drivers/net/ethernet/qlogic/qede/qede_main.c
@@ -631,6 +631,7 @@
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -666,6 +667,7 @@
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
@@ -684,6 +686,7 @@
.ndo_open = qede_open,
.ndo_stop = qede_close,
.ndo_start_xmit = qede_start_xmit,
+ .ndo_select_queue = qede_select_queue,
.ndo_set_rx_mode = qede_set_rx_mode,
.ndo_set_mac_address = qede_set_mac_addr,
.ndo_validate_addr = eth_validate_addr,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
index 3075bfa..4d47bd1 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.c
@@ -1,5 +1,5 @@
// SPDX-License-Identifier: GPL-2.0-only
-/* Copyright (c) 2013-2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2019, The Linux Foundation. All rights reserved.
*
* RMNET configuration engine
*
@@ -216,6 +216,10 @@
synchronize_rcu();
kfree(ep);
}
+
+ if (!port->nr_rmnet_devs)
+ qmi_rmnet_qmi_exit(port->qmi_info, port);
+
rmnet_unregister_real_device(real_dev, port);
unregister_netdevice_queue(dev, head);
@@ -236,6 +240,7 @@
ASSERT_RTNL();
port = rmnet_get_port_rtnl(dev);
+ qmi_rmnet_qmi_exit(port->qmi_info, port);
rmnet_unregister_bridge(dev, port);
@@ -250,8 +255,6 @@
unregister_netdevice_many(&list);
- qmi_rmnet_qmi_exit(port->qmi_info, port);
-
rmnet_unregister_real_device(real_dev, port);
}
@@ -554,6 +557,7 @@
*tx = 0;
*rx = 0;
+ rcu_read_lock();
hash_for_each(((struct rmnet_port *)port)->muxed_ep, bkt, ep, hlnode) {
priv = netdev_priv(ep->egress_dev);
for_each_possible_cpu(cpu) {
@@ -565,6 +569,7 @@
} while (u64_stats_fetch_retry_irq(&ps->syncp, start));
}
}
+ rcu_read_unlock();
}
EXPORT_SYMBOL(rmnet_get_packets);
@@ -601,6 +606,30 @@
}
EXPORT_SYMBOL(rmnet_enable_all_flows);
+bool rmnet_all_flows_enabled(void *port)
+{
+ struct rmnet_endpoint *ep;
+ unsigned long bkt;
+ bool ret = true;
+
+ if (unlikely(!port))
+ return true;
+
+ rcu_read_lock();
+ hash_for_each_rcu(((struct rmnet_port *)port)->muxed_ep,
+ bkt, ep, hlnode) {
+ if (!qmi_rmnet_all_flows_enabled(ep->egress_dev)) {
+ ret = false;
+ goto out;
+ }
+ }
+out:
+ rcu_read_unlock();
+
+ return ret;
+}
+EXPORT_SYMBOL(rmnet_all_flows_enabled);
+
int rmnet_get_powersave_notif(void *port)
{
if (!port)
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
index 9901c13..d2a667d 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_config.h
@@ -28,8 +28,6 @@
u64 dl_hdr_count;
u64 dl_hdr_total_bytes;
u64 dl_hdr_total_pkts;
- u64 dl_hdr_avg_bytes;
- u64 dl_hdr_avg_pkts;
u64 dl_trl_last_seq;
u64 dl_trl_count;
};
@@ -58,6 +56,7 @@
struct timespec agg_time;
struct timespec agg_last;
struct hrtimer hrtimer;
+ struct work_struct agg_wq;
void *qmi_info;
@@ -130,6 +129,11 @@
void __rcu *qos_info;
};
+enum rmnet_dl_marker_prio {
+ RMNET_PERF,
+ RMNET_SHS,
+};
+
enum rmnet_trace_func {
RMNET_MODULE,
NW_STACK_MODULE,
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
index a5d0578..b606760 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.c
@@ -83,6 +83,11 @@
struct rmnet_port *port) __rcu __read_mostly;
EXPORT_SYMBOL(rmnet_shs_skb_entry);
+/* SHS hook handler for work queue */
+int (*rmnet_shs_skb_entry_wq)(struct sk_buff *skb,
+ struct rmnet_port *port) __rcu __read_mostly;
+EXPORT_SYMBOL(rmnet_shs_skb_entry_wq);
+
/* Generic handler */
void
@@ -125,6 +130,59 @@
}
EXPORT_SYMBOL(rmnet_deliver_skb);
+/* Important to note, port cannot be used here if it has gone stale */
+void
+rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
+ enum rmnet_packet_context ctx)
+{
+ int (*rmnet_shs_stamp)(struct sk_buff *skb, struct rmnet_port *port);
+ struct rmnet_priv *priv = netdev_priv(skb->dev);
+
+ trace_rmnet_low(RMNET_MODULE, RMNET_DLVR_SKB, 0xDEF, 0xDEF,
+ 0xDEF, 0xDEF, (void *)skb, NULL);
+ skb_reset_transport_header(skb);
+ skb_reset_network_header(skb);
+ rmnet_vnd_rx_fixup(skb->dev, skb->len);
+
+ skb->pkt_type = PACKET_HOST;
+ skb_set_mac_header(skb, 0);
+
+ /* packets coming from work queue context due to packet flush timer
+ * must go through the special workqueue path in SHS driver
+ */
+ rmnet_shs_stamp = (!ctx) ? rcu_dereference(rmnet_shs_skb_entry) :
+ rcu_dereference(rmnet_shs_skb_entry_wq);
+ if (rmnet_shs_stamp) {
+ rmnet_shs_stamp(skb, port);
+ return;
+ }
+
+ if (ctx == RMNET_NET_RX_CTX) {
+ if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
+ if (!rmnet_check_skb_can_gro(skb) &&
+ port->dl_marker_flush >= 0) {
+ struct napi_struct *napi =
+ get_current_napi_context();
+ napi_gro_receive(napi, skb);
+ port->dl_marker_flush++;
+ } else {
+ netif_receive_skb(skb);
+ }
+ } else {
+ if (!rmnet_check_skb_can_gro(skb))
+ gro_cells_receive(&priv->gro_cells, skb);
+ else
+ netif_receive_skb(skb);
+ }
+ } else {
+ if ((port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) &&
+ port->dl_marker_flush >= 0)
+ port->dl_marker_flush++;
+ gro_cells_receive(&priv->gro_cells, skb);
+ }
+}
+EXPORT_SYMBOL(rmnet_deliver_skb_wq);
+
/* Deliver a list of skbs after undoing coalescing */
static void rmnet_deliver_skb_list(struct sk_buff_head *head,
struct rmnet_port *port)
@@ -154,6 +212,7 @@
qmap = (struct rmnet_map_header *)rmnet_map_data_ptr(skb);
if (qmap->cd_bit) {
+ qmi_rmnet_set_dl_msg_active(port);
if (port->data_format & RMNET_INGRESS_FORMAT_DL_MARKER) {
if (!rmnet_map_flow_command(skb, port, false))
return;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
index 8ce6fe6c..09a2954 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_handlers.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0 */
-/* Copyright (c) 2013, 2016-2018 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013, 2016-2019 The Linux Foundation. All rights reserved.
*
* RMNET Data ingress/egress handler
*
@@ -10,8 +10,15 @@
#include "rmnet_config.h"
+enum rmnet_packet_context {
+ RMNET_NET_RX_CTX,
+ RMNET_WQ_CTX,
+};
+
void rmnet_egress_handler(struct sk_buff *skb);
void rmnet_deliver_skb(struct sk_buff *skb, struct rmnet_port *port);
+void rmnet_deliver_skb_wq(struct sk_buff *skb, struct rmnet_port *port,
+ enum rmnet_packet_context ctx);
void rmnet_set_skb_proto(struct sk_buff *skb);
rx_handler_result_t _rmnet_map_ingress_handler(struct sk_buff *skb,
struct rmnet_port *port);
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
index 6bc8a02..03ce4f3 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map.h
@@ -166,6 +166,7 @@
} __aligned(1);
struct rmnet_map_dl_ind {
+ u8 priority;
void (*dl_hdr_handler)(struct rmnet_map_dl_ind_hdr *dlhdr);
void (*dl_trl_handler)(struct rmnet_map_dl_ind_trl *dltrl);
struct list_head list;
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
index cb8bdf5..33da4bf 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_command.c
@@ -136,15 +136,6 @@
port->stats.dl_hdr_total_pkts += port->stats.dl_hdr_last_pkts;
port->stats.dl_hdr_count++;
- if (unlikely(!(port->stats.dl_hdr_count)))
- port->stats.dl_hdr_count = 1;
-
- port->stats.dl_hdr_avg_bytes = port->stats.dl_hdr_total_bytes /
- port->stats.dl_hdr_count;
-
- port->stats.dl_hdr_avg_pkts = port->stats.dl_hdr_total_pkts /
- port->stats.dl_hdr_count;
-
rmnet_map_dl_hdr_notify(port, dlhdr);
if (rmnet_perf) {
unsigned int pull_size;
@@ -261,11 +252,38 @@
int rmnet_map_dl_ind_register(struct rmnet_port *port,
struct rmnet_map_dl_ind *dl_ind)
{
+ struct rmnet_map_dl_ind *dl_ind_iterator;
+ bool empty_ind_list = true;
+
if (!port || !dl_ind || !dl_ind->dl_hdr_handler ||
!dl_ind->dl_trl_handler)
return -EINVAL;
- list_add_rcu(&dl_ind->list, &port->dl_list);
+ list_for_each_entry_rcu(dl_ind_iterator, &port->dl_list, list) {
+ empty_ind_list = false;
+ if (dl_ind_iterator->priority < dl_ind->priority) {
+ if (dl_ind_iterator->list.next) {
+ if (dl_ind->priority
+ < list_entry_rcu(dl_ind_iterator->list.next,
+ typeof(*dl_ind_iterator), list)->priority) {
+ list_add_rcu(&dl_ind->list,
+ &dl_ind_iterator->list);
+ break;
+ }
+ } else {
+ list_add_rcu(&dl_ind->list,
+ &dl_ind_iterator->list);
+ break;
+ }
+ } else {
+ list_add_tail_rcu(&dl_ind->list,
+ &dl_ind_iterator->list);
+ break;
+ }
+ }
+
+ if (empty_ind_list)
+ list_add_rcu(&dl_ind->list, &port->dl_list);
return 0;
}
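The registration above appears to keep the handler list sorted by ascending priority, so, per the new enum, RMNET_PERF handlers are notified before RMNET_SHS. The ordering invariant, modeled on a plain singly linked list instead of the kernel RCU list:

#include <stdio.h>

struct ind { int priority; struct ind *next; };

static void insert_sorted(struct ind **head, struct ind *n)
{
    struct ind **pp = head;

    while (*pp && (*pp)->priority <= n->priority)
        pp = &(*pp)->next;
    n->next = *pp;
    *pp = n;
}

int main(void)
{
    struct ind shs = { 1, NULL }, perf = { 0, NULL };
    struct ind *head = NULL;

    insert_sorted(&head, &shs);
    insert_sorted(&head, &perf);
    for (struct ind *i = head; i; i = i->next)
        printf("priority %d\n", i->priority);   /* 0 then 1 */
    return 0;
}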
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
index 8c13eed..e7b25ad 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_map_data.c
@@ -1044,11 +1044,6 @@
return rc;
}
-struct rmnet_agg_work {
- struct work_struct work;
- struct rmnet_port *port;
-};
-
long rmnet_agg_time_limit __read_mostly = 1000000L;
long rmnet_agg_bypass_time __read_mostly = 10000000L;
@@ -1082,22 +1077,17 @@
static void rmnet_map_flush_tx_packet_work(struct work_struct *work)
{
- struct rmnet_agg_work *real_work;
+ struct sk_buff *skb = NULL;
struct rmnet_port *port;
unsigned long flags;
- struct sk_buff *skb;
- int agg_count = 0;
- real_work = (struct rmnet_agg_work *)work;
- port = real_work->port;
- skb = NULL;
+ port = container_of(work, struct rmnet_port, agg_wq);
spin_lock_irqsave(&port->agg_lock, flags);
if (likely(port->agg_state == -EINPROGRESS)) {
/* Buffer may have already been shipped out */
if (likely(port->agg_skb)) {
skb = port->agg_skb;
- agg_count = port->agg_count;
port->agg_skb = NULL;
port->agg_count = 0;
memset(&port->agg_time, 0, sizeof(struct timespec));
@@ -1108,27 +1098,15 @@
spin_unlock_irqrestore(&port->agg_lock, flags);
if (skb)
dev_queue_xmit(skb);
-
- kfree(work);
}
enum hrtimer_restart rmnet_map_flush_tx_packet_queue(struct hrtimer *t)
{
- struct rmnet_agg_work *work;
struct rmnet_port *port;
port = container_of(t, struct rmnet_port, hrtimer);
- work = kmalloc(sizeof(*work), GFP_ATOMIC);
- if (!work) {
- port->agg_state = 0;
-
- return HRTIMER_NORESTART;
- }
-
- INIT_WORK(&work->work, rmnet_map_flush_tx_packet_work);
- work->port = port;
- schedule_work((struct work_struct *)work);
+ schedule_work(&port->agg_wq);
return HRTIMER_NORESTART;
}
@@ -1150,15 +1128,16 @@
* sparse, don't aggregate. We will need to tune this later
*/
diff = timespec_sub(port->agg_last, last);
+ size = port->egress_agg_size - skb->len;
- if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time) {
+ if (diff.tv_sec > 0 || diff.tv_nsec > rmnet_agg_bypass_time ||
+ size <= 0) {
spin_unlock_irqrestore(&port->agg_lock, flags);
skb->protocol = htons(ETH_P_MAP);
dev_queue_xmit(skb);
return;
}
- size = port->egress_agg_size - skb->len;
port->agg_skb = skb_copy_expand(skb, 0, size, GFP_ATOMIC);
if (!port->agg_skb) {
port->agg_skb = 0;
@@ -1213,6 +1192,8 @@
port->egress_agg_size = 8192;
port->egress_agg_count = 20;
spin_lock_init(&port->agg_lock);
+
+ INIT_WORK(&port->agg_wq, rmnet_map_flush_tx_packet_work);
}
void rmnet_map_tx_aggregate_exit(struct rmnet_port *port)
@@ -1220,6 +1201,8 @@
unsigned long flags;
hrtimer_cancel(&port->hrtimer);
+ cancel_work_sync(&port->agg_wq);
+
spin_lock_irqsave(&port->agg_lock, flags);
if (port->agg_state == -EINPROGRESS) {
if (port->agg_skb) {
diff --git a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
index 2c4c825..e6bba00 100644
--- a/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
+++ b/drivers/net/ethernet/qualcomm/rmnet/rmnet_vnd.c
@@ -16,6 +16,7 @@
#include "rmnet_vnd.h"
#include <soc/qcom/qmi_rmnet.h>
+#include <soc/qcom/rmnet_qmi.h>
#include <trace/events/rmnet.h>
/* RX/TX Fixup */
@@ -65,6 +66,7 @@
trace_rmnet_xmit_skb(skb);
rmnet_egress_handler(skb);
qmi_rmnet_burst_fc_check(dev, ip_type, mark, len);
+ qmi_rmnet_work_maybe_restart(rmnet_get_rmnet_port(dev));
} else {
this_cpu_inc(priv->pcpu_stats->stats.tx_drops);
kfree_skb(skb);
@@ -219,8 +221,6 @@
"DL header pkts received",
"DL header total bytes received",
"DL header total pkts received",
- "DL header average bytes",
- "DL header average packets",
"DL trailer last seen sequence",
"DL trailer pkts received",
};
@@ -311,10 +311,6 @@
rmnet_dev->needs_free_netdev = true;
rmnet_dev->ethtool_ops = &rmnet_ethtool_ops;
-
- /* This perm addr will be used as interface identifier by IPv6 */
- rmnet_dev->addr_assign_type = NET_ADDR_RANDOM;
- eth_random_addr(rmnet_dev->perm_addr);
}
/* Exposed API */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 8441c86..5f092bb 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -459,7 +459,7 @@
RCR_EFFS | RCR_ENCF | RCR_ETS0 | RCR_ESF | 0x18000000, RCR);
/* Set FIFO size */
- ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00222200, TGC);
+ ravb_write(ndev, TGC_TQP_AVBMODE1 | 0x00112200, TGC);
/* Timestamp enable */
ravb_write(ndev, TCCR_TFEN, TCCR);
diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 7b92336..3b174ea 100644
--- a/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -1342,8 +1342,10 @@
}
ret = phy_power_on(bsp_priv, true);
- if (ret)
+ if (ret) {
+ gmac_clk_enable(bsp_priv, false);
return ret;
+ }
pm_runtime_enable(dev);
pm_runtime_get_sync(dev);
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
index 9caf79b..4d5fb4b 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_ethtool.c
@@ -719,8 +719,11 @@
{
unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
- if (!clk)
- return 0;
+ if (!clk) {
+ clk = priv->plat->clk_ref_rate;
+ if (!clk)
+ return 0;
+ }
return (usec * (clk / 1000000)) / 256;
}
@@ -729,8 +732,11 @@
{
unsigned long clk = clk_get_rate(priv->plat->stmmac_clk);
- if (!clk)
- return 0;
+ if (!clk) {
+ clk = priv->plat->clk_ref_rate;
+ if (!clk)
+ return 0;
+ }
return (riwt * 256) / (clk / 1000000);
}
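Both converters now fall back to clk_ref_rate when clk_get_rate() reports 0, instead of silently returning 0 and breaking the coalescing math. The arithmetic in isolation (250 MHz is an illustrative clock):

#include <stdio.h>

static unsigned long usec_to_riwt(unsigned long usec,
                                  unsigned long clk, unsigned long clk_ref)
{
    if (!clk) {
        clk = clk_ref;
        if (!clk)
            return 0;
    }
    return (usec * (clk / 1000000)) / 256;
}

int main(void)
{
    printf("no clk, no ref rate: %lu\n", usec_to_riwt(1024, 0, 0));
    printf("fallback ref rate:   %lu\n", usec_to_riwt(1024, 0, 250000000));
    return 0;
}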
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 123b74e..43ab9e9 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -3028,10 +3028,22 @@
tx_q = &priv->tx_queue[queue];
+ if (priv->tx_path_in_lpi_mode)
+ stmmac_disable_eee_mode(priv);
+
/* Manage oversized TCP frames for GMAC4 device */
if (skb_is_gso(skb) && priv->tso) {
- if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))
+ if (skb_shinfo(skb)->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)) {
+ /*
+ * There is no way to determine the number of TSO
+ * capable Queues. Let's always use Queue 0
+ * because if TSO is supported then at least this
+ * one will be capable.
+ */
+ skb_set_queue_mapping(skb, 0);
+
return stmmac_tso_xmit(skb, dev);
+ }
}
if (unlikely(stmmac_tx_avail(priv, queue) < nfrags + 1)) {
@@ -3046,9 +3058,6 @@
return NETDEV_TX_BUSY;
}
- if (priv->tx_path_in_lpi_mode)
- stmmac_disable_eee_mode(priv);
-
entry = tx_q->cur_tx;
first_entry = entry;
WARN_ON(tx_q->tx_skbuff[first_entry]);
diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c
index 01711e6..e1427b5 100644
--- a/drivers/net/geneve.c
+++ b/drivers/net/geneve.c
@@ -636,15 +636,20 @@
static int geneve_open(struct net_device *dev)
{
struct geneve_dev *geneve = netdev_priv(dev);
- bool ipv6 = !!(geneve->info.mode & IP_TUNNEL_INFO_IPV6);
bool metadata = geneve->collect_md;
+ bool ipv4, ipv6;
int ret = 0;
+ ipv6 = geneve->info.mode & IP_TUNNEL_INFO_IPV6 || metadata;
+ ipv4 = !ipv6 || metadata;
#if IS_ENABLED(CONFIG_IPV6)
- if (ipv6 || metadata)
+ if (ipv6) {
ret = geneve_sock_add(geneve, true);
+ if (ret < 0 && ret != -EAFNOSUPPORT)
+ ipv4 = false;
+ }
#endif
- if (!ret && (!ipv6 || metadata))
+ if (ipv4)
ret = geneve_sock_add(geneve, false);
if (ret < 0)
geneve_sock_release(geneve);
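The reworked open path derives both families up front: metadata mode wants IPv4 and IPv6, an explicit IPv6 tunnel wants IPv6 only, and in metadata mode an IPv6 socket failure with -EAFNOSUPPORT still allows the IPv4 socket. The initial family selection in isolation:

#include <stdbool.h>
#include <stdio.h>

static void pick_families(bool info_ipv6, bool metadata,
                          bool *want_ipv4, bool *want_ipv6)
{
    *want_ipv6 = info_ipv6 || metadata;
    *want_ipv4 = !*want_ipv6 || metadata;
}

int main(void)
{
    bool v4, v6;

    pick_families(false, true, &v4, &v6);
    printf("metadata:  v4=%d v6=%d\n", v4, v6);  /* both */
    pick_families(true, false, &v4, &v6);
    printf("ipv6-only: v4=%d v6=%d\n", v4, v6);  /* v6 only */
    return 0;
}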
diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c
index c9e2a98..c832040 100644
--- a/drivers/net/hyperv/netvsc_drv.c
+++ b/drivers/net/hyperv/netvsc_drv.c
@@ -743,6 +743,14 @@
schedule_delayed_work(&ndev_ctx->dwork, 0);
}
+static void netvsc_comp_ipcsum(struct sk_buff *skb)
+{
+ struct iphdr *iph = (struct iphdr *)skb->data;
+
+ iph->check = 0;
+ iph->check = ip_fast_csum(iph, iph->ihl);
+}
+
static struct sk_buff *netvsc_alloc_recv_skb(struct net_device *net,
struct napi_struct *napi,
const struct ndis_tcp_ip_checksum_info *csum_info,
@@ -766,9 +774,17 @@
/* skb is already created with CHECKSUM_NONE */
skb_checksum_none_assert(skb);
- /*
- * In Linux, the IP checksum is always checked.
- * Do L4 checksum offload if enabled and present.
+ /* Incoming packets may have IP header checksum verified by the host.
+ * They may not have IP header checksum computed after coalescing.
+ * We compute it here if the flags are set, because on Linux, the IP
+ * checksum is always checked.
+ */
+ if (csum_info && csum_info->receive.ip_checksum_value_invalid &&
+ csum_info->receive.ip_checksum_succeeded &&
+ skb->protocol == htons(ETH_P_IP))
+ netvsc_comp_ipcsum(skb);
+
+ /* Do L4 checksum offload if enabled and present.
*/
if (csum_info && (net->features & NETIF_F_RXCSUM)) {
if (csum_info->receive.tcp_checksum_succeeded ||
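netvsc_comp_ipcsum() above recomputes the IPv4 header checksum that the host validated but did not rewrite after coalescing; ip_fast_csum() is the standard 16-bit one's-complement sum over the header. A standalone equivalent over an illustrative 20-byte header:

#include <stdint.h>
#include <stdio.h>

static uint16_t ip_checksum(uint8_t *hdr, int ihl_words)
{
    uint32_t sum = 0;

    hdr[10] = hdr[11] = 0;              /* iph->check = 0 */
    for (int i = 0; i < ihl_words * 4; i += 2)
        sum += (uint32_t)(hdr[i] << 8 | hdr[i + 1]);
    while (sum >> 16)
        sum = (sum & 0xffff) + (sum >> 16);
    return (uint16_t)~sum;
}

int main(void)
{
    uint8_t hdr[20] = {
        0x45, 0x00, 0x00, 0x3c, 0x1c, 0x46, 0x40, 0x00,
        0x40, 0x06, 0x00, 0x00, 0xac, 0x10, 0x0a, 0x63,
        0xac, 0x10, 0x0a, 0x0c,
    };

    printf("checksum = 0x%04x\n", ip_checksum(hdr, 5)); /* 0xb1e6 */
    return 0;
}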
diff --git a/drivers/net/ipvlan/ipvlan_main.c b/drivers/net/ipvlan/ipvlan_main.c
index 5fb5418..68b8007 100644
--- a/drivers/net/ipvlan/ipvlan_main.c
+++ b/drivers/net/ipvlan/ipvlan_main.c
@@ -494,6 +494,8 @@
if (!data)
return 0;
+ if (!ns_capable(dev_net(ipvlan->phy_dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
if (data[IFLA_IPVLAN_MODE]) {
u16 nmode = nla_get_u16(data[IFLA_IPVLAN_MODE]);
@@ -596,6 +598,8 @@
struct ipvl_dev *tmp = netdev_priv(phy_dev);
phy_dev = tmp->phy_dev;
+ if (!ns_capable(dev_net(phy_dev)->user_ns, CAP_NET_ADMIN))
+ return -EPERM;
} else if (!netif_is_ipvlan_port(phy_dev)) {
/* Exit early if the underlying link is invalid or busy */
if (phy_dev->type != ARPHRD_ETHER ||
diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c
index 15c5586..c5588d4 100644
--- a/drivers/net/phy/mdio_bus.c
+++ b/drivers/net/phy/mdio_bus.c
@@ -380,7 +380,6 @@
err = device_register(&bus->dev);
if (err) {
pr_err("mii_bus %s failed to register\n", bus->id);
- put_device(&bus->dev);
return -EINVAL;
}
diff --git a/drivers/net/phy/micrel.c b/drivers/net/phy/micrel.c
index 3db06b4..05a6ae3 100644
--- a/drivers/net/phy/micrel.c
+++ b/drivers/net/phy/micrel.c
@@ -339,6 +339,17 @@
return genphy_config_aneg(phydev);
}
+static int ksz8061_config_init(struct phy_device *phydev)
+{
+ int ret;
+
+ ret = phy_write_mmd(phydev, MDIO_MMD_PMAPMD, MDIO_DEVID1, 0xB61A);
+ if (ret)
+ return ret;
+
+ return kszphy_config_init(phydev);
+}
+
static int ksz9021_load_values_from_of(struct phy_device *phydev,
const struct device_node *of_node,
u16 reg,
@@ -934,7 +945,7 @@
.phy_id_mask = MICREL_PHY_ID_MASK,
.features = PHY_BASIC_FEATURES,
.flags = PHY_HAS_INTERRUPT,
- .config_init = kszphy_config_init,
+ .config_init = ksz8061_config_init,
.ack_interrupt = kszphy_ack_interrupt,
.config_intr = kszphy_config_intr,
.suspend = genphy_suspend,
diff --git a/drivers/net/phy/phylink.c b/drivers/net/phy/phylink.c
index 2787e8b..f6e70f2 100644
--- a/drivers/net/phy/phylink.c
+++ b/drivers/net/phy/phylink.c
@@ -348,6 +348,10 @@
linkmode_zero(state->lp_advertising);
state->interface = pl->link_config.interface;
state->an_enabled = pl->link_config.an_enabled;
+ state->speed = SPEED_UNKNOWN;
+ state->duplex = DUPLEX_UNKNOWN;
+ state->pause = MLO_PAUSE_NONE;
+ state->an_complete = 0;
state->link = 1;
return pl->ops->mac_link_state(ndev, state);
diff --git a/drivers/net/ppp/pptp.c b/drivers/net/ppp/pptp.c
index 8f09edd..50c6055 100644
--- a/drivers/net/ppp/pptp.c
+++ b/drivers/net/ppp/pptp.c
@@ -532,6 +532,7 @@
pppox_unbind_sock(sk);
}
skb_queue_purge(&sk->sk_receive_queue);
+ dst_release(rcu_dereference_protected(sk->sk_dst_cache, 1));
}
static int pptp_create(struct net *net, struct socket *sock, int kern)
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index 723814d..95ee9d8 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1259,7 +1259,7 @@
list_add_tail_rcu(&port->list, &team->port_list);
team_port_enable(team, port);
__team_compute_features(team);
- __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
+ __team_port_change_port_added(port, !!netif_oper_up(port_dev));
__team_options_change_check(team);
netdev_info(dev, "Port device %s added\n", portname);
@@ -2918,7 +2918,7 @@
switch (event) {
case NETDEV_UP:
- if (netif_carrier_ok(dev))
+ if (netif_oper_up(dev))
team_port_change_check(port, true);
break;
case NETDEV_DOWN:
diff --git a/drivers/net/team/team_mode_loadbalance.c b/drivers/net/team/team_mode_loadbalance.c
index a5ef970..5541e1c 100644
--- a/drivers/net/team/team_mode_loadbalance.c
+++ b/drivers/net/team/team_mode_loadbalance.c
@@ -325,6 +325,20 @@
return 0;
}
+static void lb_bpf_func_free(struct team *team)
+{
+ struct lb_priv *lb_priv = get_lb_priv(team);
+ struct bpf_prog *fp;
+
+ if (!lb_priv->ex->orig_fprog)
+ return;
+
+ __fprog_destroy(lb_priv->ex->orig_fprog);
+ fp = rcu_dereference_protected(lb_priv->fp,
+ lockdep_is_held(&team->lock));
+ bpf_prog_destroy(fp);
+}
+
static int lb_tx_method_get(struct team *team, struct team_gsetter_ctx *ctx)
{
struct lb_priv *lb_priv = get_lb_priv(team);
@@ -639,6 +653,7 @@
team_options_unregister(team, lb_options,
ARRAY_SIZE(lb_options));
+ lb_bpf_func_free(team);
cancel_delayed_work_sync(&lb_priv->ex->stats.refresh_dw);
free_percpu(lb_priv->pcpu_stats);
kfree(lb_priv->ex);
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 0baade2..ee4f901 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -2126,9 +2126,9 @@
}
add_wait_queue(&tfile->wq.wait, &wait);
- current->state = TASK_INTERRUPTIBLE;
while (1) {
+ set_current_state(TASK_INTERRUPTIBLE);
ptr = ptr_ring_consume(&tfile->tx_ring);
if (ptr)
break;
@@ -2144,7 +2144,7 @@
schedule();
}
- current->state = TASK_RUNNING;
+ __set_current_state(TASK_RUNNING);
remove_wait_queue(&tfile->wq.wait, &wait);
out:
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 735ad83..6e38135 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -976,6 +976,13 @@
0xff),
.driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
},
+ { /* Quectel EG12/EM12 */
+ USB_DEVICE_AND_INTERFACE_INFO(0x2c7c, 0x0512,
+ USB_CLASS_VENDOR_SPEC,
+ USB_SUBCLASS_VENDOR_SPEC,
+ 0xff),
+ .driver_info = (unsigned long)&qmi_wwan_info_quirk_dtr,
+ },
/* 3. Combined interface devices matching on interface number */
{QMI_FIXED_INTF(0x0408, 0xea42, 4)}, /* Yota / Megafon M100-1 */
@@ -1343,17 +1350,20 @@
return false;
}
-static bool quectel_ep06_diag_detected(struct usb_interface *intf)
+static bool quectel_diag_detected(struct usb_interface *intf)
{
struct usb_device *dev = interface_to_usbdev(intf);
struct usb_interface_descriptor intf_desc = intf->cur_altsetting->desc;
+ u16 id_vendor = le16_to_cpu(dev->descriptor.idVendor);
+ u16 id_product = le16_to_cpu(dev->descriptor.idProduct);
- if (le16_to_cpu(dev->descriptor.idVendor) == 0x2c7c &&
- le16_to_cpu(dev->descriptor.idProduct) == 0x0306 &&
- intf_desc.bNumEndpoints == 2)
+ if (id_vendor != 0x2c7c || intf_desc.bNumEndpoints != 2)
+ return false;
+
+ if (id_product == 0x0306 || id_product == 0x0512)
return true;
-
- return false;
+ else
+ return false;
}
static int qmi_wwan_probe(struct usb_interface *intf,
@@ -1390,13 +1400,13 @@
return -ENODEV;
}
- /* Quectel EP06/EM06/EG06 supports dynamic interface configuration, so
+ /* Several Quectel modems support dynamic interface configuration, so
* we need to match on class/subclass/protocol. These values are
* identical for the diagnostic- and QMI-interface, but bNumEndpoints is
 * different. Ignore the current interface if the number of endpoints
 * matches the number for the diag interface (two).
*/
- if (quectel_ep06_diag_detected(intf))
+ if (quectel_diag_detected(intf))
return -ENODEV;
return usbnet_probe(intf, id);
diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
index 9fc9aed6..52387f7 100644
--- a/drivers/net/vxlan.c
+++ b/drivers/net/vxlan.c
@@ -1469,6 +1469,14 @@
goto drop;
}
+ rcu_read_lock();
+
+ if (unlikely(!(vxlan->dev->flags & IFF_UP))) {
+ rcu_read_unlock();
+ atomic_long_inc(&vxlan->dev->rx_dropped);
+ goto drop;
+ }
+
stats = this_cpu_ptr(vxlan->dev->tstats);
u64_stats_update_begin(&stats->syncp);
stats->rx_packets++;
@@ -1476,6 +1484,9 @@
u64_stats_update_end(&stats->syncp);
gro_cells_receive(&vxlan->gro_cells, skb);
+
+ rcu_read_unlock();
+
return 0;
drop:
@@ -2460,6 +2471,8 @@
{
struct vxlan_dev *vxlan = netdev_priv(dev);
+ gro_cells_destroy(&vxlan->gro_cells);
+
vxlan_fdb_delete_default(vxlan, vxlan->cfg.vni);
free_percpu(dev->tstats);
@@ -3526,7 +3539,6 @@
vxlan_flush(vxlan, true);
- gro_cells_destroy(&vxlan->gro_cells);
list_del(&vxlan->next);
unregister_netdevice_queue(dev, head);
}
diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c
index c070a9e..fae572b 100644
--- a/drivers/net/wireless/ath/ath9k/init.c
+++ b/drivers/net/wireless/ath/ath9k/init.c
@@ -636,15 +636,15 @@
ret = ath9k_eeprom_request(sc, eeprom_name);
if (ret)
return ret;
+
+ ah->ah_flags &= ~AH_USE_EEPROM;
+ ah->ah_flags |= AH_NO_EEP_SWAP;
}
mac = of_get_mac_address(np);
if (mac)
ether_addr_copy(common->macaddr, mac);
- ah->ah_flags &= ~AH_USE_EEPROM;
- ah->ah_flags |= AH_NO_EEP_SWAP;
-
return 0;
}
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index 163c84a..b09a32e 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -620,7 +620,7 @@
ret = cnss_pci_start_mhi(pci_priv);
if (ret) {
- cnss_pr_err("Failed to start MHI, err = %d\n", ret);
+ cnss_fatal_err("Failed to start MHI, err = %d\n", ret);
if (!test_bit(CNSS_DEV_ERR_NOTIFY, &plat_priv->driver_state) &&
!pci_priv->pci_link_down_ind && timeout)
mod_timer(&plat_priv->fw_boot_timer,
@@ -1658,7 +1658,7 @@
ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
if (ret) {
- cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
+ cnss_fatal_err("Failed to trigger RDDM, err = %d\n", ret);
cnss_schedule_recovery(&pci_priv->pci_dev->dev,
CNSS_REASON_DEFAULT);
return ret;
@@ -2149,7 +2149,8 @@
ret = mhi_download_rddm_img(pci_priv->mhi_ctrl, in_panic);
if (ret) {
- cnss_pr_err("Failed to download RDDM image, err = %d\n", ret);
+ cnss_fatal_err("Failed to download RDDM image, err = %d\n",
+ ret);
cnss_pci_dump_registers(pci_priv);
return;
}
diff --git a/drivers/net/wireless/ti/wlcore/sdio.c b/drivers/net/wireless/ti/wlcore/sdio.c
index 750bea3..627df16 100644
--- a/drivers/net/wireless/ti/wlcore/sdio.c
+++ b/drivers/net/wireless/ti/wlcore/sdio.c
@@ -164,6 +164,12 @@
}
sdio_claim_host(func);
+ /*
+ * To guarantee that the SDIO card is power cycled, as required for
+ * FW programming to succeed, let's do a brute force HW reset.
+ */
+ mmc_hw_reset(card->host);
+
sdio_enable_func(func);
sdio_release_host(func);
@@ -174,20 +180,13 @@
{
struct sdio_func *func = dev_to_sdio_func(glue->dev);
struct mmc_card *card = func->card;
- int error;
sdio_claim_host(func);
sdio_disable_func(func);
sdio_release_host(func);
/* Let runtime PM know the card is powered off */
- error = pm_runtime_put(&card->dev);
- if (error < 0 && error != -EBUSY) {
- dev_err(&card->dev, "%s failed: %i\n", __func__, error);
-
- return error;
- }
-
+ pm_runtime_put(&card->dev);
return 0;
}
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 0ccb021..10d580c 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -454,6 +454,8 @@
if (xenvif_hash_cache_size == 0)
return;
+ BUG_ON(vif->hash.cache.count);
+
spin_lock_init(&vif->hash.cache.lock);
INIT_LIST_HEAD(&vif->hash.cache.list);
}
diff --git a/drivers/net/xen-netback/interface.c b/drivers/net/xen-netback/interface.c
index f6ae23f..82add0a 100644
--- a/drivers/net/xen-netback/interface.c
+++ b/drivers/net/xen-netback/interface.c
@@ -153,6 +153,13 @@
{
struct xenvif *vif = netdev_priv(dev);
unsigned int size = vif->hash.size;
+ unsigned int num_queues;
+
+ /* If queues are not set up internally - always return 0
+ * as the packet is going to be dropped anyway */
+ num_queues = READ_ONCE(vif->num_queues);
+ if (num_queues < 1)
+ return 0;
if (vif->hash.alg == XEN_NETIF_CTRL_HASH_ALGORITHM_NONE)
return fallback(dev, skb, NULL) % dev->real_num_tx_queues;
diff --git a/drivers/net/xen-netback/netback.c b/drivers/net/xen-netback/netback.c
index 3621e05..d5081ff 100644
--- a/drivers/net/xen-netback/netback.c
+++ b/drivers/net/xen-netback/netback.c
@@ -1072,11 +1072,6 @@
skb_frag_size_set(&frags[i], len);
}
- /* Copied all the bits from the frag list -- free it. */
- skb_frag_list_init(skb);
- xenvif_skb_zerocopy_prepare(queue, nskb);
- kfree_skb(nskb);
-
/* Release all the original (foreign) frags. */
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
skb_frag_unref(skb, f);
@@ -1145,6 +1140,8 @@
xenvif_fill_frags(queue, skb);
if (unlikely(skb_has_frag_list(skb))) {
+ struct sk_buff *nskb = skb_shinfo(skb)->frag_list;
+ xenvif_skb_zerocopy_prepare(queue, nskb);
if (xenvif_handle_frag_list(queue, skb)) {
if (net_ratelimit())
netdev_err(queue->vif->dev,
@@ -1153,6 +1150,9 @@
kfree_skb(skb);
continue;
}
+ /* Copied all the bits from the frag list -- free it. */
+ skb_frag_list_init(skb);
+ kfree_skb(nskb);
}
skb->dev = queue->vif->dev;
diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c
index e0d2b74..2cdb303 100644
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -1182,6 +1182,7 @@
* effects say only one namespace is affected.
*/
if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
+ mutex_lock(&ctrl->scan_lock);
nvme_start_freeze(ctrl);
nvme_wait_freeze(ctrl);
}
@@ -1210,8 +1211,10 @@
*/
if (effects & NVME_CMD_EFFECTS_LBCC)
nvme_update_formats(ctrl);
- if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK))
+ if (effects & (NVME_CMD_EFFECTS_LBCC | NVME_CMD_EFFECTS_CSE_MASK)) {
nvme_unfreeze(ctrl);
+ mutex_unlock(&ctrl->scan_lock);
+ }
if (effects & NVME_CMD_EFFECTS_CCC)
nvme_init_identify(ctrl);
if (effects & (NVME_CMD_EFFECTS_NIC | NVME_CMD_EFFECTS_NCC))
@@ -3292,6 +3295,7 @@
if (nvme_identify_ctrl(ctrl, &id))
return;
+ mutex_lock(&ctrl->scan_lock);
nn = le32_to_cpu(id->nn);
if (ctrl->vs >= NVME_VS(1, 1, 0) &&
!(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
@@ -3300,6 +3304,7 @@
}
nvme_scan_ns_sequential(ctrl, nn);
out_free_id:
+ mutex_unlock(&ctrl->scan_lock);
kfree(id);
down_write(&ctrl->namespaces_rwsem);
list_sort(NULL, &ctrl->namespaces, ns_cmp);
@@ -3535,6 +3540,7 @@
ctrl->state = NVME_CTRL_NEW;
spin_lock_init(&ctrl->lock);
+ mutex_init(&ctrl->scan_lock);
INIT_LIST_HEAD(&ctrl->namespaces);
init_rwsem(&ctrl->namespaces_rwsem);
ctrl->dev = dev;
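
The nvme core changes introduce ctrl->scan_lock so that passthrough commands whose effects can change namespaces (LBCC or CSE) cannot race with the namespace scan work. Condensed, the passthrough side now brackets the command like this (a sketch of the locking shape, not the full function):

	mutex_lock(&ctrl->scan_lock);
	nvme_start_freeze(ctrl);
	nvme_wait_freeze(ctrl);
	/* ... issue the command, revalidate formats ... */
	nvme_unfreeze(ctrl);
	mutex_unlock(&ctrl->scan_lock);

while nvme_scan_work() takes the same mutex around its identify-and-scan sequence, so the two can no longer interleave.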
diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h
index 60220de..e82cdae 100644
--- a/drivers/nvme/host/nvme.h
+++ b/drivers/nvme/host/nvme.h
@@ -148,6 +148,7 @@
enum nvme_ctrl_state state;
bool identified;
spinlock_t lock;
+ struct mutex scan_lock;
const struct nvme_ctrl_ops *ops;
struct request_queue *admin_q;
struct request_queue *connect_q;
diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c
index f46313f..7b9ef8e 100644
--- a/drivers/nvme/host/pci.c
+++ b/drivers/nvme/host/pci.c
@@ -2260,6 +2260,27 @@
if (dev->ctrl.ctrl_config & NVME_CC_ENABLE)
nvme_dev_disable(dev, false);
+ mutex_lock(&dev->shutdown_lock);
+ result = nvme_pci_enable(dev);
+ if (result)
+ goto out_unlock;
+
+ result = nvme_pci_configure_admin_queue(dev);
+ if (result)
+ goto out_unlock;
+
+ result = nvme_alloc_admin_tags(dev);
+ if (result)
+ goto out_unlock;
+
+ /*
+ * Limit the max command size to prevent iod->sg allocations going
+ * over a single page.
+ */
+ dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
+ dev->ctrl.max_segments = NVME_MAX_SEGS;
+ mutex_unlock(&dev->shutdown_lock);
+
/*
* Introduce CONNECTING state from nvme-fc/rdma transports to mark the
* initializing procedure here.
@@ -2270,25 +2291,6 @@
goto out;
}
- result = nvme_pci_enable(dev);
- if (result)
- goto out;
-
- result = nvme_pci_configure_admin_queue(dev);
- if (result)
- goto out;
-
- result = nvme_alloc_admin_tags(dev);
- if (result)
- goto out;
-
- /*
- * Limit the max command size to prevent iod->sg allocations going
- * over a single page.
- */
- dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
- dev->ctrl.max_segments = NVME_MAX_SEGS;
-
result = nvme_init_identify(&dev->ctrl);
if (result)
goto out;
@@ -2352,6 +2354,8 @@
nvme_start_ctrl(&dev->ctrl);
return;
+ out_unlock:
+ mutex_unlock(&dev->shutdown_lock);
out:
nvme_remove_dead_ctrl(dev, result);
}
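
Note the unit conversion in the relocated block: max_hw_sectors counts 512-byte sectors, so a limit expressed in KiB doubles. Assuming NVME_MAX_KB_SZ is 4096 (its value elsewhere in this driver; treat that as an assumption here), the shift works out as:

	/* KiB -> 512-byte sectors: 1 KiB = 2 sectors, hence the shift */
	dev->ctrl.max_hw_sectors = NVME_MAX_KB_SZ << 1;
	/* e.g. 4096 KiB -> 8192 sectors, i.e. a 4 MiB cap per command */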
diff --git a/drivers/of/irq.c b/drivers/of/irq.c
index 2653abf..e12f274 100644
--- a/drivers/of/irq.c
+++ b/drivers/of/irq.c
@@ -361,32 +361,36 @@
map += out_size;
map_len -= out_size;
}
- if (match) {
- /* Get the irqdomain-map-pass-thru property (optional) */
- pass = of_get_property(cur, pass_name, NULL);
- if (!pass)
- pass = dummy_pass;
- /*
- * Successfully parsed a irqdomain-map translation; copy new
- * specifier into the out structure, keeping the
- * bits specified in irqdomain-map-pass-thru.
- */
- match_array = map - out_size;
- for (i = 0; i < out_size; i++) {
- __be32 val = *(map - out_size + i);
-
- out->param[i] = in->param[i];
- if (i < in_size) {
- val &= ~pass[i];
- val |= cpu_to_be32(out->param[i]) & pass[i];
- }
-
- out->param[i] = be32_to_cpu(val);
- }
- out->param_count = in_size = out_size;
- out->fwnode = of_node_to_fwnode(new);
+ if (!match) {
+ ret = -EINVAL;
+ goto put;
}
+
+ /* Get the irqdomain-map-pass-thru property (optional) */
+ pass = of_get_property(cur, pass_name, NULL);
+ if (!pass)
+ pass = dummy_pass;
+
+ /*
+ * Successfully parsed an irqdomain-map translation; copy the new
+ * specifier into the out structure, keeping the
+ * bits specified in irqdomain-map-pass-thru.
+ */
+ match_array = map - out_size;
+ for (i = 0; i < out_size; i++) {
+ __be32 val = *(map - out_size + i);
+
+ out->param[i] = in->param[i];
+ if (i < in_size) {
+ val &= ~pass[i];
+ val |= cpu_to_be32(out->param[i]) & pass[i];
+ }
+
+ out->param[i] = be32_to_cpu(val);
+ }
+ out->param_count = in_size = out_size;
+ out->fwnode = of_node_to_fwnode(new);
put:
of_node_put(cur);
of_node_put(new);
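
The pass-thru merge above keeps, for each cell, the bits that irqdomain-map-pass-thru marks as owned by the child specifier and takes the rest from the map entry. A worked single-cell example with hypothetical values (endianness handling omitted for clarity):

	/*
	 * map value  val  = 0x00000012   (parent specifier from irqdomain-map)
	 * child      in   = 0x00000305   (incoming specifier)
	 * pass-thru  pass = 0x0000000f   (low nibble passes through)
	 *
	 * val &= ~pass;        -> 0x00000010
	 * val |= in & pass;    -> 0x00000015
	 *
	 * result: the parent's upper bits, the child's low nibble.
	 */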
diff --git a/drivers/pci/controller/pci-msm.c b/drivers/pci/controller/pci-msm.c
index d25a75e..c1d3850 100644
--- a/drivers/pci/controller/pci-msm.c
+++ b/drivers/pci/controller/pci-msm.c
@@ -551,6 +551,7 @@
uint32_t wr_halt_size;
uint32_t slv_addr_space_size;
uint32_t phy_status_offset;
+ uint32_t phy_status_bit;
uint32_t phy_power_down_offset;
uint32_t cpl_timeout;
uint32_t current_bdf;
@@ -1197,6 +1198,8 @@
dev->slv_addr_space_size);
PCIE_DBG_FS(dev, "phy_status_offset: 0x%x\n",
dev->phy_status_offset);
+ PCIE_DBG_FS(dev, "phy_status_bit: %u\n",
+ dev->phy_status_bit);
PCIE_DBG_FS(dev, "phy_power_down_offset: 0x%x\n",
dev->phy_power_down_offset);
PCIE_DBG_FS(dev, "cpl_timeout: 0x%x\n",
@@ -3217,7 +3220,8 @@
if (dev->rumi)
return true;
- if (readl_relaxed(dev->phy + dev->phy_status_offset) & BIT(6))
+ if (readl_relaxed(dev->phy + dev->phy_status_offset) &
+ BIT(dev->phy_status_bit))
return false;
else
return true;
@@ -5717,6 +5721,11 @@
PCIE_DBG(pcie_dev, "RC%d: phy-status-offset: 0x%x.\n", pcie_dev->rc_idx,
pcie_dev->phy_status_offset);
+ of_property_read_u32(pdev->dev.of_node, "qcom,phy-status-bit",
+ &pcie_dev->phy_status_bit);
+ PCIE_DBG(pcie_dev, "RC%d: phy-status-bit: %u.\n", pcie_dev->rc_idx,
+ pcie_dev->phy_status_bit);
+
of_property_read_u32(of_node, "qcom,phy-power-down-offset",
&pcie_dev->phy_power_down_offset);
PCIE_DBG(pcie_dev, "RC%d: phy-power-down-offset: 0x%x.\n",
diff --git a/drivers/pinctrl/pinctrl-mcp23s08.c b/drivers/pinctrl/pinctrl-mcp23s08.c
index cf73a40..cecbce2 100644
--- a/drivers/pinctrl/pinctrl-mcp23s08.c
+++ b/drivers/pinctrl/pinctrl-mcp23s08.c
@@ -832,8 +832,13 @@
break;
case MCP_TYPE_S18:
+ one_regmap_config =
+ devm_kmemdup(dev, &mcp23x17_regmap,
+ sizeof(struct regmap_config), GFP_KERNEL);
+ if (!one_regmap_config)
+ return -ENOMEM;
mcp->regmap = devm_regmap_init(dev, &mcp23sxx_spi_regmap, mcp,
- &mcp23x17_regmap);
+ one_regmap_config);
mcp->reg_shift = 1;
mcp->chip.ngpio = 16;
mcp->chip.label = "mcp23s18";
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 1f28884..cb932cb 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -34,7 +34,7 @@
};
-const char *ipa3_event_name[] = {
+const char *ipa3_event_name[IPA_EVENT_MAX_NUM] = {
__stringify(WLAN_CLIENT_CONNECT),
__stringify(WLAN_CLIENT_DISCONNECT),
__stringify(WLAN_CLIENT_POWER_SAVE_MODE),
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index e2c362c..54c7fc7 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -26,9 +26,6 @@
#include "wil_platform.h"
#include "msm_11ad.h"
-#define SMMU_BASE 0x20000000 /* Device address range base */
-#define SMMU_SIZE ((SZ_1G * 4ULL) - SMMU_BASE)
-
#define WIGIG_ENABLE_DELAY 50
#define WIGIG_SUBSYS_NAME "WIGIG"
@@ -39,9 +36,12 @@
#define VDD_MIN_UV 1028000
#define VDD_MAX_UV 1028000
#define VDD_MAX_UA 575000
-#define VDDIO_MIN_UV 1950000
+#define VDDIO_MIN_UV 1824000
#define VDDIO_MAX_UV 2040000
#define VDDIO_MAX_UA 70300
+#define VDD_LDO_MIN_UV 1800000
+#define VDD_LDO_MAX_UV 1800000
+#define VDD_LDO_MAX_UA 100000
#define WIGIG_MIN_CPU_BOOST_KBPS 150000
@@ -92,15 +92,6 @@
struct pci_saved_state *golden_state;
struct msm_pcie_register_event pci_event;
- /* SMMU */
- bool use_smmu; /* have SMMU enabled? */
- int smmu_s1_en;
- int smmu_fast_map;
- int smmu_coherent;
- struct dma_iommu_mapping *mapping;
- u32 smmu_base;
- u32 smmu_size;
-
/* bus frequency scaling */
struct msm_bus_scale_pdata *bus_scale;
u32 msm_bus_handle;
@@ -122,8 +113,9 @@
/* external vregs and clocks */
struct msm11ad_vreg vdd;
struct msm11ad_vreg vddio;
- struct msm11ad_clk rf_clk3;
- struct msm11ad_clk rf_clk3_pin;
+ struct msm11ad_vreg vdd_ldo;
+ struct msm11ad_clk rf_clk;
+ struct msm11ad_clk rf_clk_pin;
/* cpu boost support */
bool use_cpu_boost;
@@ -256,8 +248,18 @@
ctx->vddio.min_uV = VDDIO_MIN_UV;
ctx->vddio.max_uA = VDDIO_MAX_UA;
+ rc = msm_11ad_init_vreg(dev, &ctx->vdd_ldo, "vdd-ldo");
+ if (rc)
+ goto vdd_ldo_fail;
+
+ ctx->vdd_ldo.max_uV = VDD_LDO_MAX_UV;
+ ctx->vdd_ldo.min_uV = VDD_LDO_MIN_UV;
+ ctx->vdd_ldo.max_uA = VDD_LDO_MAX_UA;
+
return rc;
+vdd_ldo_fail:
+ msm_11ad_release_vreg(dev, &ctx->vddio);
vddio_fail:
msm_11ad_release_vreg(dev, &ctx->vdd);
out:
@@ -266,6 +268,7 @@
static void msm_11ad_release_vregs(struct msm11ad_ctx *ctx)
{
+ msm_11ad_release_vreg(ctx->dev, &ctx->vdd_ldo);
msm_11ad_release_vreg(ctx->dev, &ctx->vdd);
msm_11ad_release_vreg(ctx->dev, &ctx->vddio);
}
@@ -381,8 +384,14 @@
if (rc)
goto vddio_fail;
+ rc = msm_11ad_enable_vreg(ctx, &ctx->vdd_ldo);
+ if (rc)
+ goto vdd_ldo_fail;
+
return rc;
+vdd_ldo_fail:
+ msm_11ad_disable_vreg(ctx, &ctx->vddio);
vddio_fail:
msm_11ad_disable_vreg(ctx, &ctx->vdd);
out:
@@ -391,10 +400,11 @@
static int msm_11ad_disable_vregs(struct msm11ad_ctx *ctx)
{
- if (!ctx->vdd.reg && !ctx->vddio.reg)
+ if (!ctx->vdd.reg && !ctx->vddio.reg && !ctx->vdd_ldo.reg)
goto out;
/* ignore errors on disable vreg */
+ msm_11ad_disable_vreg(ctx, &ctx->vdd_ldo);
msm_11ad_disable_vreg(ctx, &ctx->vdd);
msm_11ad_disable_vreg(ctx, &ctx->vddio);
@@ -446,13 +456,13 @@
{
int rc;
- rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3);
+ rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk);
if (rc)
return rc;
- rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3_pin);
+ rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk_pin);
if (rc)
- msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
+ msm_11ad_disable_clk(ctx, &ctx->rf_clk);
return rc;
}
@@ -461,22 +471,22 @@
{
int rc;
struct device *dev = ctx->dev;
- int rf_clk3_pin_idx;
+ int rf_clk_pin_idx;
if (!of_property_read_bool(dev->of_node, "qcom,use-ext-clocks"))
return 0;
- rc = msm_11ad_init_clk(dev, &ctx->rf_clk3, "rf_clk3_clk");
+ rc = msm_11ad_init_clk(dev, &ctx->rf_clk, "rf_clk_clk");
if (rc)
return rc;
- rf_clk3_pin_idx = of_property_match_string(dev->of_node, "clock-names",
- "rf_clk3_pin_clk");
- if (rf_clk3_pin_idx >= 0) {
- rc = msm_11ad_init_clk(dev, &ctx->rf_clk3_pin,
- "rf_clk3_pin_clk");
+ rf_clk_pin_idx = of_property_match_string(dev->of_node, "clock-names",
+ "rf_clk_pin_clk");
+ if (rf_clk_pin_idx >= 0) {
+ rc = msm_11ad_init_clk(dev, &ctx->rf_clk_pin,
+ "rf_clk_pin_clk");
if (rc)
- msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3);
+ msm_11ad_release_clk(ctx->dev, &ctx->rf_clk);
}
return rc;
@@ -484,14 +494,14 @@
static void msm_11ad_release_clocks(struct msm11ad_ctx *ctx)
{
- msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3_pin);
- msm_11ad_release_clk(ctx->dev, &ctx->rf_clk3);
+ msm_11ad_release_clk(ctx->dev, &ctx->rf_clk_pin);
+ msm_11ad_release_clk(ctx->dev, &ctx->rf_clk);
}
static void msm_11ad_disable_clocks(struct msm11ad_ctx *ctx)
{
- msm_11ad_disable_clk(ctx, &ctx->rf_clk3_pin);
- msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
+ msm_11ad_disable_clk(ctx, &ctx->rf_clk_pin);
+ msm_11ad_disable_clk(ctx, &ctx->rf_clk);
}
static int msm_11ad_turn_device_power_off(struct msm11ad_ctx *ctx)
@@ -769,86 +779,6 @@
return rc;
}
-static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
-{
- int atomic_ctx = 1;
- int rc;
- int force_pt_coherent = 1;
- int smmu_bypass = !ctx->smmu_s1_en;
-
- if (!ctx->use_smmu)
- return 0;
-
- dev_info(ctx->dev, "Initialize SMMU, bypass=%d, fastmap=%d, coherent=%d\n",
- smmu_bypass, ctx->smmu_fast_map, ctx->smmu_coherent);
-
- ctx->mapping = __depr_arm_iommu_create_mapping(&platform_bus_type,
- ctx->smmu_base, ctx->smmu_size);
- if (IS_ERR_OR_NULL(ctx->mapping)) {
- rc = PTR_ERR(ctx->mapping) ?: -ENODEV;
- dev_err(ctx->dev, "Failed to create IOMMU mapping (%d)\n", rc);
- return rc;
- }
-
- rc = iommu_domain_set_attr(ctx->mapping->domain,
- DOMAIN_ATTR_ATOMIC,
- &atomic_ctx);
- if (rc) {
- dev_err(ctx->dev, "Set atomic attribute to SMMU failed (%d)\n",
- rc);
- goto release_mapping;
- }
-
- if (smmu_bypass) {
- rc = iommu_domain_set_attr(ctx->mapping->domain,
- DOMAIN_ATTR_S1_BYPASS,
- &smmu_bypass);
- if (rc) {
- dev_err(ctx->dev, "Set bypass attribute to SMMU failed (%d)\n",
- rc);
- goto release_mapping;
- }
- } else {
- /* Set dma-coherent and page table coherency */
- if (ctx->smmu_coherent) {
- arch_setup_dma_ops(&ctx->pcidev->dev, 0, 0, NULL, true);
- rc = iommu_domain_set_attr(ctx->mapping->domain,
- DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
- &force_pt_coherent);
- if (rc) {
- dev_err(ctx->dev,
- "Set SMMU PAGE_TABLE_FORCE_COHERENT attr failed (%d)\n",
- rc);
- goto release_mapping;
- }
- }
-
- if (ctx->smmu_fast_map) {
- rc = iommu_domain_set_attr(ctx->mapping->domain,
- DOMAIN_ATTR_FAST,
- &ctx->smmu_fast_map);
- if (rc) {
- dev_err(ctx->dev, "Set fast attribute to SMMU failed (%d)\n",
- rc);
- goto release_mapping;
- }
- }
- }
-
- rc = __depr_arm_iommu_attach_device(&ctx->pcidev->dev, ctx->mapping);
- if (rc) {
- dev_err(ctx->dev, "arm_iommu_attach_device failed (%d)\n", rc);
- goto release_mapping;
- }
- dev_dbg(ctx->dev, "attached to IOMMU\n");
-
- return 0;
-release_mapping:
- __depr_arm_iommu_release_mapping(ctx->mapping);
- ctx->mapping = NULL;
- return rc;
-}
-
static int msm_11ad_ssr_shutdown(const struct subsys_desc *subsys,
bool force_stop)
{
@@ -1091,7 +1021,6 @@
struct device_node *of_node = dev->of_node;
struct device_node *rc_node;
struct pci_dev *pcidev = NULL;
- u32 smmu_mapping[2];
int rc, i;
bool pcidev_found = false;
struct msm_pcie_register_event *pci_event;
@@ -1118,7 +1047,6 @@
* qcom,msm-bus,vectors-KBps =
* <100 512 0 0>,
* <100 512 600000 800000>;
- * qcom,smmu-support;
*};
* rc_node stands for "qcom,pcie", selected entries:
* cell-index = <1>; (ctx->rc_index)
@@ -1149,7 +1077,6 @@
dev_err(ctx->dev, "Parent PCIE device index not found\n");
return -EINVAL;
}
- ctx->use_smmu = of_property_read_bool(of_node, "qcom,smmu-support");
ctx->keep_radio_on_during_sleep = of_property_read_bool(of_node,
"qcom,keep-radio-on-during-sleep");
ctx->bus_scale = msm_bus_cl_get_pdata(pdev);
@@ -1158,28 +1085,6 @@
return -EINVAL;
}
- ctx->smmu_s1_en = of_property_read_bool(of_node, "qcom,smmu-s1-en");
- if (ctx->smmu_s1_en) {
- ctx->smmu_fast_map = of_property_read_bool(
- of_node, "qcom,smmu-fast-map");
- ctx->smmu_coherent = of_property_read_bool(
- of_node, "qcom,smmu-coherent");
- }
- rc = of_property_read_u32_array(dev->of_node, "qcom,smmu-mapping",
- smmu_mapping, 2);
- if (rc) {
- dev_err(ctx->dev,
- "Failed to read base/size smmu addresses %d, fallback to default\n",
- rc);
- ctx->smmu_base = SMMU_BASE;
- ctx->smmu_size = SMMU_SIZE;
- } else {
- ctx->smmu_base = smmu_mapping[0];
- ctx->smmu_size = smmu_mapping[1];
- }
- dev_dbg(ctx->dev, "smmu_base=0x%x smmu_sise=0x%x\n",
- ctx->smmu_base, ctx->smmu_size);
-
/*== execute ==*/
/* turn device on */
rc = msm_11ad_init_vregs(ctx);
@@ -1310,10 +1215,9 @@
" gpio_dc = %d\n"
" sleep_clk_en = %d\n"
" rc_index = %d\n"
- " use_smmu = %d\n"
" pcidev = %pK\n"
"}\n", ctx, ctx->gpio_en, ctx->gpio_dc, ctx->sleep_clk_en,
- ctx->rc_index, ctx->use_smmu, ctx->pcidev);
+ ctx->rc_index, ctx->pcidev);
platform_set_drvdata(pdev, ctx);
device_disable_async_suspend(&pcidev->dev);
@@ -1543,12 +1447,6 @@
ctx->msm_bus_handle = 0;
}
- if (ctx->use_smmu) {
- __depr_arm_iommu_detach_device(&ctx->pcidev->dev);
- __depr_arm_iommu_release_mapping(ctx->mapping);
- ctx->mapping = NULL;
- }
-
memset(&ctx->rops, 0, sizeof(ctx->rops));
ctx->wil_handle = NULL;
@@ -1587,12 +1485,12 @@
break;
case WIL_PLATFORM_EVT_PRE_RESET:
/*
- * Enable rf_clk3 clock before resetting the device to ensure
+ * Enable rf_clk clock before resetting the device to ensure
* stable ref clock during the device reset
*/
if (ctx->features &
BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL)) {
- rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk3);
+ rc = msm_11ad_enable_clk(ctx, &ctx->rf_clk);
if (rc) {
dev_err(ctx->dev,
"failed to enable clk, rc %d\n", rc);
@@ -1602,12 +1500,12 @@
break;
case WIL_PLATFORM_EVT_FW_RDY:
/*
- * Disable rf_clk3 clock after the device is up to allow
+ * Disable rf_clk clock after the device is up to allow
* the device to control it via its GPIO for power saving
*/
if (ctx->features &
BIT(WIL_PLATFORM_FEATURE_FW_EXT_CLK_CONTROL))
- msm_11ad_disable_clk(ctx, &ctx->rf_clk3);
+ msm_11ad_disable_clk(ctx, &ctx->rf_clk);
/*
* Save golden config space for pci linkdown recovery.
@@ -1659,6 +1557,10 @@
{
struct pci_dev *pcidev = to_pci_dev(dev);
struct msm11ad_ctx *ctx = pcidev2ctx(pcidev);
+ struct iommu_domain *domain;
+ int bypass = 0;
+ int fastmap = 0;
+ int coherent = 0;
if (!ctx) {
pr_err("Context not found for pcidev %pK\n", pcidev);
@@ -1673,11 +1575,19 @@
return NULL;
}
dev_info(ctx->dev, "msm_bus handle 0x%x\n", ctx->msm_bus_handle);
- /* smmu */
- if (msm_11ad_smmu_init(ctx)) {
- msm_bus_scale_unregister_client(ctx->msm_bus_handle);
- ctx->msm_bus_handle = 0;
- return NULL;
+
+ domain = iommu_get_domain_for_dev(&pcidev->dev);
+ if (domain) {
+ iommu_domain_get_attr(domain, DOMAIN_ATTR_S1_BYPASS, &bypass);
+ iommu_domain_get_attr(domain, DOMAIN_ATTR_FAST, &fastmap);
+ iommu_domain_get_attr(domain,
+ DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
+ &coherent);
+
+ dev_info(ctx->dev, "SMMU initialized, bypass=%d, fastmap=%d, coherent=%d\n",
+ bypass, fastmap, coherent);
+ } else {
+ dev_warn(ctx->dev, "Unable to get iommu domain\n");
}
/* subsystem restart */
diff --git a/drivers/platform/msm/qcom-geni-se.c b/drivers/platform/msm/qcom-geni-se.c
index e16246b..c9c574e 100644
--- a/drivers/platform/msm/qcom-geni-se.c
+++ b/drivers/platform/msm/qcom-geni-se.c
@@ -36,6 +36,11 @@
static unsigned long default_bus_bw_set[] = {0, 19200000, 50000000,
100000000, 150000000, 200000000, 236000000};
+struct bus_vectors {
+ int src;
+ int dst;
+};
+
/**
* @struct geni_se_device - Data structure to represent the QUPv3 Core
* @dev: Device pointer of the QUPv3 core.
@@ -45,18 +50,32 @@
* @iommu_s1_bypass: Bypass IOMMU stage 1 translation.
* @base: Base address of this instance of QUPv3 core.
* @bus_bw: Client handle to the bus bandwidth request.
+ * @bus_bw_noc: Client handle to the QUP clock and DDR path bus
+ * bandwidth request.
* @bus_mas_id: Master Endpoint ID for bus BW request.
* @bus_slv_id: Slave Endpoint ID for bus BW request.
* @geni_dev_lock: Lock to protect the bus ab & ib values, list.
* @ab_list_head: Sorted resource list based on average bus BW.
* @ib_list_head: Sorted resource list based on instantaneous bus BW.
+ * @ab_list_head_noc: Sorted resource list based on average DDR path bus BW.
+ * @ib_list_head_noc: Sorted resource list based on instantaneous DDR path
+ * bus BW.
* @cur_ab: Current Bus Average BW request value.
* @cur_ib: Current Bus Instantaneous BW request value.
+ * @cur_ab_noc: Current DDR Bus Average BW request value.
+ * @cur_ib_noc: Current DDR Bus Instantaneous BW request value.
* @bus_bw_set: Clock plan for the bus driver.
+ * @bus_bw_set_noc: Clock plan for DDR path.
* @cur_bus_bw_idx: Current index within the bus clock plan.
+ * @cur_bus_bw_idx_noc: Current index within the DDR path clock plan.
* @num_clk_levels: Number of valid clock levels in clk_perf_tbl.
* @clk_perf_tbl: Table of clock frequency input to Serial Engine clock.
- * @log_ctx: Logging context to hold the debug information
+ * @log_ctx: Logging context to hold the debug information.
+ * @vectors: Structure to store Master End and Slave End IDs for
+ * QUPv3 clock and DDR path bus BW requests.
+ * @num_paths: Number of paths (QUPv3 clock and DDR path).
+ * @num_usecases: Number of usecases voting on the QUPv3 clock and DDR paths.
+ * @pdata: Bus scale platform data used to register the client handle with
+ * the ICB driver.
*/
struct geni_se_device {
struct device *dev;
@@ -66,19 +85,31 @@
bool iommu_s1_bypass;
void __iomem *base;
struct msm_bus_client_handle *bus_bw;
+ uint32_t bus_bw_noc;
u32 bus_mas_id;
u32 bus_slv_id;
struct mutex geni_dev_lock;
struct list_head ab_list_head;
struct list_head ib_list_head;
+ struct list_head ab_list_head_noc;
+ struct list_head ib_list_head_noc;
unsigned long cur_ab;
unsigned long cur_ib;
+ unsigned long cur_ab_noc;
+ unsigned long cur_ib_noc;
int bus_bw_set_size;
+ int bus_bw_set_size_noc;
unsigned long *bus_bw_set;
+ unsigned long *bus_bw_set_noc;
int cur_bus_bw_idx;
+ int cur_bus_bw_idx_noc;
unsigned int num_clk_levels;
unsigned long *clk_perf_tbl;
void *log_ctx;
+ struct bus_vectors *vectors;
+ int num_paths;
+ int num_usecases;
+ struct msm_bus_scale_pdata *pdata;
};
/* Offset of QUPV3 Hardware Version Register */
@@ -641,11 +672,37 @@
return bus_bw_update;
}
+static bool geni_se_check_bus_bw_noc(struct geni_se_device *geni_se_dev)
+{
+ int i;
+ int new_bus_bw_idx = geni_se_dev->bus_bw_set_size_noc - 1;
+ unsigned long new_bus_bw;
+ bool bus_bw_update = false;
+
+ new_bus_bw = max(geni_se_dev->cur_ib_noc, geni_se_dev->cur_ab_noc) /
+ DEFAULT_BUS_WIDTH;
+
+ for (i = 0; i < geni_se_dev->bus_bw_set_size_noc; i++) {
+ if (geni_se_dev->bus_bw_set_noc[i] >= new_bus_bw) {
+ new_bus_bw_idx = i;
+ break;
+ }
+ }
+
+ if (geni_se_dev->cur_bus_bw_idx_noc != new_bus_bw_idx) {
+ geni_se_dev->cur_bus_bw_idx_noc = new_bus_bw_idx;
+ bus_bw_update = true;
+ }
+
+ return bus_bw_update;
+}
+
static int geni_se_rmv_ab_ib(struct geni_se_device *geni_se_dev,
struct se_geni_rsc *rsc)
{
struct se_geni_rsc *tmp;
bool bus_bw_update = false;
+ bool bus_bw_update_noc = false;
int ret = 0;
if (unlikely(list_empty(&rsc->ab_list) || list_empty(&rsc->ib_list)))
@@ -664,14 +721,51 @@
geni_se_dev->cur_ib = 0;
bus_bw_update = geni_se_check_bus_bw(geni_se_dev);
- if (bus_bw_update)
+
+ if (geni_se_dev->num_paths == 2) {
+ geni_se_dev->pdata->usecase[1].vectors[0].ab =
+ geni_se_dev->cur_ab;
+ geni_se_dev->pdata->usecase[1].vectors[0].ib =
+ geni_se_dev->cur_ib;
+ }
+
+ if (bus_bw_update && geni_se_dev->num_paths != 2) {
ret = msm_bus_scale_update_bw(geni_se_dev->bus_bw,
geni_se_dev->cur_ab,
geni_se_dev->cur_ib);
- GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+ GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
"%s: %s: cur_ab_ib(%lu:%lu) req_ab_ib(%lu:%lu) %d\n",
__func__, dev_name(rsc->ctrl_dev), geni_se_dev->cur_ab,
geni_se_dev->cur_ib, rsc->ab, rsc->ib, bus_bw_update);
+ }
+
+ if (geni_se_dev->num_paths == 2) {
+ if (unlikely(list_empty(&rsc->ab_list_noc) ||
+ list_empty(&rsc->ib_list_noc)))
+ return -EINVAL;
+
+ list_del_init(&rsc->ab_list_noc);
+ geni_se_dev->cur_ab_noc -= rsc->ab_noc;
+
+ list_del_init(&rsc->ib_list_noc);
+ tmp = list_first_entry_or_null(&geni_se_dev->ib_list_head_noc,
+ struct se_geni_rsc, ib_list_noc);
+ if (tmp && tmp->ib_noc != geni_se_dev->cur_ib_noc)
+ geni_se_dev->cur_ib_noc = tmp->ib_noc;
+ else if (!tmp && geni_se_dev->cur_ib_noc)
+ geni_se_dev->cur_ib_noc = 0;
+
+ bus_bw_update_noc = geni_se_check_bus_bw_noc(geni_se_dev);
+
+ geni_se_dev->pdata->usecase[1].vectors[1].ab =
+ geni_se_dev->cur_ab_noc;
+ geni_se_dev->pdata->usecase[1].vectors[1].ib =
+ geni_se_dev->cur_ib_noc;
+
+ if (bus_bw_update_noc || bus_bw_update)
+ ret = msm_bus_scale_client_update_request
+ (geni_se_dev->bus_bw_noc, 1);
+ }
mutex_unlock(&geni_se_dev->geni_dev_lock);
return ret;
}
@@ -692,7 +786,8 @@
return -EINVAL;
geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
- if (unlikely(!geni_se_dev || !geni_se_dev->bus_bw))
+ if (unlikely(!geni_se_dev || !(geni_se_dev->bus_bw ||
+ geni_se_dev->bus_bw_noc)))
return -ENODEV;
clk_disable_unprepare(rsc->se_clk);
@@ -703,6 +798,7 @@
if (ret)
GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
"%s: Error %d during bus_bw_update\n", __func__, ret);
+
return ret;
}
EXPORT_SYMBOL(se_geni_clks_off);
@@ -723,7 +819,9 @@
return -EINVAL;
geni_se_dev = dev_get_drvdata(rsc->wrapper_dev);
- if (unlikely(!geni_se_dev || !geni_se_dev->bus_bw))
+ if (unlikely(!geni_se_dev ||
+ !(geni_se_dev->bus_bw ||
+ geni_se_dev->bus_bw_noc)))
return -ENODEV;
ret = se_geni_clks_off(rsc);
@@ -743,10 +841,13 @@
{
struct se_geni_rsc *tmp = NULL;
struct list_head *ins_list_head;
+ struct list_head *ins_list_head_noc;
bool bus_bw_update = false;
+ bool bus_bw_update_noc = false;
int ret = 0;
mutex_lock(&geni_se_dev->geni_dev_lock);
+
list_add(&rsc->ab_list, &geni_se_dev->ab_list_head);
geni_se_dev->cur_ab += rsc->ab;
@@ -762,14 +863,51 @@
geni_se_dev->cur_ib = rsc->ib;
bus_bw_update = geni_se_check_bus_bw(geni_se_dev);
- if (bus_bw_update)
+
+ if (geni_se_dev->num_paths == 2) {
+ geni_se_dev->pdata->usecase[1].vectors[0].ab =
+ geni_se_dev->cur_ab;
+ geni_se_dev->pdata->usecase[1].vectors[0].ib =
+ geni_se_dev->cur_ib;
+ }
+
+ if (bus_bw_update && geni_se_dev->num_paths != 2) {
ret = msm_bus_scale_update_bw(geni_se_dev->bus_bw,
geni_se_dev->cur_ab,
geni_se_dev->cur_ib);
- GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
- "%s: %s: cur_ab_ib(%lu:%lu) req_ab_ib(%lu:%lu) %d\n",
- __func__, dev_name(rsc->ctrl_dev), geni_se_dev->cur_ab,
- geni_se_dev->cur_ib, rsc->ab, rsc->ib, bus_bw_update);
+ GENI_SE_DBG(geni_se_dev->log_ctx, false, NULL,
+ "%s: %lu:%lu (%lu:%lu) %d\n", __func__,
+ geni_se_dev->cur_ab, geni_se_dev->cur_ib,
+ rsc->ab, rsc->ib, bus_bw_update);
+ }
+
+ if (geni_se_dev->num_paths == 2) {
+
+ list_add(&rsc->ab_list_noc, &geni_se_dev->ab_list_head_noc);
+ geni_se_dev->cur_ab_noc += rsc->ab_noc;
+ ins_list_head_noc = &geni_se_dev->ib_list_head_noc;
+
+ list_for_each_entry(tmp, &geni_se_dev->ib_list_head_noc,
+ ib_list_noc) {
+ if (tmp->ib < rsc->ib)
+ break;
+ ins_list_head_noc = &tmp->ib_list_noc;
+ }
+ list_add(&rsc->ib_list_noc, ins_list_head_noc);
+
+ if (ins_list_head_noc == &geni_se_dev->ib_list_head_noc)
+ geni_se_dev->cur_ib_noc = rsc->ib_noc;
+
+ bus_bw_update_noc = geni_se_check_bus_bw_noc(geni_se_dev);
+
+ geni_se_dev->pdata->usecase[1].vectors[1].ab =
+ geni_se_dev->cur_ab_noc;
+ geni_se_dev->pdata->usecase[1].vectors[1].ib =
+ geni_se_dev->cur_ib_noc;
+ if (bus_bw_update_noc || bus_bw_update)
+ ret = msm_bus_scale_client_update_request
+ (geni_se_dev->bus_bw_noc, 1);
+ }
mutex_unlock(&geni_se_dev->geni_dev_lock);
return ret;
}
@@ -882,21 +1020,42 @@
if (unlikely(!geni_se_dev))
return -EPROBE_DEFER;
- if (unlikely(IS_ERR_OR_NULL(geni_se_dev->bus_bw))) {
- geni_se_dev->bus_bw = msm_bus_scale_register(
- geni_se_dev->bus_mas_id,
- geni_se_dev->bus_slv_id,
- (char *)dev_name(geni_se_dev->dev),
- false);
- if (IS_ERR_OR_NULL(geni_se_dev->bus_bw)) {
- GENI_SE_ERR(geni_se_dev->log_ctx, false, NULL,
- "%s: Error creating bus client\n", __func__);
- return (int)PTR_ERR(geni_se_dev->bus_bw);
+ if (geni_se_dev->num_paths == 2) {
+ if (unlikely(!(geni_se_dev->bus_bw_noc))) {
+ geni_se_dev->bus_bw_noc =
+ msm_bus_scale_register_client(geni_se_dev->pdata);
+ if (!(geni_se_dev->bus_bw_noc)) {
+ GENI_SE_ERR(geni_se_dev->log_ctx,
+ false, NULL,
+ "%s: Error creating bus client\n", __func__);
+ return -EFAULT;
+ }
}
+
+ rsc->ab = ab;
+ rsc->ib = ab;
+ rsc->ab_noc = 0;
+ rsc->ib_noc = ib;
+ INIT_LIST_HEAD(&rsc->ab_list_noc);
+ INIT_LIST_HEAD(&rsc->ib_list_noc);
+ } else {
+ if (unlikely(IS_ERR_OR_NULL(geni_se_dev->bus_bw))) {
+ geni_se_dev->bus_bw = msm_bus_scale_register(
+ geni_se_dev->bus_mas_id,
+ geni_se_dev->bus_slv_id,
+ (char *)dev_name(geni_se_dev->dev),
+ false);
+ if (IS_ERR_OR_NULL(geni_se_dev->bus_bw)) {
+ GENI_SE_ERR(geni_se_dev->log_ctx,
+ false, NULL,
+ "%s: Error creating bus client\n", __func__);
+ return (int)PTR_ERR(geni_se_dev->bus_bw);
+ }
+ }
+ rsc->ab = ab;
+ rsc->ib = ib;
}
- rsc->ab = ab;
- rsc->ib = ib;
INIT_LIST_HEAD(&rsc->ab_list);
INIT_LIST_HEAD(&rsc->ib_list);
@@ -1429,6 +1588,87 @@
{}
};
+static struct msm_bus_scale_pdata *ab_ib_register(struct platform_device *pdev,
+ struct geni_se_device *host)
+{
+ int rc = 0;
+ struct device *dev = &pdev->dev;
+ int i = 0, j, len;
+ bool mem_err = false;
+ const uint32_t *vec_arr = NULL;
+ struct msm_bus_scale_pdata *pdata = NULL;
+ struct msm_bus_paths *usecase = NULL;
+
+ vec_arr = of_get_property(dev->of_node,
+ "qcom,msm-bus,vectors-bus-ids", &len);
+ if (vec_arr == NULL) {
+ pr_err("Error: Vector array not found\n");
+ rc = 1;
+ goto out;
+ }
+
+ if (len != host->num_paths * sizeof(uint32_t) * 2) {
+ pr_err("Error: Length-error on getting vectors\n");
+ rc = 1;
+ goto out;
+ }
+
+ pdata = devm_kzalloc(dev, sizeof(struct msm_bus_scale_pdata),
+ GFP_KERNEL);
+ if (!pdata) {
+ mem_err = true;
+ goto out;
+ }
+
+ pdata->name = (char *)dev_name(host->dev);
+
+ pdata->num_usecases = 2;
+
+ pdata->active_only = 0;
+
+ usecase = devm_kzalloc(dev, (sizeof(struct msm_bus_paths) *
+ pdata->num_usecases), GFP_KERNEL);
+ if (!usecase) {
+ mem_err = true;
+ goto out;
+ }
+
+ for (i = 0; i < pdata->num_usecases; i++) {
+ usecase[i].num_paths = host->num_paths;
+ usecase[i].vectors = devm_kzalloc(dev, host->num_paths *
+ sizeof(struct msm_bus_vectors), GFP_KERNEL);
+ if (!usecase[i].vectors) {
+ mem_err = true;
+ pr_err("Error: Mem alloc failure in vectors\n");
+ goto out;
+ }
+
+ for (j = 0; j < host->num_paths; j++) {
+ int index = (j * 2);
+
+ usecase[i].vectors[j].src =
+ be32_to_cpu(vec_arr[index]);
+ usecase[i].vectors[j].dst =
+ be32_to_cpu(vec_arr[index + 1]);
+ usecase[i].vectors[j].ab = 0;
+ usecase[i].vectors[j].ib = 0;
+ }
+ }
+
+ pdata->usecase = usecase;
+
+ return pdata;
+out:
+ if (mem_err) {
+ for ( ; i > 0; i--)
+ devm_kfree(dev, usecase[i-1].vectors);
+ devm_kfree(dev, usecase);
+ devm_kfree(dev, pdata);
+ }
+ return NULL;
+}
+
static int geni_se_iommu_probe(struct device *dev)
{
struct geni_se_device *geni_se_dev;
@@ -1489,29 +1729,57 @@
}
geni_se_dev->dev = dev;
- geni_se_dev->cb_dev = dev;
- ret = of_property_read_u32(dev->of_node, "qcom,bus-mas-id",
- &geni_se_dev->bus_mas_id);
- if (ret) {
- dev_err(dev, "%s: Error missing bus master id\n", __func__);
- devm_iounmap(dev, geni_se_dev->base);
- devm_kfree(dev, geni_se_dev);
+
+ ret = of_property_read_u32(dev->of_node, "qcom,msm-bus,num-paths",
+ &geni_se_dev->num_paths);
+ if (!ret) {
+ geni_se_dev->pdata = ab_ib_register(pdev, geni_se_dev);
+ if (geni_se_dev->pdata == NULL) {
+ dev_err(dev,
+ "%s: Error missing bus master and slave id\n",
+ __func__);
+ devm_iounmap(dev, geni_se_dev->base);
+ devm_kfree(dev, geni_se_dev);
+ }
}
- ret = of_property_read_u32(dev->of_node, "qcom,bus-slv-id",
+
+ else {
+ geni_se_dev->num_paths = 1;
+ ret = of_property_read_u32(dev->of_node, "qcom,bus-mas-id",
+ &geni_se_dev->bus_mas_id);
+ if (ret) {
+ dev_err(dev, "%s: Error missing bus master id\n",
+ __func__);
+ devm_iounmap(dev, geni_se_dev->base);
+ devm_kfree(dev, geni_se_dev);
+ }
+ ret = of_property_read_u32(dev->of_node, "qcom,bus-slv-id",
&geni_se_dev->bus_slv_id);
- if (ret) {
- dev_err(dev, "%s: Error missing bus slave id\n", __func__);
- devm_iounmap(dev, geni_se_dev->base);
- devm_kfree(dev, geni_se_dev);
+ if (ret) {
+ dev_err(dev, "%s: Error missing bus slave id\n",
+ __func__);
+ devm_iounmap(dev, geni_se_dev->base);
+ devm_kfree(dev, geni_se_dev);
+ }
}
geni_se_dev->iommu_s1_bypass = of_property_read_bool(dev->of_node,
"qcom,iommu-s1-bypass");
geni_se_dev->bus_bw_set = default_bus_bw_set;
- geni_se_dev->bus_bw_set_size = ARRAY_SIZE(default_bus_bw_set);
+ geni_se_dev->bus_bw_set_size =
+ ARRAY_SIZE(default_bus_bw_set);
+ if (geni_se_dev->num_paths == 2) {
+ geni_se_dev->bus_bw_set_noc = default_bus_bw_set;
+ geni_se_dev->bus_bw_set_size_noc =
+ ARRAY_SIZE(default_bus_bw_set);
+ }
mutex_init(&geni_se_dev->iommu_lock);
INIT_LIST_HEAD(&geni_se_dev->ab_list_head);
INIT_LIST_HEAD(&geni_se_dev->ib_list_head);
+ if (geni_se_dev->num_paths == 2) {
+ INIT_LIST_HEAD(&geni_se_dev->ab_list_head_noc);
+ INIT_LIST_HEAD(&geni_se_dev->ib_list_head_noc);
+ }
mutex_init(&geni_se_dev->geni_dev_lock);
geni_se_dev->log_ctx = ipc_log_context_create(NUM_LOG_PAGES,
dev_name(geni_se_dev->dev), 0);
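
geni_se_check_bus_bw_noc() above picks the lowest clock-plan entry that still satisfies the aggregate request, falling back to the top entry if none does. A worked pass over the default table, assuming DEFAULT_BUS_WIDTH is 4 (an assumption; the define sits outside these hunks):

	/*
	 * bus_bw_set_noc = {0, 19200000, 50000000, 100000000, ...}
	 * cur_ab_noc = 80000000, cur_ib_noc = 150000000
	 * new_bus_bw = max(150000000, 80000000) / 4 = 37500000
	 * first entry >= 37500000 is 50000000 at index 2 -> vote for index 2
	 */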
diff --git a/drivers/platform/x86/Kconfig b/drivers/platform/x86/Kconfig
index 0c1aa6c..7563c07 100644
--- a/drivers/platform/x86/Kconfig
+++ b/drivers/platform/x86/Kconfig
@@ -856,6 +856,7 @@
config ACPI_CMPC
tristate "CMPC Laptop Extras"
depends on ACPI && INPUT
+ depends on BACKLIGHT_LCD_SUPPORT
depends on RFKILL || RFKILL=n
select BACKLIGHT_CLASS_DEVICE
help
@@ -1077,6 +1078,7 @@
config SAMSUNG_Q10
tristate "Samsung Q10 Extras"
depends on ACPI
+ depends on BACKLIGHT_LCD_SUPPORT
select BACKLIGHT_CLASS_DEVICE
---help---
This driver provides support for backlight control on Samsung Q10
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 86926db..da7e614 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#define pr_fmt(fmt) "QG-K: %s: " fmt, __func__
@@ -1807,6 +1807,9 @@
case POWER_SUPPLY_PROP_SOH:
pval->intval = chip->soh;
break;
+ case POWER_SUPPLY_PROP_CC_SOC:
+ rc = qg_get_cc_soc(chip, &pval->intval);
+ break;
default:
pr_debug("Unsupported property %d\n", psp);
break;
@@ -1857,6 +1860,7 @@
POWER_SUPPLY_PROP_ESR_ACTUAL,
POWER_SUPPLY_PROP_ESR_NOMINAL,
POWER_SUPPLY_PROP_SOH,
+ POWER_SUPPLY_PROP_CC_SOC,
};
static const struct power_supply_desc qg_psy_desc = {
diff --git a/drivers/power/supply/qcom/qpnp-smb5.c b/drivers/power/supply/qcom/qpnp-smb5.c
index 5e85290..b11818e 100644
--- a/drivers/power/supply/qcom/qpnp-smb5.c
+++ b/drivers/power/supply/qcom/qpnp-smb5.c
@@ -438,8 +438,8 @@
chg->sw_jeita_enabled = of_property_read_bool(node,
"qcom,sw-jeita-enable");
- chg->pd_not_supported = of_property_read_bool(node,
- "qcom,usb-pd-disable");
+ chg->pd_not_supported = chg->pd_not_supported ||
+ of_property_read_bool(node, "qcom,usb-pd-disable");
chg->lpd_disabled = of_property_read_bool(node, "qcom,lpd-disable");
diff --git a/drivers/power/supply/qcom/smb1390-charger-psy.c b/drivers/power/supply/qcom/smb1390-charger-psy.c
index af4dc03..b00ac7c 100644
--- a/drivers/power/supply/qcom/smb1390-charger-psy.c
+++ b/drivers/power/supply/qcom/smb1390-charger-psy.c
@@ -5,6 +5,7 @@
#define pr_fmt(fmt) "SMB1390: %s: " fmt, __func__
+#include <linux/debugfs.h>
#include <linux/device.h>
#include <linux/interrupt.h>
#include <linux/module.h>
@@ -84,6 +85,16 @@
#define SRC_VOTER "SRC_VOTER"
#define SWITCHER_TOGGLE_VOTER "SWITCHER_TOGGLE_VOTER"
+#define smb1390_dbg(chip, reason, fmt, ...) \
+ do { \
+ if (chip->debug_mask & (reason)) \
+ pr_info("SMB1390: %s: " fmt, __func__, \
+ ##__VA_ARGS__); \
+ else \
+ pr_debug("SMB1390: %s: " fmt, __func__, \
+ ##__VA_ARGS__); \
+ } while (0)
+
enum {
SWITCHER_OFF_WINDOW_IRQ = 0,
SWITCHER_OFF_FAULT_IRQ,
@@ -101,6 +112,14 @@
SMB_PIN_EN,
};
+enum print_reason {
+ PR_INTERRUPT = BIT(0),
+ PR_REGISTER = BIT(1),
+ PR_INFO = BIT(2),
+ PR_EXT_DEPENDENCY = BIT(3),
+ PR_MISC = BIT(4),
+};
+
struct smb1390_iio {
struct iio_channel *die_temp_chan;
};
@@ -110,6 +129,7 @@
struct regmap *regmap;
struct notifier_block nb;
struct wakeup_source *cp_ws;
+ struct dentry *dfs_root;
/* work structs */
struct work_struct status_change_work;
@@ -140,6 +160,7 @@
bool switcher_enabled;
int die_temp;
bool suspended;
+ u32 debug_mask;
};
struct smb_irq {
@@ -166,7 +187,8 @@
{
int rc;
- pr_debug("Writing 0x%02x to 0x%04x with mask 0x%02x\n", val, reg, mask);
+ smb1390_dbg(chip, PR_REGISTER, "Writing 0x%02x to 0x%04x with mask 0x%02x\n",
+ val, reg, mask);
rc = regmap_update_bits(chip->regmap, reg, mask, val);
if (rc < 0)
pr_err("Couldn't write 0x%02x to 0x%04x with mask 0x%02x\n",
@@ -180,7 +202,7 @@
if (!chip->batt_psy) {
chip->batt_psy = power_supply_get_by_name("battery");
if (!chip->batt_psy) {
- pr_debug("Couldn't find battery psy\n");
+ smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find battery psy\n");
return false;
}
}
@@ -188,7 +210,7 @@
if (!chip->usb_psy) {
chip->usb_psy = power_supply_get_by_name("usb");
if (!chip->usb_psy) {
- pr_debug("Couldn't find usb psy\n");
+ smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find usb psy\n");
return false;
}
}
@@ -196,7 +218,7 @@
if (!chip->dc_psy) {
chip->dc_psy = power_supply_get_by_name("dc");
if (!chip->dc_psy) {
- pr_debug("Couldn't find dc psy\n");
+ smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find dc psy\n");
return false;
}
}
@@ -204,7 +226,7 @@
if (!chip->fcc_votable) {
chip->fcc_votable = find_votable("FCC");
if (!chip->fcc_votable) {
- pr_debug("Couldn't find FCC votable\n");
+ smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find FCC votable\n");
return false;
}
}
@@ -212,13 +234,13 @@
if (!chip->fv_votable) {
chip->fv_votable = find_votable("FV");
if (!chip->fv_votable) {
- pr_debug("Couldn't find FV votable\n");
+ smb1390_dbg(chip, PR_EXT_DEPENDENCY, "Couldn't find FV votable\n");
return false;
}
}
if (!chip->disable_votable) {
- pr_debug("Couldn't find CP DISABLE votable\n");
+ smb1390_dbg(chip, PR_MISC, "Couldn't find CP DISABLE votable\n");
return false;
}
@@ -254,7 +276,8 @@
*enable = !!(status & EN_PIN_OUT2_BIT);
break;
default:
- pr_debug("cp_en status %d is not supported\n", id);
+ smb1390_dbg(chip, PR_MISC, "cp_en status %d is not supported\n",
+ id);
rc = -EINVAL;
break;
}
@@ -270,7 +293,8 @@
for (i = 0; i < NUM_IRQS; ++i) {
if (irq == chip->irqs[i]) {
- pr_debug("%s IRQ triggered\n", smb_irqs[i].name);
+ smb1390_dbg(chip, PR_INTERRUPT, "%s IRQ triggered\n",
+ smb_irqs[i].name);
chip->irq_status |= 1 << i;
}
}
@@ -471,10 +495,11 @@
/* ILIM less than 1A is not accurate; disable charging */
if (ilim_uA < 1000000) {
- pr_debug("ILIM %duA is too low to allow charging\n", ilim_uA);
+ smb1390_dbg(chip, PR_INFO, "ILIM %duA is too low to allow charging\n",
+ ilim_uA);
vote(chip->disable_votable, ILIM_VOTER, true, 0);
} else {
- pr_debug("ILIM set to %duA\n", ilim_uA);
+ smb1390_dbg(chip, PR_INFO, "ILIM set to %duA\n", ilim_uA);
vote(chip->disable_votable, ILIM_VOTER, false, 0);
}
@@ -621,7 +646,7 @@
if (get_effective_result(chip->fv_votable) >
chip->taper_entry_fv) {
- pr_debug("Float voltage increased. Exiting taper\n");
+ smb1390_dbg(chip, PR_INFO, "Float voltage increased. Exiting taper\n");
goto out;
} else {
chip->taper_entry_fv =
@@ -631,7 +656,8 @@
if (pval.intval == POWER_SUPPLY_CHARGE_TYPE_TAPER) {
fcc_uA = get_effective_result(chip->fcc_votable)
- 100000;
- pr_debug("taper work reducing FCC to %duA\n", fcc_uA);
+ smb1390_dbg(chip, PR_INFO, "taper work reducing FCC to %duA\n",
+ fcc_uA);
vote(chip->fcc_votable, CP_VOTER, true, fcc_uA);
if (fcc_uA < 2000000) {
@@ -640,13 +666,13 @@
goto out;
}
} else {
- pr_debug("In fast charging. Wait for next taper\n");
+ smb1390_dbg(chip, PR_INFO, "In fast charging. Wait for next taper\n");
}
msleep(500);
}
out:
- pr_debug("taper work exit\n");
+ smb1390_dbg(chip, PR_INFO, "taper work exit\n");
vote(chip->fcc_votable, CP_VOTER, false, 0);
chip->taper_work_running = false;
}
@@ -733,7 +759,7 @@
+ 500000;
break;
default:
- pr_debug("charge pump power supply get prop %d not supported\n",
+ smb1390_dbg(chip, PR_MISC, "charge pump power supply get prop %d not supported\n",
prop);
return -EINVAL;
}
@@ -760,7 +786,7 @@
chip->irq_status = val->intval;
break;
default:
- pr_debug("charge pump power supply set prop %d not supported\n",
+ smb1390_dbg(chip, PR_MISC, "charge pump power supply set prop %d not supported\n",
prop);
return -EINVAL;
}
@@ -969,6 +995,31 @@
return rc;
}
+#ifdef CONFIG_DEBUG_FS
+static void smb1390_create_debugfs(struct smb1390 *chip)
+{
+ struct dentry *entry;
+
+ chip->dfs_root = debugfs_create_dir("smb1390_charger_psy", NULL);
+ if (IS_ERR_OR_NULL(chip->dfs_root)) {
+ pr_err("Failed to create debugfs directory, rc=%ld\n",
+ (long)chip->dfs_root);
+ return;
+ }
+
+ entry = debugfs_create_u32("debug_mask", 0600, chip->dfs_root,
+ &chip->debug_mask);
+ if (IS_ERR_OR_NULL(entry)) {
+ pr_err("Failed to create debug_mask, rc=%ld\n", (long)entry);
+ debugfs_remove_recursive(chip->dfs_root);
+ }
+}
+#else
+static void smb1390_create_debugfs(struct smb1390 *chip)
+{
+}
+#endif
+
static int smb1390_probe(struct platform_device *pdev)
{
struct smb1390 *chip;
@@ -1034,7 +1085,10 @@
goto out_notifier;
}
+ smb1390_create_debugfs(chip);
+
pr_debug("smb1390 probed successfully\n");
+
return 0;
out_notifier:
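
The new debug_mask knob selects which smb1390_dbg() classes are promoted from pr_debug() to pr_info(); the classes are the PR_* bits defined above. For example (assuming debugfs is mounted at the usual /sys/kernel/debug):

	/*
	 * PR_INTERRUPT is BIT(0), PR_INFO is BIT(2) -> mask 0x5:
	 *   echo 0x5 > /sys/kernel/debug/smb1390_charger_psy/debug_mask
	 */
	smb1390_dbg(chip, PR_INFO, "ILIM set to %duA\n", ilim_uA);  /* pr_info  */
	smb1390_dbg(chip, PR_MISC, "not selected\n");               /* pr_debug */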
diff --git a/drivers/power/supply/qcom/smb5-lib.c b/drivers/power/supply/qcom/smb5-lib.c
index 2a23637..7c4a9df 100644
--- a/drivers/power/supply/qcom/smb5-lib.c
+++ b/drivers/power/supply/qcom/smb5-lib.c
@@ -904,7 +904,7 @@
int rc;
u8 mask;
- if (chg->hvdcp_disable)
+ if (chg->hvdcp_disable || chg->pd_not_supported)
return;
mask = HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT;
@@ -3376,7 +3376,7 @@
union power_supply_propval *val)
{
union power_supply_propval pval = {0, };
- int rc = 0;
+ int rc = 0, buck_scale = 1, boost_scale = 1;
if (chg->iio.usbin_i_chan) {
rc = iio_read_channel_processed(chg->iio.usbin_i_chan,
@@ -3389,10 +3389,24 @@
/*
* For PM8150B, scaling factor = reciprocal of
* 0.2V/A in Buck mode, 0.4V/A in Boost mode.
+ * For PMI632, scaling factor = reciprocal of
+ * 0.4V/A in Buck mode, 0.8V/A in Boost mode.
*/
+ switch (chg->smb_version) {
+ case PMI632_SUBTYPE:
+ buck_scale = 40;
+ boost_scale = 80;
+ break;
+ default:
+ buck_scale = 20;
+ boost_scale = 40;
+ break;
+ }
+
if (chg->otg_present || smblib_get_prop_dfp_mode(chg) !=
POWER_SUPPLY_TYPEC_NONE) {
- val->intval = DIV_ROUND_CLOSEST(val->intval * 100, 40);
+ val->intval = DIV_ROUND_CLOSEST(val->intval * 100,
+ boost_scale);
return rc;
}
@@ -3407,7 +3421,8 @@
if (!pval.intval)
val->intval = 0;
else
- val->intval *= 5;
+ val->intval = DIV_ROUND_CLOSEST(val->intval * 100,
+ buck_scale);
} else {
val->intval = 0;
rc = -ENODATA;
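
The scale factors fold the sense gain into one integer divide: each scale is the gain in V/A times 100, so the reported value becomes val * 100 / scale. Worked out from the comment in the hunk:

	/*
	 * PM8150B buck:  0.2 V/A -> buck_scale  = 20 -> val * 5
	 * PM8150B boost: 0.4 V/A -> boost_scale = 40 -> val * 2.5
	 * PMI632  buck:  0.4 V/A -> buck_scale  = 40 -> val * 2.5
	 * PMI632  boost: 0.8 V/A -> boost_scale = 80 -> val * 1.25
	 *
	 * DIV_ROUND_CLOSEST keeps the fractional multipliers in integer math,
	 * and the PM8150B buck case reduces to the old "val->intval *= 5".
	 */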
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 970654f..2d1f6a5 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -22,6 +22,7 @@
#include <linux/hashtable.h>
#include <linux/ip.h>
#include <linux/refcount.h>
+#include <linux/workqueue.h>
#include <net/ipv6.h>
#include <net/if_inet6.h>
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index b03515d..56aacf3 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -565,6 +565,7 @@
QETH_DBF_MESSAGE(2, "%s error in starting next read ccw! "
"rc=%i\n", dev_name(&card->gdev->dev), rc);
atomic_set(&channel->irq_pending, 0);
+ qeth_release_buffer(channel, iob);
card->read_or_write_problem = 1;
qeth_schedule_recovery(card);
wake_up(&card->wait_q);
@@ -1187,6 +1188,8 @@
rc = qeth_get_problem(cdev, irb);
if (rc) {
card->read_or_write_problem = 1;
+ if (iob)
+ qeth_release_buffer(iob->channel, iob);
qeth_clear_ipacmd_list(card);
qeth_schedule_recovery(card);
goto out;
@@ -1852,6 +1855,7 @@
QETH_DBF_MESSAGE(2, "Error2 in activating channel rc=%d\n", rc);
QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
atomic_set(&channel->irq_pending, 0);
+ qeth_release_buffer(channel, iob);
wake_up(&card->wait_q);
return rc;
}
@@ -1923,6 +1927,7 @@
rc);
QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
atomic_set(&channel->irq_pending, 0);
+ qeth_release_buffer(channel, iob);
wake_up(&card->wait_q);
return rc;
}
@@ -2110,6 +2115,7 @@
}
reply = qeth_alloc_reply(card);
if (!reply) {
+ qeth_release_buffer(channel, iob);
return -ENOMEM;
}
reply->callback = reply_cb;
@@ -2448,11 +2454,12 @@
return 0;
}
-static void qeth_free_qdio_out_buf(struct qeth_qdio_out_q *q)
+static void qeth_free_output_queue(struct qeth_qdio_out_q *q)
{
if (!q)
return;
+ qeth_clear_outq_buffers(q, 1);
qdio_free_buffers(q->qdio_bufs, QDIO_MAX_BUFFERS_PER_Q);
kfree(q);
}
@@ -2526,10 +2533,8 @@
card->qdio.out_qs[i]->bufs[j] = NULL;
}
out_freeoutq:
- while (i > 0) {
- qeth_free_qdio_out_buf(card->qdio.out_qs[--i]);
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
- }
+ while (i > 0)
+ qeth_free_output_queue(card->qdio.out_qs[--i]);
kfree(card->qdio.out_qs);
card->qdio.out_qs = NULL;
out_freepool:
@@ -2562,10 +2567,8 @@
qeth_free_buffer_pool(card);
/* free outbound qdio_qs */
if (card->qdio.out_qs) {
- for (i = 0; i < card->qdio.no_out_queues; ++i) {
- qeth_clear_outq_buffers(card->qdio.out_qs[i], 1);
- qeth_free_qdio_out_buf(card->qdio.out_qs[i]);
- }
+ for (i = 0; i < card->qdio.no_out_queues; i++)
+ qeth_free_output_queue(card->qdio.out_qs[i]);
kfree(card->qdio.out_qs);
card->qdio.out_qs = NULL;
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index 76b2fba..b7513c5 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -854,6 +854,8 @@
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l2_set_offline(cgdev);
+
+ cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
}
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index b7f6a83..7f71ca0 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -2611,6 +2611,7 @@
if (cgdev->state == CCWGROUP_ONLINE)
qeth_l3_set_offline(cgdev);
+ cancel_work_sync(&card->close_dev_work);
if (qeth_netdev_is_registered(card->dev))
unregister_netdev(card->dev);
qeth_l3_clear_ip_htable(card, 0);
diff --git a/drivers/scsi/53c700.c b/drivers/scsi/53c700.c
index 6be77b3..ac79f20 100644
--- a/drivers/scsi/53c700.c
+++ b/drivers/scsi/53c700.c
@@ -295,7 +295,7 @@
if(tpnt->sdev_attrs == NULL)
tpnt->sdev_attrs = NCR_700_dev_attrs;
- memory = dma_alloc_attrs(hostdata->dev, TOTAL_MEM_SIZE, &pScript,
+ memory = dma_alloc_attrs(dev, TOTAL_MEM_SIZE, &pScript,
GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
if(memory == NULL) {
printk(KERN_ERR "53c700: Failed to allocate memory for driver, detaching\n");
diff --git a/drivers/scsi/aacraid/commsup.c b/drivers/scsi/aacraid/commsup.c
index 6e1b022..3236240 100644
--- a/drivers/scsi/aacraid/commsup.c
+++ b/drivers/scsi/aacraid/commsup.c
@@ -1304,8 +1304,9 @@
ADD : DELETE;
break;
}
- case AifBuManagerEvent:
- aac_handle_aif_bu(dev, aifcmd);
+ break;
+ case AifBuManagerEvent:
+ aac_handle_aif_bu(dev, aifcmd);
break;
}
diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c
index 350257c..bc9f2a2 100644
--- a/drivers/scsi/bnx2fc/bnx2fc_io.c
+++ b/drivers/scsi/bnx2fc/bnx2fc_io.c
@@ -240,6 +240,7 @@
return NULL;
}
+ cmgr->hba = hba;
cmgr->free_list = kcalloc(arr_sz, sizeof(*cmgr->free_list),
GFP_KERNEL);
if (!cmgr->free_list) {
@@ -256,7 +257,6 @@
goto mem_err;
}
- cmgr->hba = hba;
cmgr->cmds = (struct bnx2fc_cmd **)(cmgr + 1);
for (i = 0; i < arr_sz; i++) {
@@ -295,7 +295,7 @@
/* Allocate pool of io_bdts - one for each bnx2fc_cmd */
mem_size = num_ios * sizeof(struct io_bdt *);
- cmgr->io_bdt_pool = kmalloc(mem_size, GFP_KERNEL);
+ cmgr->io_bdt_pool = kzalloc(mem_size, GFP_KERNEL);
if (!cmgr->io_bdt_pool) {
printk(KERN_ERR PFX "failed to alloc io_bdt_pool\n");
goto mem_err;
diff --git a/drivers/scsi/libfc/fc_lport.c b/drivers/scsi/libfc/fc_lport.c
index be83590..ff943f4 100644
--- a/drivers/scsi/libfc/fc_lport.c
+++ b/drivers/scsi/libfc/fc_lport.c
@@ -1726,14 +1726,14 @@
fc_frame_payload_op(fp) != ELS_LS_ACC) {
FC_LPORT_DBG(lport, "FLOGI not accepted or bad response\n");
fc_lport_error(lport, fp);
- goto err;
+ goto out;
}
flp = fc_frame_payload_get(fp, sizeof(*flp));
if (!flp) {
FC_LPORT_DBG(lport, "FLOGI bad response\n");
fc_lport_error(lport, fp);
- goto err;
+ goto out;
}
mfs = ntohs(flp->fl_csp.sp_bb_data) &
@@ -1743,7 +1743,7 @@
FC_LPORT_DBG(lport, "FLOGI bad mfs:%hu response, "
"lport->mfs:%hu\n", mfs, lport->mfs);
fc_lport_error(lport, fp);
- goto err;
+ goto out;
}
if (mfs <= lport->mfs) {
diff --git a/drivers/scsi/libfc/fc_rport.c b/drivers/scsi/libfc/fc_rport.c
index 372387a..1797e47 100644
--- a/drivers/scsi/libfc/fc_rport.c
+++ b/drivers/scsi/libfc/fc_rport.c
@@ -184,7 +184,6 @@
struct fc_rport_priv *rdata;
rdata = container_of(kref, struct fc_rport_priv, kref);
- WARN_ON(!list_empty(&rdata->peers));
kfree_rcu(rdata, rcu);
}
EXPORT_SYMBOL(fc_rport_destroy);
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 60bcc6d..65305b3 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -62,7 +62,7 @@
/* make sure inq_product_rev string corresponds to this version */
#define SDEBUG_VERSION "0188" /* format to fit INQUIRY revision field */
-static const char *sdebug_version_date = "20180128";
+static const char *sdebug_version_date = "20190125";
#define MY_NAME "scsi_debug"
@@ -735,7 +735,7 @@
(sdebug_lbpu || sdebug_lbpws || sdebug_lbpws10);
}
-static void *fake_store(unsigned long long lba)
+static void *lba2fake_store(unsigned long long lba)
{
lba = do_div(lba, sdebug_store_sectors);
@@ -2514,8 +2514,8 @@
return ret;
}
-/* If fake_store(lba,num) compares equal to arr(num), then copy top half of
- * arr into fake_store(lba,num) and return true. If comparison fails then
+/* If lba2fake_store(lba,num) compares equal to arr(num), then copy top half of
+ * arr into lba2fake_store(lba,num) and return true. If comparison fails then
* return false. */
static bool comp_write_worker(u64 lba, u32 num, const u8 *arr)
{
@@ -2643,7 +2643,7 @@
if (sdt->app_tag == cpu_to_be16(0xffff))
continue;
- ret = dif_verify(sdt, fake_store(sector), sector, ei_lba);
+ ret = dif_verify(sdt, lba2fake_store(sector), sector, ei_lba);
if (ret) {
dif_errors++;
return ret;
@@ -3261,10 +3261,12 @@
static int resp_write_same(struct scsi_cmnd *scp, u64 lba, u32 num,
u32 ei_lba, bool unmap, bool ndob)
{
+ int ret;
unsigned long iflags;
unsigned long long i;
- int ret;
- u64 lba_off;
+ u32 lb_size = sdebug_sector_size;
+ u64 block, lbaa;
+ u8 *fs1p;
ret = check_device_access_params(scp, lba, num);
if (ret)
@@ -3276,31 +3278,30 @@
unmap_region(lba, num);
goto out;
}
-
- lba_off = lba * sdebug_sector_size;
+ lbaa = lba;
+ block = do_div(lbaa, sdebug_store_sectors);
/* if ndob then zero 1 logical block, else fetch 1 logical block */
+ fs1p = fake_storep + (block * lb_size);
if (ndob) {
- memset(fake_storep + lba_off, 0, sdebug_sector_size);
+ memset(fs1p, 0, lb_size);
ret = 0;
} else
- ret = fetch_to_dev_buffer(scp, fake_storep + lba_off,
- sdebug_sector_size);
+ ret = fetch_to_dev_buffer(scp, fs1p, lb_size);
if (-1 == ret) {
write_unlock_irqrestore(&atomic_rw, iflags);
return DID_ERROR << 16;
- } else if (sdebug_verbose && !ndob && (ret < sdebug_sector_size))
+ } else if (sdebug_verbose && !ndob && (ret < lb_size))
sdev_printk(KERN_INFO, scp->device,
"%s: %s: lb size=%u, IO sent=%d bytes\n",
- my_name, "write same",
- sdebug_sector_size, ret);
+ my_name, "write same", lb_size, ret);
/* Copy first sector to remaining blocks */
- for (i = 1 ; i < num ; i++)
- memcpy(fake_storep + ((lba + i) * sdebug_sector_size),
- fake_storep + lba_off,
- sdebug_sector_size);
-
+ for (i = 1 ; i < num ; i++) {
+ lbaa = lba + i;
+ block = do_div(lbaa, sdebug_store_sectors);
+ memmove(fake_storep + (block * lb_size), fs1p, lb_size);
+ }
if (scsi_debug_lbp())
map_region(lba, num);
out:
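
The rewritten WRITE SAME loop wraps every LBA into the fake store with do_div(), whose in-place semantics are worth spelling out:

	/*
	 * do_div(n, base) divides n in place (n becomes n / base) and the
	 * macro's value is the remainder, so:
	 *
	 *   lbaa  = lba + i;
	 *   block = do_div(lbaa, sdebug_store_sectors);  /* (lba+i) % store_sectors */
	 *
	 * wraps the virtual LBA into the smaller backing buffer. Once a range
	 * wraps, source and destination blocks can overlap, which is why the
	 * copy loop now uses memmove() rather than memcpy().
	 */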
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index ffeac4b..c678bf9 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -761,6 +761,7 @@
set_host_byte(cmd, DID_OK);
return BLK_STS_TARGET;
case DID_NEXUS_FAILURE:
+ set_host_byte(cmd, DID_OK);
return BLK_STS_NEXUS;
case DID_ALLOC_FAILURE:
set_host_byte(cmd, DID_OK);
diff --git a/drivers/soc/fsl/qbman/qman.c b/drivers/soc/fsl/qbman/qman.c
index 8cc0151..a4ac607 100644
--- a/drivers/soc/fsl/qbman/qman.c
+++ b/drivers/soc/fsl/qbman/qman.c
@@ -1081,18 +1081,19 @@
static irqreturn_t portal_isr(int irq, void *ptr)
{
struct qman_portal *p = ptr;
-
- u32 clear = QM_DQAVAIL_MASK | p->irq_sources;
u32 is = qm_in(&p->p, QM_REG_ISR) & p->irq_sources;
+ u32 clear = 0;
if (unlikely(!is))
return IRQ_NONE;
/* DQRR-handling if it's interrupt-driven */
- if (is & QM_PIRQ_DQRI)
+ if (is & QM_PIRQ_DQRI) {
__poll_portal_fast(p, QMAN_POLL_LIMIT);
+ clear = QM_DQAVAIL_MASK | QM_PIRQ_DQRI;
+ }
/* Handling of anything else that's interrupt-driven */
- clear |= __poll_portal_slow(p, is);
+ clear |= __poll_portal_slow(p, is) & QM_PIRQ_SLOW;
qm_out(&p->p, QM_REG_ISR, clear);
return IRQ_HANDLED;
}
diff --git a/drivers/soc/qcom/dfc_qmi.c b/drivers/soc/qcom/dfc_qmi.c
index 0304fcc..8fae3f1 100644
--- a/drivers/soc/qcom/dfc_qmi.c
+++ b/drivers/soc/qcom/dfc_qmi.c
@@ -1,20 +1,25 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <linux/rtnetlink.h>
#include <net/pkt_sched.h>
#include <linux/soc/qcom/qmi.h>
#include <soc/qcom/rmnet_qmi.h>
+#include <soc/qcom/qmi_rmnet.h>
#include "qmi_rmnet_i.h"
#define CREATE_TRACE_POINTS
#include <trace/events/dfc.h>
+#define DFC_MASK_TCP_BIDIR 0x1
+#define DFC_MASK_RAT_SWITCH 0x2
+#define DFC_IS_TCP_BIDIR(r) (bool)((r) & DFC_MASK_TCP_BIDIR)
+#define DFC_IS_RAT_SWITCH(r) (bool)((r) & DFC_MASK_RAT_SWITCH)
+
#define DFC_IS_ANCILLARY(type) ((type) != AF_INET && (type) != AF_INET6)
-#define DFC_MAX_BEARERS_V01 16
#define DFC_MAX_QOS_ID_V01 2
#define DFC_ACK_TYPE_DISABLE 1
@@ -50,17 +55,20 @@
struct work_struct svc_arrive;
struct qmi_handle handle;
struct sockaddr_qrtr ssctl;
+ struct svc_info svc;
+ struct work_struct qmi_ind_work;
+ struct list_head qmi_ind_q;
+ spinlock_t qmi_ind_lock;
int index;
int restart_state;
};
static void dfc_svc_init(struct work_struct *work);
-static void dfc_do_burst_flow_control(struct work_struct *work);
/* **************************************************** */
#define DFC_SERVICE_ID_V01 0x4E
#define DFC_SERVICE_VERS_V01 0x01
-#define DFC_TIMEOUT_MS 10000
+#define DFC_TIMEOUT_JF msecs_to_jiffies(1000)
#define QMI_DFC_BIND_CLIENT_REQ_V01 0x0020
#define QMI_DFC_BIND_CLIENT_RESP_V01 0x0020
@@ -75,6 +83,11 @@
#define QMI_DFC_FLOW_STATUS_IND_V01 0x0022
#define QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN 540
+#define QMI_DFC_GET_FLOW_STATUS_REQ_V01 0x0023
+#define QMI_DFC_GET_FLOW_STATUS_RESP_V01 0x0023
+#define QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN 20
+#define QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN 543
+
struct dfc_bind_client_req_msg_v01 {
u8 ep_id_valid;
struct data_ep_id_type_v01 ep_id;
@@ -298,9 +311,21 @@
struct dfc_ancillary_info_type_v01 ancillary_info[DFC_MAX_BEARERS_V01];
};
+struct dfc_get_flow_status_req_msg_v01 {
+ u8 bearer_id_list_valid;
+ u8 bearer_id_list_len;
+ u8 bearer_id_list[DFC_MAX_BEARERS_V01];
+};
+
+struct dfc_get_flow_status_resp_msg_v01 {
+ struct qmi_response_type_v01 resp;
+ u8 flow_status_valid;
+ u8 flow_status_len;
+ struct dfc_flow_status_info_type_v01 flow_status[DFC_MAX_BEARERS_V01];
+};
+
struct dfc_svc_ind {
- struct work_struct work;
- struct dfc_qmi_data *data;
+ struct list_head list;
struct dfc_flow_status_ind_msg_v01 dfc_info;
};
@@ -497,6 +522,100 @@
},
};
+static struct qmi_elem_info dfc_get_flow_status_req_msg_v01_ei[] = {
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ dfc_get_flow_status_req_msg_v01,
+ bearer_id_list_valid),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ dfc_get_flow_status_req_msg_v01,
+ bearer_id_list_len),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = DFC_MAX_BEARERS_V01,
+ .elem_size = sizeof(u8),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ dfc_get_flow_status_req_msg_v01,
+ bearer_id_list),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
+static struct qmi_elem_info dfc_get_flow_status_resp_msg_v01_ei[] = {
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(struct
+ dfc_get_flow_status_resp_msg_v01,
+ resp),
+ .ei_array = qmi_response_type_v01_ei,
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ dfc_get_flow_status_resp_msg_v01,
+ flow_status_valid),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_DATA_LEN,
+ .elem_len = 1,
+ .elem_size = sizeof(u8),
+ .array_type = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ dfc_get_flow_status_resp_msg_v01,
+ flow_status_len),
+ .ei_array = NULL,
+ },
+ {
+ .data_type = QMI_STRUCT,
+ .elem_len = DFC_MAX_BEARERS_V01,
+ .elem_size = sizeof(struct
+ dfc_flow_status_info_type_v01),
+ .array_type = VAR_LEN_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(struct
+ dfc_get_flow_status_resp_msg_v01,
+ flow_status),
+ .ei_array = dfc_flow_status_info_type_v01_ei,
+ },
+ {
+ .data_type = QMI_EOTI,
+ .array_type = NO_ARRAY,
+ .tlv_type = QMI_COMMON_TLV_TYPE,
+ },
+};
+
static int
dfc_bind_client_req(struct qmi_handle *dfc_handle,
struct sockaddr_qrtr *ssctl, struct svc_info *svc)
@@ -538,7 +657,7 @@
goto out;
}
- ret = qmi_txn_wait(&txn, DFC_TIMEOUT_MS);
+ ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
if (ret < 0) {
pr_err("%s() Response waiting failed, err: %d\n",
__func__, ret);
@@ -594,7 +713,7 @@
goto out;
}
- ret = qmi_txn_wait(&txn, DFC_TIMEOUT_MS);
+ ret = qmi_txn_wait(&txn, DFC_TIMEOUT_JF);
if (ret < 0) {
pr_err("%s() Response waiting failed, err: %d\n",
__func__, ret);
@@ -610,12 +729,65 @@
return ret;
}
-static int dfc_init_service(struct dfc_qmi_data *data, struct qmi_info *qmi)
+static int
+dfc_get_flow_status_req(struct qmi_handle *dfc_handle,
+ struct sockaddr_qrtr *ssctl,
+ struct dfc_get_flow_status_resp_msg_v01 *resp)
+{
+ struct dfc_get_flow_status_req_msg_v01 *req;
+ struct qmi_txn *txn;
+ int ret;
+
+ req = kzalloc(sizeof(*req), GFP_ATOMIC);
+ if (!req)
+ return -ENOMEM;
+
+ txn = kzalloc(sizeof(*txn), GFP_ATOMIC);
+ if (!txn) {
+ kfree(req);
+ return -ENOMEM;
+ }
+
+ ret = qmi_txn_init(dfc_handle, txn,
+ dfc_get_flow_status_resp_msg_v01_ei, resp);
+ if (ret < 0) {
+ pr_err("%s() Failed init for response, err: %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = qmi_send_request(dfc_handle, ssctl, txn,
+ QMI_DFC_GET_FLOW_STATUS_REQ_V01,
+ QMI_DFC_GET_FLOW_STATUS_REQ_V01_MAX_MSG_LEN,
+ dfc_get_flow_status_req_msg_v01_ei, req);
+ if (ret < 0) {
+ qmi_txn_cancel(txn);
+ pr_err("%s() Failed sending request, err: %d\n",
+ __func__, ret);
+ goto out;
+ }
+
+ ret = qmi_txn_wait(txn, DFC_TIMEOUT_JF);
+ if (ret < 0) {
+ pr_err("%s() Response waiting failed, err: %d\n",
+ __func__, ret);
+ } else if (resp->resp.result != QMI_RESULT_SUCCESS_V01) {
+ pr_err("%s() Request rejected, result: %d, err: %d\n",
+ __func__, resp->resp.result, resp->resp.error);
+ ret = -resp->resp.result;
+ }
+
+out:
+ kfree(txn);
+ kfree(req);
+ return ret;
+}
+
+static int dfc_init_service(struct dfc_qmi_data *data)
{
int rc;
- rc = dfc_bind_client_req(&data->handle, &data->ssctl,
- &qmi->fc_info[data->index].svc);
+ rc = dfc_bind_client_req(&data->handle, &data->ssctl, &data->svc);
if (rc < 0)
return rc;
@@ -666,21 +838,18 @@
struct rmnet_bearer_map *bearer,
struct qos_info *qos)
{
- struct list_head *p;
struct rmnet_flow_map *itm;
int rc = 0, qlen;
int enable;
enable = bearer->grant_size ? 1 : 0;
- list_for_each(p, &qos->flow_head) {
- itm = list_entry(p, struct rmnet_flow_map, list);
-
+ list_for_each_entry(itm, &qos->flow_head, list) {
if (itm->bearer_id == bearer->bearer_id) {
/*
* Do not flow disable the ancillary queue when tcp_bidir is set
*/
- if (bearer->ancillary && enable == 0 &&
+ if (bearer->tcp_bidir && enable == 0 &&
DFC_IS_ANCILLARY(itm->ip_type))
continue;
@@ -705,36 +874,39 @@
struct qos_info *qos, u8 ack_req, u32 ancillary,
struct dfc_flow_status_info_type_v01 *fc_info)
{
- struct list_head *p;
- struct rmnet_bearer_map *bearer_itm = NULL;
- int enable;
+ struct rmnet_bearer_map *bearer_itm;
+ struct rmnet_flow_map *flow_itm;
+ int rc = 0, qlen;
+ bool enable;
- list_for_each(p, &qos->bearer_head) {
- bearer_itm = list_entry(p, struct rmnet_bearer_map, list);
+ enable = fc_info->num_bytes > 0 ? 1 : 0;
+ list_for_each_entry(bearer_itm, &qos->bearer_head, list) {
bearer_itm->grant_size = fc_info->num_bytes;
bearer_itm->grant_thresh =
qmi_rmnet_grant_per(bearer_itm->grant_size);
bearer_itm->seq = fc_info->seq_num;
bearer_itm->ack_req = ack_req;
- bearer_itm->ancillary = ancillary;
+ bearer_itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
+ bearer_itm->last_grant = fc_info->num_bytes;
+ bearer_itm->last_seq = fc_info->seq_num;
}
- enable = fc_info->num_bytes > 0 ? 1 : 0;
-
- if (enable)
- netif_tx_wake_all_queues(dev);
- else
- netif_tx_stop_all_queues(dev);
-
- trace_dfc_qmi_tc(dev->name, 0xFF, 0, fc_info->num_bytes, 0, 0, enable);
+ list_for_each_entry(flow_itm, &qos->flow_head, list) {
+ qlen = qmi_rmnet_flow_control(dev, flow_itm->tcm_handle,
+ enable);
+ trace_dfc_qmi_tc(dev->name, flow_itm->bearer_id,
+ flow_itm->flow_id, fc_info->num_bytes,
+ qlen, flow_itm->tcm_handle, enable);
+ rc++;
+ }
if (enable == 0 && ack_req)
dfc_send_ack(dev, fc_info->bearer_id,
fc_info->seq_num, fc_info->mux_id,
DFC_ACK_TYPE_DISABLE);
- return 0;
+ return rc;
}
static int dfc_update_fc_map(struct net_device *dev, struct qos_info *qos,
@@ -743,33 +915,42 @@
{
struct rmnet_bearer_map *itm = NULL;
int rc = 0;
- int action = -1;
+ bool action = false;
itm = qmi_rmnet_get_bearer_map(qos, fc_info->bearer_id);
if (itm) {
- if (itm->grant_size == 0 && fc_info->num_bytes > 0)
- action = 1;
- else if (itm->grant_size > 0 && fc_info->num_bytes == 0)
- action = 0;
+ /* The RAT switch flag indicates the start and end of
+ * the switch. Ignore indications in between.
+ */
+ if (DFC_IS_RAT_SWITCH(ancillary))
+ itm->rat_switch = !fc_info->num_bytes;
+ else
+ if (itm->rat_switch)
+ return 0;
+
+ if ((itm->grant_size == 0 && fc_info->num_bytes > 0) ||
+ (itm->grant_size > 0 && fc_info->num_bytes == 0))
+ action = true;
itm->grant_size = fc_info->num_bytes;
itm->grant_thresh = qmi_rmnet_grant_per(itm->grant_size);
itm->seq = fc_info->seq_num;
itm->ack_req = ack_req;
- itm->ancillary = ancillary;
+ itm->tcp_bidir = DFC_IS_TCP_BIDIR(ancillary);
+ itm->last_grant = fc_info->num_bytes;
+ itm->last_seq = fc_info->seq_num;
- if (action != -1)
+ if (action)
rc = dfc_bearer_flow_ctl(dev, itm, qos);
} else {
- pr_debug("grant %u before flow activate\n", fc_info->num_bytes);
qos->default_grant = fc_info->num_bytes;
}
return rc;
}
-static void dfc_do_burst_flow_control(struct work_struct *work)
+static void dfc_do_burst_flow_control(struct dfc_qmi_data *dfc,
+ struct dfc_svc_ind *svc_ind)
{
- struct dfc_svc_ind *svc_ind = (struct dfc_svc_ind *)work;
struct dfc_flow_status_ind_msg_v01 *ind = &svc_ind->dfc_info;
struct net_device *dev;
struct qos_info *qos;
@@ -779,11 +960,6 @@
u32 ancillary;
int i, j;
- if (unlikely(svc_ind->data->restart_state)) {
- kfree(svc_ind);
- return;
- }
-
rcu_read_lock();
for (i = 0; i < ind->flow_status_len; i++) {
@@ -801,7 +977,7 @@
}
}
- trace_dfc_flow_ind(svc_ind->data->index,
+ trace_dfc_flow_ind(dfc->index,
i, flow_status->mux_id,
flow_status->bearer_id,
flow_status->num_bytes,
@@ -809,7 +985,7 @@
ack_req,
ancillary);
- dev = rmnet_get_rmnet_dev(svc_ind->data->rmnet_port,
+ dev = rmnet_get_rmnet_dev(dfc->rmnet_port,
flow_status->mux_id);
if (!dev)
goto clean_out;
@@ -832,7 +1008,38 @@
clean_out:
rcu_read_unlock();
- kfree(svc_ind);
+}
+
+static void dfc_qmi_ind_work(struct work_struct *work)
+{
+ struct dfc_qmi_data *dfc = container_of(work, struct dfc_qmi_data,
+ qmi_ind_work);
+ struct dfc_svc_ind *svc_ind;
+ unsigned long flags;
+
+ if (!dfc)
+ return;
+
+ local_bh_disable();
+
+ do {
+ spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
+ svc_ind = list_first_entry_or_null(&dfc->qmi_ind_q,
+ struct dfc_svc_ind, list);
+ if (svc_ind)
+ list_del(&svc_ind->list);
+ spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
+
+ if (svc_ind) {
+ if (!dfc->restart_state)
+ dfc_do_burst_flow_control(dfc, svc_ind);
+ kfree(svc_ind);
+ }
+ } while (svc_ind != NULL);
+
+ local_bh_enable();
+
+ qmi_rmnet_set_dl_msg_active(dfc->rmnet_port);
}
static void dfc_clnt_ind_cb(struct qmi_handle *qmi, struct sockaddr_qrtr *sq,
@@ -842,6 +1049,7 @@
handle);
struct dfc_flow_status_ind_msg_v01 *ind_msg;
struct dfc_svc_ind *svc_ind;
+ unsigned long flags;
if (qmi != &dfc->handle)
return;
@@ -858,13 +1066,13 @@
if (!svc_ind)
return;
- INIT_WORK((struct work_struct *)svc_ind,
- dfc_do_burst_flow_control);
-
memcpy(&svc_ind->dfc_info, ind_msg, sizeof(*ind_msg));
- svc_ind->data = dfc;
- queue_work(dfc->dfc_wq, (struct work_struct *)svc_ind);
+ spin_lock_irqsave(&dfc->qmi_ind_lock, flags);
+ list_add_tail(&svc_ind->list, &dfc->qmi_ind_q);
+ spin_unlock_irqrestore(&dfc->qmi_ind_lock, flags);
+
+ queue_work(dfc->dfc_wq, &dfc->qmi_ind_work);
}
}
@@ -875,25 +1083,32 @@
svc_arrive);
struct qmi_info *qmi;
+ if (data->restart_state == 1)
+ return;
+
+ rc = dfc_init_service(data);
+ if (rc < 0) {
+ pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
+ return;
+ }
+
+ rtnl_lock();
qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
- if (!qmi)
- goto clean_out;
+ if (!qmi) {
+ rtnl_unlock();
+ return;
+ }
- rc = dfc_init_service(data, qmi);
- if (rc < 0)
- goto clean_out;
-
- qmi->fc_info[data->index].dfc_client = (void *)data;
+ qmi->dfc_pending[data->index] = NULL;
+ qmi->dfc_clients[data->index] = (void *)data;
trace_dfc_client_state_up(data->index,
- qmi->fc_info[data->index].svc.instance,
- qmi->fc_info[data->index].svc.ep_type,
- qmi->fc_info[data->index].svc.iface_id);
- return;
+ data->svc.instance,
+ data->svc.ep_type,
+ data->svc.iface_id);
-clean_out:
- qmi_handle_release(&data->handle);
- destroy_workqueue(data->dfc_wq);
- kfree(data);
+ rtnl_unlock();
+
+ pr_info("Connection established with the DFC Service\n");
}
static int dfc_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
@@ -935,11 +1150,15 @@
{},
};
-int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi)
+int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
+ struct qmi_info *qmi)
{
struct dfc_qmi_data *data;
int rc = -ENOMEM;
+ if (!port || !qmi)
+ return -EINVAL;
+
data = kzalloc(sizeof(struct dfc_qmi_data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -947,6 +1166,11 @@
data->rmnet_port = port;
data->index = index;
data->restart_state = 0;
+ memcpy(&data->svc, psvc, sizeof(data->svc));
+
+ INIT_WORK(&data->qmi_ind_work, dfc_qmi_ind_work);
+ INIT_LIST_HEAD(&data->qmi_ind_q);
+ spin_lock_init(&data->qmi_ind_lock);
data->dfc_wq = create_singlethread_workqueue("dfc_wq");
if (!data->dfc_wq) {
@@ -956,7 +1180,7 @@
INIT_WORK(&data->svc_arrive, dfc_svc_init);
rc = qmi_handle_init(&data->handle,
- QMI_DFC_FLOW_STATUS_IND_V01_MAX_MSG_LEN,
+ QMI_DFC_GET_FLOW_STATUS_RESP_V01_MAX_MSG_LEN,
&server_ops, qmi_indication_handler);
if (rc < 0) {
pr_err("%s: failed qmi_handle_init - rc[%d]\n", __func__, rc);
@@ -965,12 +1189,14 @@
rc = qmi_add_lookup(&data->handle, DFC_SERVICE_ID_V01,
DFC_SERVICE_VERS_V01,
- qmi->fc_info[index].svc.instance);
+ psvc->instance);
if (rc < 0) {
pr_err("%s: failed qmi_add_lookup - rc[%d]\n", __func__, rc);
goto err2;
}
+ qmi->dfc_pending[index] = (void *)data;
+
return 0;
err2:
@@ -1049,8 +1275,45 @@
int i;
for (i = 0; i < MAX_CLIENT_NUM; i++) {
- dfc_data = (struct dfc_qmi_data *)(qmi->fc_info[i].dfc_client);
+ dfc_data = (struct dfc_qmi_data *)(qmi->dfc_clients[i]);
if (dfc_data)
flush_workqueue(dfc_data->dfc_wq);
}
}
+
+void dfc_qmi_query_flow(void *dfc_data)
+{
+ struct dfc_qmi_data *data = (struct dfc_qmi_data *)dfc_data;
+ struct dfc_get_flow_status_resp_msg_v01 *resp;
+ struct dfc_svc_ind *svc_ind;
+ int rc;
+
+ resp = kzalloc(sizeof(*resp), GFP_ATOMIC);
+ if (!resp)
+ return;
+
+ svc_ind = kzalloc(sizeof(*svc_ind), GFP_ATOMIC);
+ if (!svc_ind) {
+ kfree(resp);
+ return;
+ }
+
+ if (!data)
+ goto done;
+
+ rc = dfc_get_flow_status_req(&data->handle, &data->ssctl, resp);
+
+ if (rc < 0 || !resp->flow_status_valid || resp->flow_status_len < 1 ||
+ resp->flow_status_len > DFC_MAX_BEARERS_V01)
+ goto done;
+
+ svc_ind->dfc_info.flow_status_valid = resp->flow_status_valid;
+ svc_ind->dfc_info.flow_status_len = resp->flow_status_len;
+ memcpy(&svc_ind->dfc_info.flow_status, resp->flow_status,
+ sizeof(resp->flow_status[0]) * resp->flow_status_len);
+ dfc_do_burst_flow_control(data, svc_ind);
+
+done:
+ kfree(svc_ind);
+ kfree(resp);
+}
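
The dfc_qmi.c hunks replace the one-work-item-per-indication scheme with a spinlock-protected queue drained by a single work item, so restart_state is checked once per message and no indication is lost while the handler runs. A self-contained sketch of that producer/consumer pattern, with illustrative names (struct ind_ctx, struct ind_msg):

	#include <linux/list.h>
	#include <linux/slab.h>
	#include <linux/spinlock.h>
	#include <linux/workqueue.h>

	struct ind_msg {
		struct list_head list;
		/* payload copied from the QMI indication goes here */
	};

	struct ind_ctx {
		struct work_struct work;
		struct list_head q;	/* pending indications */
		spinlock_t lock;	/* protects q */
	};

	/* callback context: copy and queue the message, never handle inline */
	static void ind_enqueue(struct ind_ctx *ctx, struct ind_msg *msg)
	{
		unsigned long flags;

		spin_lock_irqsave(&ctx->lock, flags);
		list_add_tail(&msg->list, &ctx->q);
		spin_unlock_irqrestore(&ctx->lock, flags);

		schedule_work(&ctx->work); /* the real driver uses its own wq */
	}

	/* work item: drain the queue one message at a time */
	static void ind_work_fn(struct work_struct *work)
	{
		struct ind_ctx *ctx = container_of(work, struct ind_ctx, work);
		struct ind_msg *msg;
		unsigned long flags;

		for (;;) {
			spin_lock_irqsave(&ctx->lock, flags);
			msg = list_first_entry_or_null(&ctx->q,
						       struct ind_msg, list);
			if (msg)
				list_del(&msg->list);
			spin_unlock_irqrestore(&ctx->lock, flags);

			if (!msg)
				break;
			/* process msg outside the lock, then free it */
			kfree(msg);
		}
	}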
diff --git a/drivers/soc/qcom/qdss_bridge.c b/drivers/soc/qcom/qdss_bridge.c
index d5a1a97..2d1e465 100644
--- a/drivers/soc/qcom/qdss_bridge.c
+++ b/drivers/soc/qcom/qdss_bridge.c
@@ -18,6 +18,7 @@
#include <linux/mhi.h>
#include <linux/usb/usb_qdss.h>
#include <linux/of.h>
+#include <linux/delay.h>
#include "qdss_bridge.h"
#define MODULE_NAME "qdss_bridge"
@@ -142,6 +143,20 @@
return NULL;
}
+static int qdss_check_entry(struct qdss_bridge_drvdata *drvdata)
+{
+ struct qdss_buf_tbl_lst *entry;
+ int ret = 0;
+
+ list_for_each_entry(entry, &drvdata->buf_tbl, link) {
+ if (atomic_read(&entry->available) == 0) {
+ ret = 1;
+ return ret;
+ }
+ }
+
+ return ret;
+}
static void qdss_del_buf_tbl_entry(struct qdss_bridge_drvdata *drvdata,
void *buf)
@@ -808,9 +823,11 @@
spin_unlock_bh(&drvdata->lock);
if (drvdata->usb_ch && drvdata->usb_ch->priv_usb)
usb_qdss_close(drvdata->usb_ch);
+ do {
+ msleep(20);
+ } while (qdss_check_entry(drvdata));
}
mhi_ch_close(drvdata);
-
} else
spin_unlock_bh(&drvdata->lock);
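
The close path above now polls qdss_check_entry() until every buffer-table entry has been returned. If a bound on that wait were wanted, a hedged variant (the 5-second budget is an assumption, not taken from this patch) could be dropped into the same function body:

	unsigned long timeout = jiffies + msecs_to_jiffies(5000);

	while (qdss_check_entry(drvdata)) {
		if (time_after(jiffies, timeout)) {
			pr_warn("%s: buffers still in flight at close\n",
				__func__);
			break;
		}
		msleep(20);
	}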
diff --git a/drivers/soc/qcom/qmi_encdec.c b/drivers/soc/qcom/qmi_encdec.c
index 3aaab71..331d67f 100644
--- a/drivers/soc/qcom/qmi_encdec.c
+++ b/drivers/soc/qcom/qmi_encdec.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0
/*
- * Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2015, 2019 The Linux Foundation. All rights reserved.
* Copyright (C) 2017 Linaro Ltd.
*/
#include <linux/slab.h>
@@ -534,8 +534,8 @@
decoded_bytes += rc;
}
- if (string_len > temp_ei->elem_len) {
- pr_err("%s: String len %d > Max Len %d\n",
+ if (string_len >= temp_ei->elem_len) {
+ pr_err("%s: String len %d >= Max Len %d\n",
__func__, string_len, temp_ei->elem_len);
return -ETOOSMALL;
} else if (string_len > tlv_len) {
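
The qmi_encdec.c change tightens the bound from > to >= because a buffer of elem_len bytes holds at most elem_len - 1 characters plus the terminating NUL; a decoded string of exactly elem_len bytes would leave no room for the terminator. An illustrative helper (names are hypothetical, dst_len plays the role of elem_len):

	#include <linux/errno.h>
	#include <linux/string.h>
	#include <linux/types.h>

	static int copy_bounded_string(char *dst, size_t dst_len,
				       const char *src, size_t string_len)
	{
		if (string_len >= dst_len)	/* no room left for the NUL */
			return -ETOOSMALL;

		memcpy(dst, src, string_len);
		dst[string_len] = '\0';		/* safe only when len < dst_len */
		return 0;
	}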
diff --git a/drivers/soc/qcom/qmi_interface.c b/drivers/soc/qcom/qmi_interface.c
index 4400f51..a391dae 100644
--- a/drivers/soc/qcom/qmi_interface.c
+++ b/drivers/soc/qcom/qmi_interface.c
@@ -347,6 +347,13 @@
ret = wait_for_completion_timeout(&txn->completion, timeout);
+ mutex_lock(&txn->lock);
+ if (txn->result == -ENETRESET) {
+ mutex_unlock(&txn->lock);
+ return txn->result;
+ }
+ mutex_unlock(&txn->lock);
+
mutex_lock(&qmi->txn_lock);
mutex_lock(&txn->lock);
idr_remove(&qmi->txns, txn->id);
@@ -446,17 +453,18 @@
if (IS_ERR(sock))
return;
- mutex_lock(&qmi->sock_lock);
- sock_release(qmi->sock);
- qmi->sock = NULL;
- mutex_unlock(&qmi->sock_lock);
-
qmi_recv_del_server(qmi, -1, -1);
if (qmi->ops.net_reset)
qmi->ops.net_reset(qmi);
mutex_lock(&qmi->sock_lock);
+ /* qmi_handle_release() has already started */
+ if (!qmi->sock) {
+ sock_release(sock);
+ return;
+ }
+ sock_release(qmi->sock);
qmi->sock = sock;
qmi->sq = sq;
mutex_unlock(&qmi->sock_lock);
@@ -570,16 +578,21 @@
static void qmi_data_ready(struct sock *sk)
{
- struct qmi_handle *qmi = sk->sk_user_data;
+ struct qmi_handle *qmi = NULL;
/*
* This will be NULL if we receive data while being in
* qmi_handle_release()
*/
- if (!qmi)
+ read_lock_bh(&sk->sk_callback_lock);
+ qmi = sk->sk_user_data;
+ if (!qmi) {
+ read_unlock_bh(&sk->sk_callback_lock);
return;
+ }
queue_work(qmi->wq, &qmi->work);
+ read_unlock_bh(&sk->sk_callback_lock);
}
static struct socket *qmi_sock_create(struct qmi_handle *qmi,
@@ -602,6 +615,7 @@
sock->sk->sk_user_data = qmi;
sock->sk->sk_data_ready = qmi_data_ready;
sock->sk->sk_error_report = qmi_data_ready;
+ sock->sk->sk_sndtimeo = HZ * 10;
return sock;
}
@@ -682,21 +696,35 @@
*/
void qmi_handle_release(struct qmi_handle *qmi)
{
- struct socket *sock = qmi->sock;
+ struct socket *sock;
struct qmi_service *svc, *tmp;
-
- sock->sk->sk_user_data = NULL;
- cancel_work_sync(&qmi->work);
-
- qmi_recv_del_server(qmi, -1, -1);
+ struct qmi_txn *txn;
+ int txn_id;
mutex_lock(&qmi->sock_lock);
+ sock = qmi->sock;
+ write_lock_bh(&sock->sk->sk_callback_lock);
+ sock->sk->sk_user_data = NULL;
+ write_unlock_bh(&sock->sk->sk_callback_lock);
sock_release(sock);
qmi->sock = NULL;
mutex_unlock(&qmi->sock_lock);
+ cancel_work_sync(&qmi->work);
+
+ qmi_recv_del_server(qmi, -1, -1);
+
destroy_workqueue(qmi->wq);
+ mutex_lock(&qmi->txn_lock);
+ idr_for_each_entry(&qmi->txns, txn, txn_id) {
+ mutex_lock(&txn->lock);
+ idr_remove(&qmi->txns, txn->id);
+ txn->result = -ENETRESET;
+ complete(&txn->completion);
+ mutex_unlock(&txn->lock);
+ }
+ mutex_unlock(&qmi->txn_lock);
idr_destroy(&qmi->txns);
kfree(qmi->recv_buf);
@@ -761,7 +789,7 @@
if (qmi->sock) {
ret = kernel_sendmsg(qmi->sock, &msghdr, &iv, 1, len);
if (ret < 0)
- pr_err("failed to send QMI message\n");
+ pr_info("failed to send QMI message %d\n", ret);
} else {
ret = -EPIPE;
}
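
The qmi_interface.c hunks close a teardown race: qmi_data_ready() now dereferences sk_user_data only under sk_callback_lock, while qmi_handle_release() clears it under the write lock, so a late callback can never touch a freed handle. A condensed sketch of the handshake (struct example_handle is illustrative):

	#include <linux/net.h>
	#include <linux/workqueue.h>
	#include <net/sock.h>

	struct example_handle {
		struct workqueue_struct *wq;
		struct work_struct work;
	};

	static void example_data_ready(struct sock *sk)
	{
		struct example_handle *h;

		read_lock_bh(&sk->sk_callback_lock);
		h = sk->sk_user_data;		/* NULL once release has run */
		if (h)
			queue_work(h->wq, &h->work);
		read_unlock_bh(&sk->sk_callback_lock);
	}

	static void example_release(struct socket *sock)
	{
		write_lock_bh(&sock->sk->sk_callback_lock);
		sock->sk->sk_user_data = NULL;	/* data_ready now bails out */
		write_unlock_bh(&sock->sk->sk_callback_lock);
		sock_release(sock);
	}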
diff --git a/drivers/soc/qcom/qmi_rmnet.c b/drivers/soc/qcom/qmi_rmnet.c
index 9e00f79..047de99 100644
--- a/drivers/soc/qcom/qmi_rmnet.c
+++ b/drivers/soc/qcom/qmi_rmnet.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#include <soc/qcom/qmi_rmnet.h>
@@ -25,13 +25,16 @@
#define FLAG_POWERSAVE_MASK 0x0010
#define DFC_MODE_MULTIQ 2
-unsigned int rmnet_wq_frequency __read_mostly = 4;
+unsigned int rmnet_wq_frequency __read_mostly = 1000;
#define PS_WORK_ACTIVE_BIT 0
-#define PS_INTERVAL (((!rmnet_wq_frequency) ? 1 : rmnet_wq_frequency) * HZ)
+#define PS_INTERVAL (((!rmnet_wq_frequency) ? \
+ 1 : rmnet_wq_frequency/10) * (HZ/100))
#define NO_DELAY (0x0000 * HZ)
+#ifdef CONFIG_QCOM_QMI_DFC
static unsigned int qmi_rmnet_scale_factor = 5;
+#endif
struct qmi_elem_info data_ep_id_type_v01_ei[] = {
{
@@ -74,8 +77,8 @@
return NULL;
for (i = 0; i < MAX_CLIENT_NUM; i++) {
- if (qmi->fc_info[i].dfc_client)
- return qmi->fc_info[i].dfc_client;
+ if (qmi->dfc_clients[i])
+ return qmi->dfc_clients[i];
}
return NULL;
@@ -90,6 +93,22 @@
return qmi_rmnet_has_dfc_client(qmi) ? 1 : 0;
}
+static int
+qmi_rmnet_has_pending(struct qmi_info *qmi)
+{
+ int i;
+
+ if (qmi->wda_pending)
+ return 1;
+
+ for (i = 0; i < MAX_CLIENT_NUM; i++) {
+ if (qmi->dfc_pending[i])
+ return 1;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_QCOM_QMI_DFC
static void
qmi_rmnet_clean_flow_list(struct qmi_info *qmi, struct net_device *dev,
@@ -295,6 +314,17 @@
return 0;
}
+
+static void qmi_rmnet_query_flows(struct qmi_info *qmi)
+{
+ int i;
+
+ for (i = 0; i < MAX_CLIENT_NUM; i++) {
+ if (qmi->dfc_clients[i])
+ dfc_qmi_query_flow(qmi->dfc_clients[i]);
+ }
+}
+
#else
static inline void
qmi_rmnet_update_flow_link(struct qmi_info *qmi, struct net_device *dev,
@@ -325,12 +355,17 @@
{
return -EINVAL;
}
+
+static inline void qmi_rmnet_query_flows(struct qmi_info *qmi)
+{
+}
#endif
static int
qmi_rmnet_setup_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
{
int idx, rc, err = 0;
+ struct svc_info svc;
ASSERT_RTNL();
@@ -341,7 +376,7 @@
idx = (tcm->tcm_handle == 0) ? 0 : 1;
if (!qmi) {
- qmi = kzalloc(sizeof(struct qmi_info), GFP_KERNEL);
+ qmi = kzalloc(sizeof(struct qmi_info), GFP_ATOMIC);
if (!qmi)
return -ENOMEM;
@@ -349,20 +384,20 @@
}
qmi->flag = tcm->tcm_ifindex;
- qmi->fc_info[idx].svc.instance = tcm->tcm_handle;
- qmi->fc_info[idx].svc.ep_type = tcm->tcm_info;
- qmi->fc_info[idx].svc.iface_id = tcm->tcm_parent;
+ svc.instance = tcm->tcm_handle;
+ svc.ep_type = tcm->tcm_info;
+ svc.iface_id = tcm->tcm_parent;
if (((tcm->tcm_ifindex & FLAG_DFC_MASK) == DFC_MODE_MULTIQ) &&
- (qmi->fc_info[idx].dfc_client == NULL)) {
- rc = dfc_qmi_client_init(port, idx, qmi);
+ !qmi->dfc_clients[idx] && !qmi->dfc_pending[idx]) {
+ rc = dfc_qmi_client_init(port, idx, &svc, qmi);
if (rc < 0)
err = rc;
}
if ((tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) &&
- (idx == 0) && (qmi->wda_client == NULL)) {
- rc = wda_qmi_client_init(port, tcm->tcm_handle);
+ (idx == 0) && !qmi->wda_client && !qmi->wda_pending) {
+ rc = wda_qmi_client_init(port, &svc, qmi);
if (rc < 0)
err = rc;
}
@@ -373,15 +408,22 @@
static int
__qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, int idx)
{
+ void *data = NULL;
ASSERT_RTNL();
- if (qmi->fc_info[idx].dfc_client) {
- dfc_qmi_client_exit(qmi->fc_info[idx].dfc_client);
- qmi->fc_info[idx].dfc_client = NULL;
+ if (qmi->dfc_clients[idx])
+ data = qmi->dfc_clients[idx];
+ else if (qmi->dfc_pending[idx])
+ data = qmi->dfc_pending[idx];
+
+ if (data) {
+ dfc_qmi_client_exit(data);
+ qmi->dfc_clients[idx] = NULL;
+ qmi->dfc_pending[idx] = NULL;
}
- if (!qmi_rmnet_has_client(qmi)) {
+ if (!qmi_rmnet_has_client(qmi) && !qmi_rmnet_has_pending(qmi)) {
rmnet_reset_qmi_pt(port);
kfree(qmi);
return 0;
@@ -394,15 +436,21 @@
qmi_rmnet_delete_client(void *port, struct qmi_info *qmi, struct tcmsg *tcm)
{
int idx;
+ void *data = NULL;
/* client delete: tcm->tcm_handle - instance*/
idx = (tcm->tcm_handle == 0) ? 0 : 1;
ASSERT_RTNL();
+ if (qmi->wda_client)
+ data = qmi->wda_client;
+ else if (qmi->wda_pending)
+ data = qmi->wda_pending;
- if ((idx == 0) && qmi->wda_client) {
- wda_qmi_client_exit(qmi->wda_client);
+ if ((idx == 0) && data) {
+ wda_qmi_client_exit(data);
qmi->wda_client = NULL;
+ qmi->wda_pending = NULL;
}
__qmi_rmnet_delete_client(port, qmi, idx);
@@ -433,12 +481,15 @@
return;
if (qmi_rmnet_setup_client(port, qmi, tcm) < 0) {
- if (!qmi_rmnet_has_client(qmi)) {
- kfree(qmi);
+ /* retrieve qmi again as it could have been changed */
+ qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+ if (qmi &&
+ !qmi_rmnet_has_client(qmi) &&
+ !qmi_rmnet_has_pending(qmi)) {
rmnet_reset_qmi_pt(port);
+ kfree(qmi);
}
- }
- if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
+ } else if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
qmi_rmnet_work_init(port);
rmnet_set_powersave_format(port);
}
@@ -446,7 +497,7 @@
case NLMSG_CLIENT_DELETE:
if (!qmi)
return;
- if (tcm->tcm_ifindex & FLAG_POWERSAVE_MASK) {
+ if (tcm->tcm_handle == 0) { /* instance 0 */
rmnet_clear_powersave_format(port);
qmi_rmnet_work_exit(port);
}
@@ -471,6 +522,7 @@
{
struct qmi_info *qmi = (struct qmi_info *)qmi_pt;
int i;
+ void *data = NULL;
if (!qmi)
return;
@@ -479,9 +531,15 @@
qmi_rmnet_work_exit(port);
- if (qmi->wda_client) {
- wda_qmi_client_exit(qmi->wda_client);
+ if (qmi->wda_client)
+ data = qmi->wda_client;
+ else if (qmi->wda_pending)
+ data = qmi->wda_pending;
+
+ if (data) {
+ wda_qmi_client_exit(data);
qmi->wda_client = NULL;
+ qmi->wda_pending = NULL;
}
for (i = 0; i < MAX_CLIENT_NUM; i++) {
@@ -495,7 +553,7 @@
{
struct qos_info *qos;
struct rmnet_bearer_map *bearer;
- int do_wake = 0;
+ bool do_wake = false;
qos = (struct qos_info *)rmnet_get_qos_pt(dev);
if (!qos)
@@ -504,14 +562,14 @@
spin_lock_bh(&qos->qos_lock);
list_for_each_entry(bearer, &qos->bearer_head, list) {
- bearer->grant_before_ps = bearer->grant_size;
- bearer->seq_before_ps = bearer->seq;
+ if (!bearer->grant_size)
+ do_wake = true;
bearer->grant_size = DEFAULT_GRANT;
- bearer->grant_thresh = qmi_rmnet_grant_per(DEFAULT_GRANT);
+ bearer->grant_thresh = DEFAULT_GRANT;
bearer->seq = 0;
bearer->ack_req = 0;
- bearer->ancillary = 0;
- do_wake = 1;
+ bearer->tcp_bidir = false;
+ bearer->rat_switch = false;
}
if (do_wake) {
@@ -523,6 +581,31 @@
}
EXPORT_SYMBOL(qmi_rmnet_enable_all_flows);
+bool qmi_rmnet_all_flows_enabled(struct net_device *dev)
+{
+ struct qos_info *qos;
+ struct rmnet_bearer_map *bearer;
+ bool ret = true;
+
+ qos = (struct qos_info *)rmnet_get_qos_pt(dev);
+ if (!qos)
+ return true;
+
+ spin_lock_bh(&qos->qos_lock);
+
+ list_for_each_entry(bearer, &qos->bearer_head, list) {
+ if (!bearer->grant_size) {
+ ret = false;
+ break;
+ }
+ }
+
+ spin_unlock_bh(&qos->qos_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL(qmi_rmnet_all_flows_enabled);
+
#ifdef CONFIG_QCOM_QMI_DFC
void qmi_rmnet_burst_fc_check(struct net_device *dev,
int ip_type, u32 mark, unsigned int len)
@@ -633,7 +716,7 @@
#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
static struct workqueue_struct *rmnet_ps_wq;
static struct rmnet_powersave_work *rmnet_work;
-static struct list_head ps_list;
+static LIST_HEAD(ps_list);
struct rmnet_powersave_work {
struct delayed_work work;
@@ -711,18 +794,19 @@
if (enable)
dfc_qmi_wq_flush(qmi);
+ else
+ qmi_rmnet_query_flows(qmi);
return 0;
}
EXPORT_SYMBOL(qmi_rmnet_set_powersave_mode);
-void qmi_rmnet_work_restart(void *port)
+static void qmi_rmnet_work_restart(void *port)
{
if (!rmnet_ps_wq || !rmnet_work)
return;
queue_delayed_work(rmnet_ps_wq, &rmnet_work->work, NO_DELAY);
}
-EXPORT_SYMBOL(qmi_rmnet_work_restart);
static void qmi_rmnet_check_stats(struct work_struct *work)
{
@@ -730,6 +814,7 @@
struct qmi_info *qmi;
u64 rxd, txd;
u64 rx, tx;
+ bool dl_msg_active;
real_work = container_of(to_delayed_work(work),
struct rmnet_powersave_work, work);
@@ -742,17 +827,15 @@
return;
if (qmi->ps_enabled) {
- /* Retry after small delay if qmi error
- * This resumes UL grants by disabling
- * powersave mode if successful.
- */
+ /* Register to get QMI DFC and DL marker */
if (qmi_rmnet_set_powersave_mode(real_work->port, 0) < 0) {
+ /* If this fails, retry quickly */
queue_delayed_work(rmnet_ps_wq,
&real_work->work, HZ / 50);
return;
}
- qmi->ps_enabled = 0;
+ qmi->ps_enabled = false;
if (rmnet_get_powersave_notif(real_work->port))
qmi_rmnet_ps_off_notify(real_work->port);
@@ -767,18 +850,29 @@
real_work->old_rx_pkts = rx;
real_work->old_tx_pkts = tx;
+ dl_msg_active = qmi->dl_msg_active;
+ qmi->dl_msg_active = false;
+
if (!rxd && !txd) {
+ /* If no DL message was received and a flow is disabled
+ * (likely in RLF), there is no need to enter powersave
+ */
+ if (!dl_msg_active &&
+ !rmnet_all_flows_enabled(real_work->port))
+ goto end;
+
+ /* Deregister to suppress QMI DFC and DL marker */
if (qmi_rmnet_set_powersave_mode(real_work->port, 1) < 0) {
queue_delayed_work(rmnet_ps_wq,
&real_work->work, PS_INTERVAL);
return;
}
- qmi->ps_enabled = 1;
- clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
+ qmi->ps_enabled = true;
- /* Enable flow after clear the bit so a new
- * work can be triggered.
+ /* Clear the bit before enabling flow so pending packets
+ * can trigger the work again
*/
+ clear_bit(PS_WORK_ACTIVE_BIT, &qmi->ps_work_active);
rmnet_enable_all_flows(real_work->port);
if (rmnet_get_powersave_notif(real_work->port))
@@ -818,7 +912,6 @@
rmnet_ps_wq = NULL;
return;
}
- INIT_LIST_HEAD(&ps_list);
INIT_DEFERRABLE_WORK(&rmnet_work->work, qmi_rmnet_check_stats);
rmnet_work->port = port;
rmnet_get_packets(rmnet_work->port, &rmnet_work->old_rx_pkts,
@@ -854,4 +947,16 @@
rmnet_work = NULL;
}
EXPORT_SYMBOL(qmi_rmnet_work_exit);
+
+void qmi_rmnet_set_dl_msg_active(void *port)
+{
+ struct qmi_info *qmi;
+
+ qmi = (struct qmi_info *)rmnet_get_qmi_pt(port);
+ if (unlikely(!qmi))
+ return;
+
+ qmi->dl_msg_active = true;
+}
+EXPORT_SYMBOL(qmi_rmnet_set_dl_msg_active);
#endif
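
With the default bumped to rmnet_wq_frequency = 1000, the reworked PS_INTERVAL comes out to roughly one second. A worked example (assuming HZ = 100):

	PS_INTERVAL = (rmnet_wq_frequency / 10) * (HZ / 100)
	            = (1000 / 10) * (100 / 100)
	            = 100 jiffies ≈ 1 second

Both divisions are integer, so with HZ = 250 this gives (1000 / 10) * (250 / 100) = 100 * 2 = 200 jiffies = 0.8 s, slightly under a second rather than exactly one.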
diff --git a/drivers/soc/qcom/qmi_rmnet_i.h b/drivers/soc/qcom/qmi_rmnet_i.h
index 0efff48..7fe4862 100644
--- a/drivers/soc/qcom/qmi_rmnet_i.h
+++ b/drivers/soc/qcom/qmi_rmnet_i.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _RMNET_QMI_I_H
@@ -14,7 +14,8 @@
#define MAX_CLIENT_NUM 2
#define MAX_FLOW_NUM 32
-#define DEFAULT_GRANT 10240
+#define DEFAULT_GRANT 1
+#define DFC_MAX_BEARERS_V01 16
struct rmnet_flow_map {
struct list_head list;
@@ -32,9 +33,10 @@
u32 grant_thresh;
u16 seq;
u8 ack_req;
- u32 grant_before_ps;
- u16 seq_before_ps;
- u32 ancillary;
+ u32 last_grant;
+ u16 last_seq;
+ bool tcp_bidir;
+ bool rat_switch;
};
struct svc_info {
@@ -43,11 +45,6 @@
u32 iface_id;
};
-struct fc_info {
- struct svc_info svc;
- void *dfc_client;
-};
-
struct qos_info {
u8 mux_id;
struct net_device *real_dev;
@@ -66,9 +63,12 @@
struct qmi_info {
int flag;
void *wda_client;
- struct fc_info fc_info[MAX_CLIENT_NUM];
+ void *wda_pending;
+ void *dfc_clients[MAX_CLIENT_NUM];
+ void *dfc_pending[MAX_CLIENT_NUM];
unsigned long ps_work_active;
- int ps_enabled;
+ bool ps_enabled;
+ bool dl_msg_active;
};
enum data_ep_type_enum_v01 {
@@ -101,7 +101,8 @@
unsigned int qmi_rmnet_grant_per(unsigned int grant);
-int dfc_qmi_client_init(void *port, int index, struct qmi_info *qmi);
+int dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
+ struct qmi_info *qmi);
void dfc_qmi_client_exit(void *dfc_data);
@@ -112,6 +113,7 @@
void dfc_qmi_wq_flush(struct qmi_info *qmi);
+void dfc_qmi_query_flow(void *dfc_data);
#else
static inline struct rmnet_flow_map *
qmi_rmnet_get_flow_map(struct qos_info *qos_info,
@@ -121,13 +123,14 @@
}
static inline struct rmnet_bearer_map *
-qmi_rmnet_get_bearer_map(struct qos_info *qos_info, uint8_t bearer_id)
+qmi_rmnet_get_bearer_map(struct qos_info *qos_info, u8 bearer_id)
{
return NULL;
}
static inline int
-dfc_qmi_client_init(void *port, int modem, struct qmi_info *qmi)
+dfc_qmi_client_init(void *port, int index, struct svc_info *psvc,
+ struct qmi_info *qmi)
{
return -EINVAL;
}
@@ -146,14 +149,21 @@
dfc_qmi_wq_flush(struct qmi_info *qmi)
{
}
+
+static inline void
+dfc_qmi_query_flow(void *dfc_data)
+{
+}
#endif
#ifdef CONFIG_QCOM_QMI_POWER_COLLAPSE
-int wda_qmi_client_init(void *port, uint32_t instance);
+int
+wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi);
void wda_qmi_client_exit(void *wda_data);
-int wda_set_powersave_mode(void *wda_data, uint8_t enable);
+int wda_set_powersave_mode(void *wda_data, u8 enable);
#else
-static inline int wda_qmi_client_init(void *port, uint32_t instance)
+static inline int
+wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
{
return -EINVAL;
}
@@ -162,7 +172,7 @@
{
}
-static inline int wda_set_powersave_mode(void *wda_data, uint8_t enable)
+static inline int wda_set_powersave_mode(void *wda_data, u8 enable)
{
return -EINVAL;
}
diff --git a/drivers/soc/qcom/smp2p.c b/drivers/soc/qcom/smp2p.c
index b8379f1..b8585d1 100644
--- a/drivers/soc/qcom/smp2p.c
+++ b/drivers/soc/qcom/smp2p.c
@@ -26,6 +26,7 @@
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>
#include <linux/spinlock.h>
+#include <linux/pm_wakeup.h>
#include <linux/ipc_logging.h>
@@ -160,6 +161,7 @@
struct regmap *ipc_regmap;
int ipc_offset;
int ipc_bit;
+ struct wakeup_source ws;
struct mbox_client mbox_client;
struct mbox_chan *mbox_chan;
@@ -297,6 +299,14 @@
}
}
+static irqreturn_t qcom_smp2p_isr(int irq, void *data)
+{
+ struct qcom_smp2p *smp2p = data;
+
+ __pm_stay_awake(&smp2p->ws);
+ return IRQ_WAKE_THREAD;
+}
+
/**
* qcom_smp2p_intr() - interrupt handler for incoming notifications
* @irq: unused
@@ -321,7 +331,7 @@
if (IS_ERR(in)) {
dev_err(smp2p->dev,
"Unable to acquire remote smp2p item\n");
- return IRQ_HANDLED;
+ goto out;
}
smp2p->in = in;
@@ -340,6 +350,8 @@
qcom_smp2p_do_ssr_ack(smp2p);
}
+out:
+ __pm_relax(&smp2p->ws);
return IRQ_HANDLED;
}
@@ -636,12 +648,13 @@
list_add(&entry->node, &smp2p->outbound);
}
}
+ wakeup_source_init(&smp2p->ws, "smp2p");
/* Kick the outgoing edge after allocating entries */
qcom_smp2p_kick(smp2p);
ret = devm_request_threaded_irq(&pdev->dev, smp2p->irq,
- NULL, qcom_smp2p_intr,
+ qcom_smp2p_isr, qcom_smp2p_intr,
IRQF_NO_SUSPEND | IRQF_ONESHOT,
"smp2p", (void *)smp2p);
if (ret) {
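
The smp2p hunks split the handler: a hard-IRQ half takes the wakeup source before waking the thread, and the threaded half releases it once the notification has been handled, so the system cannot suspend between the interrupt and the SSR processing. A self-contained sketch of the same handshake (struct example_dev is illustrative):

	#include <linux/interrupt.h>
	#include <linux/pm_wakeup.h>

	struct example_dev {
		struct wakeup_source ws; /* set up with wakeup_source_init() */
	};

	static irqreturn_t example_hard_isr(int irq, void *data)
	{
		struct example_dev *dev = data;

		__pm_stay_awake(&dev->ws);	/* block suspend right away */
		return IRQ_WAKE_THREAD;		/* defer the real work */
	}

	static irqreturn_t example_thread_fn(int irq, void *data)
	{
		struct example_dev *dev = data;

		/* ... read and dispatch the notification here ... */

		__pm_relax(&dev->ws);		/* allow suspend again */
		return IRQ_HANDLED;
	}

This pairs with devm_request_threaded_irq(dev, irq, example_hard_isr, example_thread_fn, IRQF_ONESHOT, name, data), matching the probe change above.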
diff --git a/drivers/soc/qcom/wda_qmi.c b/drivers/soc/qcom/wda_qmi.c
index 2c15cb8..4fc5c3e 100644
--- a/drivers/soc/qcom/wda_qmi.c
+++ b/drivers/soc/qcom/wda_qmi.c
@@ -1,8 +1,9 @@
// SPDX-License-Identifier: GPL-2.0-only
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
+#include <linux/rtnetlink.h>
#include <linux/soc/qcom/qmi.h>
#include <soc/qcom/rmnet_qmi.h>
#define CREATE_TRACE_POINTS
@@ -15,13 +16,14 @@
struct work_struct svc_arrive;
struct qmi_handle handle;
struct sockaddr_qrtr ssctl;
+ struct svc_info svc;
};
static void wda_svc_config(struct work_struct *work);
/* **************************************************** */
#define WDA_SERVICE_ID_V01 0x1A
#define WDA_SERVICE_VERS_V01 0x01
-#define WDA_TIMEOUT_MS 20
+#define WDA_TIMEOUT_JF msecs_to_jiffies(1000)
#define QMI_WDA_SET_POWERSAVE_CONFIG_REQ_V01 0x002D
#define QMI_WDA_SET_POWERSAVE_CONFIG_RESP_V01 0x002D
@@ -231,7 +233,7 @@
goto out;
}
- ret = qmi_txn_wait(&txn, WDA_TIMEOUT_MS);
+ ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);
if (ret < 0) {
pr_err("%s() Response waiting failed, err: %d\n",
__func__, ret);
@@ -247,8 +249,7 @@
return ret;
}
-static int wda_set_powersave_config_req(struct qmi_handle *wda_handle,
- struct qmi_info *qmi)
+static int wda_set_powersave_config_req(struct qmi_handle *wda_handle)
{
struct wda_qmi_data *data = container_of(wda_handle,
struct wda_qmi_data, handle);
@@ -275,8 +276,8 @@
goto out;
}
- req->ep_id.ep_type = qmi->fc_info[0].svc.ep_type;
- req->ep_id.iface_id = qmi->fc_info[0].svc.iface_id;
+ req->ep_id.ep_type = data->svc.ep_type;
+ req->ep_id.iface_id = data->svc.iface_id;
req->req_data_cfg_valid = 1;
req->req_data_cfg = WDA_DATA_POWERSAVE_CONFIG_ALL_MASK_V01;
ret = qmi_send_request(wda_handle, &data->ssctl, &txn,
@@ -289,7 +290,7 @@
goto out;
}
- ret = qmi_txn_wait(&txn, WDA_TIMEOUT_MS);
+ ret = qmi_txn_wait(&txn, WDA_TIMEOUT_JF);
if (ret < 0) {
pr_err("%s() Response waiting failed, err: %d\n",
__func__, ret);
@@ -310,28 +311,30 @@
struct wda_qmi_data *data = container_of(work, struct wda_qmi_data,
svc_arrive);
struct qmi_info *qmi;
+ int rc;
- qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
- if (!qmi)
- goto clean_out;
-
- if (wda_set_powersave_config_req(&data->handle, qmi) < 0) {
- pr_err("%s() failed, qmi handle pt: %p\n",
- __func__, &data->handle);
- goto clean_out;
+ rc = wda_set_powersave_config_req(&data->handle);
+ if (rc < 0) {
+ pr_err("%s Failed to init service, err[%d]\n", __func__, rc);
+ return;
}
- trace_wda_client_state_up(qmi->fc_info[0].svc.instance,
- qmi->fc_info[0].svc.ep_type,
- qmi->fc_info[0].svc.iface_id);
- qmi->wda_client = (void *)data;
- pr_info("Connection established with the WDA Service\n");
- return;
+ rtnl_lock();
+ qmi = (struct qmi_info *)rmnet_get_qmi_pt(data->rmnet_port);
+ if (!qmi) {
+ rtnl_unlock();
+ return;
+ }
-clean_out:
- qmi_handle_release(&data->handle);
- destroy_workqueue(data->wda_wq);
- kfree(data);
+ qmi->wda_pending = NULL;
+ qmi->wda_client = (void *)data;
+ trace_wda_client_state_up(data->svc.instance,
+ data->svc.ep_type,
+ data->svc.iface_id);
+
+ rtnl_unlock();
+
+ pr_info("Connection established with the WDA Service\n");
}
static int wda_svc_arrive(struct qmi_handle *qmi, struct qmi_service *svc)
@@ -362,16 +365,15 @@
.del_server = wda_svc_exit,
};
-int wda_qmi_client_init(void *port, uint32_t instance)
+int
+wda_qmi_client_init(void *port, struct svc_info *psvc, struct qmi_info *qmi)
{
struct wda_qmi_data *data;
- int rc = 0;
+ int rc = -ENOMEM;
- if (!port)
+ if (!port || !qmi)
return -EINVAL;
- pr_info("%s\n", __func__);
-
data = kzalloc(sizeof(*data), GFP_KERNEL);
if (!data)
return -ENOMEM;
@@ -379,11 +381,11 @@
data->wda_wq = create_singlethread_workqueue("wda_wq");
if (!data->wda_wq) {
pr_err("%s Could not create workqueue\n", __func__);
- kfree(data);
- return -ENOMEM;
+ goto err0;
}
data->rmnet_port = port;
+ memcpy(&data->svc, psvc, sizeof(data->svc));
INIT_WORK(&data->svc_arrive, wda_svc_config);
rc = qmi_handle_init(&data->handle,
@@ -391,19 +393,25 @@
&server_ops, NULL);
if (rc < 0) {
pr_err("%s: Failed qmi_handle_init, err: %d\n", __func__, rc);
- kfree(data);
- return rc;
+ goto err1;
}
rc = qmi_add_lookup(&data->handle, WDA_SERVICE_ID_V01,
- WDA_SERVICE_VERS_V01, instance);
+ WDA_SERVICE_VERS_V01, psvc->instance);
if (rc < 0) {
pr_err("%s(): Failed qmi_add_lookup, err: %d\n", __func__, rc);
- qmi_handle_release(&data->handle);
- destroy_workqueue(data->wda_wq);
- kfree(data);
+ goto err2;
}
+ qmi->wda_pending = (void *)data;
+ return 0;
+
+err2:
+ qmi_handle_release(&data->handle);
+err1:
+ destroy_workqueue(data->wda_wq);
+err0:
+ kfree(data);
return rc;
}
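
wda_qmi_client_init() now unwinds through a goto ladder (err2/err1/err0) instead of repeating cleanup calls at each failure site. A generic sketch of the shape, with placeholder setup/teardown steps standing in for qmi_handle_init()/qmi_add_lookup():

	#include <linux/slab.h>
	#include <linux/workqueue.h>

	struct example {
		struct workqueue_struct *wq;
	};

	static int setup_handle(struct example *e) { return 0; }
	static void teardown_handle(struct example *e) { }
	static int register_lookup(struct example *e) { return 0; }

	static int example_client_init(void)
	{
		struct example *e;
		int rc = -ENOMEM;

		e = kzalloc(sizeof(*e), GFP_KERNEL);
		if (!e)
			return -ENOMEM;

		e->wq = create_singlethread_workqueue("example_wq");
		if (!e->wq)
			goto err0;

		rc = setup_handle(e);
		if (rc < 0)
			goto err1;

		rc = register_lookup(e);
		if (rc < 0)
			goto err2;

		return 0;

	err2:
		teardown_handle(e);
	err1:
		destroy_workqueue(e->wq);
	err0:
		kfree(e);
		return rc;
	}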
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index a880b5c..be81533 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -75,6 +75,9 @@
/* LRU list of unpinned pages, protected by ashmem_mutex */
static LIST_HEAD(ashmem_lru_list);
+static atomic_t ashmem_shrink_inflight = ATOMIC_INIT(0);
+static DECLARE_WAIT_QUEUE_HEAD(ashmem_shrink_wait);
+
/*
* long lru_count - The count of pages on our LRU list.
*
@@ -168,19 +171,15 @@
* @end: The ending page (inclusive)
*
* This function is protected by ashmem_mutex.
- *
- * Return: 0 if successful, or -ENOMEM if there is an error
*/
-static int range_alloc(struct ashmem_area *asma,
- struct ashmem_range *prev_range, unsigned int purged,
- size_t start, size_t end)
+static void range_alloc(struct ashmem_area *asma,
+ struct ashmem_range *prev_range, unsigned int purged,
+ size_t start, size_t end,
+ struct ashmem_range **new_range)
{
- struct ashmem_range *range;
+ struct ashmem_range *range = *new_range;
- range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
- if (!range)
- return -ENOMEM;
-
+ *new_range = NULL;
range->asma = asma;
range->pgstart = start;
range->pgend = end;
@@ -190,8 +189,6 @@
if (range_on_lru(range))
lru_add(range);
-
- return 0;
}
/**
@@ -438,7 +435,6 @@
static unsigned long
ashmem_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
{
- struct ashmem_range *range, *next;
unsigned long freed = 0;
/* We might recurse into filesystem code, so bail out if necessary */
@@ -448,21 +444,33 @@
if (!mutex_trylock(&ashmem_mutex))
return -1;
- list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
+ while (!list_empty(&ashmem_lru_list)) {
+ struct ashmem_range *range =
+ list_first_entry(&ashmem_lru_list, typeof(*range), lru);
loff_t start = range->pgstart * PAGE_SIZE;
loff_t end = (range->pgend + 1) * PAGE_SIZE;
+ struct file *f = range->asma->file;
- range->asma->file->f_op->fallocate(range->asma->file,
- FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
- start, end - start);
+ get_file(f);
+ atomic_inc(&ashmem_shrink_inflight);
range->purged = ASHMEM_WAS_PURGED;
lru_del(range);
freed += range_size(range);
+ mutex_unlock(&ashmem_mutex);
+ f->f_op->fallocate(f,
+ FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
+ start, end - start);
+ fput(f);
+ if (atomic_dec_and_test(&ashmem_shrink_inflight))
+ wake_up_all(&ashmem_shrink_wait);
+ if (!mutex_trylock(&ashmem_mutex))
+ goto out;
if (--sc->nr_to_scan <= 0)
break;
}
mutex_unlock(&ashmem_mutex);
+out:
return freed;
}
@@ -582,7 +590,8 @@
*
* Caller must hold ashmem_mutex.
*/
-static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_pin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+ struct ashmem_range **new_range)
{
struct ashmem_range *range, *next;
int ret = ASHMEM_NOT_PURGED;
@@ -635,7 +644,7 @@
* second half and adjust the first chunk's endpoint.
*/
range_alloc(asma, range, range->purged,
- pgend + 1, range->pgend);
+ pgend + 1, range->pgend, new_range);
range_shrink(range, range->pgstart, pgstart - 1);
break;
}
@@ -649,7 +658,8 @@
*
* Caller must hold ashmem_mutex.
*/
-static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend)
+static int ashmem_unpin(struct ashmem_area *asma, size_t pgstart, size_t pgend,
+ struct ashmem_range **new_range)
{
struct ashmem_range *range, *next;
unsigned int purged = ASHMEM_NOT_PURGED;
@@ -675,7 +685,8 @@
}
}
- return range_alloc(asma, range, purged, pgstart, pgend);
+ range_alloc(asma, range, purged, pgstart, pgend, new_range);
+ return 0;
}
/*
@@ -708,11 +719,19 @@
struct ashmem_pin pin;
size_t pgstart, pgend;
int ret = -EINVAL;
+ struct ashmem_range *range = NULL;
if (copy_from_user(&pin, p, sizeof(pin)))
return -EFAULT;
+ if (cmd == ASHMEM_PIN || cmd == ASHMEM_UNPIN) {
+ range = kmem_cache_zalloc(ashmem_range_cachep, GFP_KERNEL);
+ if (!range)
+ return -ENOMEM;
+ }
+
mutex_lock(&ashmem_mutex);
+ wait_event(ashmem_shrink_wait, !atomic_read(&ashmem_shrink_inflight));
if (!asma->file)
goto out_unlock;
@@ -735,10 +754,10 @@
switch (cmd) {
case ASHMEM_PIN:
- ret = ashmem_pin(asma, pgstart, pgend);
+ ret = ashmem_pin(asma, pgstart, pgend, &range);
break;
case ASHMEM_UNPIN:
- ret = ashmem_unpin(asma, pgstart, pgend);
+ ret = ashmem_unpin(asma, pgstart, pgend, &range);
break;
case ASHMEM_GET_PIN_STATUS:
ret = ashmem_get_pin_status(asma, pgstart, pgend);
@@ -747,6 +766,8 @@
out_unlock:
mutex_unlock(&ashmem_mutex);
+ if (range)
+ kmem_cache_free(ashmem_range_cachep, range);
return ret;
}
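
The ashmem shrinker now drops ashmem_mutex around the blocking punch-hole, pinning the file with get_file() and advertising itself through ashmem_shrink_inflight, so the pin/unpin ioctl can wait out in-flight purges before touching the range lists. A condensed sketch of that coordination (names are illustrative):

	#include <linux/atomic.h>
	#include <linux/falloc.h>
	#include <linux/file.h>
	#include <linux/fs.h>
	#include <linux/mutex.h>
	#include <linux/wait.h>

	static atomic_t inflight = ATOMIC_INIT(0);
	static DECLARE_WAIT_QUEUE_HEAD(inflight_wait);

	/* shrinker side: called with the mutex held, returns without it */
	static void purge_one(struct mutex *lock, struct file *f,
			      loff_t start, loff_t len)
	{
		get_file(f);		/* keep the file alive lock-free */
		atomic_inc(&inflight);
		mutex_unlock(lock);

		f->f_op->fallocate(f,
				   FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				   start, len);
		fput(f);

		if (atomic_dec_and_test(&inflight))
			wake_up_all(&inflight_wait);
	}

	/* ioctl side: don't touch range state while a purge is mid-flight */
	static void pin_ioctl_enter(struct mutex *lock)
	{
		mutex_lock(lock);
		wait_event(inflight_wait, !atomic_read(&inflight));
	}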
diff --git a/drivers/staging/android/ion/ion_system_heap.c b/drivers/staging/android/ion/ion_system_heap.c
index 9453d6f..35355e5 100644
--- a/drivers/staging/android/ion/ion_system_heap.c
+++ b/drivers/staging/android/ion/ion_system_heap.c
@@ -619,6 +619,7 @@
bool cached)
{
int i;
+
for (i = 0; i < NUM_ORDERS; i++) {
struct ion_page_pool *pool;
gfp_t gfp_flags = low_order_gfp_flags;
diff --git a/drivers/staging/comedi/drivers/ni_660x.c b/drivers/staging/comedi/drivers/ni_660x.c
index e521ed9..35bd4d2 100644
--- a/drivers/staging/comedi/drivers/ni_660x.c
+++ b/drivers/staging/comedi/drivers/ni_660x.c
@@ -602,6 +602,7 @@
case NI_660X_PFI_OUTPUT_DIO:
if (chan > 31)
return -EINVAL;
+ break;
default:
return -EINVAL;
}
diff --git a/drivers/staging/erofs/inode.c b/drivers/staging/erofs/inode.c
index 9e7815f..7448744 100644
--- a/drivers/staging/erofs/inode.c
+++ b/drivers/staging/erofs/inode.c
@@ -184,16 +184,16 @@
/* setup the new inode */
if (S_ISREG(inode->i_mode)) {
#ifdef CONFIG_EROFS_FS_XATTR
- if (vi->xattr_isize)
- inode->i_op = &erofs_generic_xattr_iops;
+ inode->i_op = &erofs_generic_xattr_iops;
#endif
inode->i_fop = &generic_ro_fops;
} else if (S_ISDIR(inode->i_mode)) {
inode->i_op =
#ifdef CONFIG_EROFS_FS_XATTR
- vi->xattr_isize ? &erofs_dir_xattr_iops :
-#endif
+ &erofs_dir_xattr_iops;
+#else
&erofs_dir_iops;
+#endif
inode->i_fop = &erofs_dir_fops;
} else if (S_ISLNK(inode->i_mode)) {
/* by default, page_get_link is used for symlink */
diff --git a/drivers/staging/erofs/internal.h b/drivers/staging/erofs/internal.h
index 9f44ed8..58d8cbc 100644
--- a/drivers/staging/erofs/internal.h
+++ b/drivers/staging/erofs/internal.h
@@ -260,6 +260,7 @@
}
#define __erofs_workgroup_get(grp) atomic_inc(&(grp)->refcount)
+#define __erofs_workgroup_put(grp) atomic_dec(&(grp)->refcount)
extern int erofs_workgroup_put(struct erofs_workgroup *grp);
@@ -327,12 +328,17 @@
return blknr_to_addr(sbi->meta_blkaddr) + (nid << sbi->islotbits);
}
-#define inode_set_inited_xattr(inode) (EROFS_V(inode)->flags |= 1)
-#define inode_has_inited_xattr(inode) (EROFS_V(inode)->flags & 1)
+/* atomic flag definitions */
+#define EROFS_V_EA_INITED_BIT 0
+
+/* bitlock definitions (arranged in reverse order) */
+#define EROFS_V_BL_XATTR_BIT (BITS_PER_LONG - 1)
struct erofs_vnode {
erofs_nid_t nid;
- unsigned int flags;
+
+ /* atomic flags (including bitlocks) */
+ unsigned long flags;
unsigned char data_mapping_mode;
/* inline size in bytes */
@@ -485,8 +491,9 @@
};
-static inline struct page *erofs_get_inline_page(struct inode *inode,
- erofs_blk_t blkaddr)
+static inline struct page *
+erofs_get_inline_page(struct inode *inode,
+ erofs_blk_t blkaddr)
{
return erofs_get_meta_page(inode->i_sb,
blkaddr, S_ISDIR(inode->i_mode));
diff --git a/drivers/staging/erofs/namei.c b/drivers/staging/erofs/namei.c
index 546a471..023f64f 100644
--- a/drivers/staging/erofs/namei.c
+++ b/drivers/staging/erofs/namei.c
@@ -15,74 +15,77 @@
#include <trace/events/erofs.h>
-/* based on the value of qn->len is accurate */
-static inline int dirnamecmp(struct qstr *qn,
- struct qstr *qd, unsigned *matched)
+struct erofs_qstr {
+ const unsigned char *name;
+ const unsigned char *end;
+};
+
+/* assumes the end of qn is accurate and qn has the trailing '\0' */
+static inline int dirnamecmp(const struct erofs_qstr *qn,
+ const struct erofs_qstr *qd,
+ unsigned int *matched)
{
- unsigned i = *matched, len = min(qn->len, qd->len);
-loop:
- if (unlikely(i >= len)) {
- *matched = i;
- if (qn->len < qd->len) {
- /*
- * actually (qn->len == qd->len)
- * when qd->name[i] == '\0'
- */
- return qd->name[i] == '\0' ? 0 : -1;
+ unsigned int i = *matched;
+
+ /*
+ * On-disk error: only BUG_ON in debugging mode; otherwise
+ * return 1 to skip the invalid name and carry on (in
+ * consideration of lookup performance).
+ */
+ DBG_BUGON(qd->name > qd->end);
+
+ /* qd may not have a trailing '\0' */
+ /* however, access is safe while below qd->end */
+ while (qd->name + i < qd->end && qd->name[i] != '\0') {
+ if (qn->name[i] != qd->name[i]) {
+ *matched = i;
+ return qn->name[i] > qd->name[i] ? 1 : -1;
}
- return (qn->len > qd->len);
+ ++i;
}
-
- if (qn->name[i] != qd->name[i]) {
- *matched = i;
- return qn->name[i] > qd->name[i] ? 1 : -1;
- }
-
- ++i;
- goto loop;
+ *matched = i;
+ /* See comments in __d_alloc on the terminating NUL character */
+ return qn->name[i] == '\0' ? 0 : 1;
}
-static struct erofs_dirent *find_target_dirent(
- struct qstr *name,
- u8 *data, int maxsize)
+#define nameoff_from_disk(off, sz) (le16_to_cpu(off) & ((sz) - 1))
+
+static struct erofs_dirent *find_target_dirent(struct erofs_qstr *name,
+ u8 *data,
+ unsigned int dirblksize,
+ const int ndirents)
{
- unsigned ndirents, head, back;
- unsigned startprfx, endprfx;
+ int head, back;
+ unsigned int startprfx, endprfx;
struct erofs_dirent *const de = (struct erofs_dirent *)data;
- /* make sure that maxsize is valid */
- BUG_ON(maxsize < sizeof(struct erofs_dirent));
-
- ndirents = le16_to_cpu(de->nameoff) / sizeof(*de);
-
- /* corrupted dir (may be unnecessary...) */
- BUG_ON(!ndirents);
-
- head = 0;
+ /* since the 1st dirent has been evaluated previously */
+ head = 1;
back = ndirents - 1;
startprfx = endprfx = 0;
while (head <= back) {
- unsigned mid = head + (back - head) / 2;
- unsigned nameoff = le16_to_cpu(de[mid].nameoff);
- unsigned matched = min(startprfx, endprfx);
-
- struct qstr dname = QSTR_INIT(data + nameoff,
- unlikely(mid >= ndirents - 1) ?
- maxsize - nameoff :
- le16_to_cpu(de[mid + 1].nameoff) - nameoff);
+ const int mid = head + (back - head) / 2;
+ const int nameoff = nameoff_from_disk(de[mid].nameoff,
+ dirblksize);
+ unsigned int matched = min(startprfx, endprfx);
+ struct erofs_qstr dname = {
+ .name = data + nameoff,
+ .end = unlikely(mid >= ndirents - 1) ?
+ data + dirblksize :
+ data + nameoff_from_disk(de[mid + 1].nameoff,
+ dirblksize)
+ };
/* string comparison without already matched prefix */
int ret = dirnamecmp(name, &dname, &matched);
- if (unlikely(!ret))
+ if (unlikely(!ret)) {
return de + mid;
- else if (ret > 0) {
+ } else if (ret > 0) {
head = mid + 1;
startprfx = matched;
- } else if (unlikely(mid < 1)) /* fix "mid" overflow */
- break;
- else {
+ } else {
back = mid - 1;
endprfx = matched;
}
@@ -91,12 +94,12 @@
return ERR_PTR(-ENOENT);
}
-static struct page *find_target_block_classic(
- struct inode *dir,
- struct qstr *name, int *_diff)
+static struct page *find_target_block_classic(struct inode *dir,
+ struct erofs_qstr *name,
+ int *_ndirents)
{
- unsigned startprfx, endprfx;
- unsigned head, back;
+ unsigned int startprfx, endprfx;
+ int head, back;
struct address_space *const mapping = dir->i_mapping;
struct page *candidate = ERR_PTR(-ENOENT);
@@ -105,41 +108,43 @@
back = inode_datablocks(dir) - 1;
while (head <= back) {
- unsigned mid = head + (back - head) / 2;
+ const int mid = head + (back - head) / 2;
struct page *page = read_mapping_page(mapping, mid, NULL);
- if (IS_ERR(page)) {
-exact_out:
- if (!IS_ERR(candidate)) /* valid candidate */
- put_page(candidate);
- return page;
- } else {
- int diff;
- unsigned ndirents, matched;
- struct qstr dname;
+ if (!IS_ERR(page)) {
struct erofs_dirent *de = kmap_atomic(page);
- unsigned nameoff = le16_to_cpu(de->nameoff);
+ const int nameoff = nameoff_from_disk(de->nameoff,
+ EROFS_BLKSIZ);
+ const int ndirents = nameoff / sizeof(*de);
+ int diff;
+ unsigned int matched;
+ struct erofs_qstr dname;
- ndirents = nameoff / sizeof(*de);
-
- /* corrupted dir (should have one entry at least) */
- BUG_ON(!ndirents || nameoff > PAGE_SIZE);
+ if (unlikely(!ndirents)) {
+ DBG_BUGON(1);
+ kunmap_atomic(de);
+ put_page(page);
+ page = ERR_PTR(-EIO);
+ goto out;
+ }
matched = min(startprfx, endprfx);
dname.name = (u8 *)de + nameoff;
- dname.len = ndirents == 1 ?
- /* since the rest of the last page is 0 */
- EROFS_BLKSIZ - nameoff
- : le16_to_cpu(de[1].nameoff) - nameoff;
+ if (ndirents == 1)
+ dname.end = (u8 *)de + EROFS_BLKSIZ;
+ else
+ dname.end = (u8 *)de +
+ nameoff_from_disk(de[1].nameoff,
+ EROFS_BLKSIZ);
/* string comparison without already matched prefix */
diff = dirnamecmp(name, &dname, &matched);
kunmap_atomic(de);
if (unlikely(!diff)) {
- *_diff = 0;
- goto exact_out;
+ *_ndirents = 0;
+ goto out;
} else if (diff > 0) {
head = mid + 1;
startprfx = matched;
@@ -147,45 +152,51 @@
if (likely(!IS_ERR(candidate)))
put_page(candidate);
candidate = page;
+ *_ndirents = ndirents;
} else {
put_page(page);
- if (unlikely(mid < 1)) /* fix "mid" overflow */
- break;
-
back = mid - 1;
endprfx = matched;
}
+ continue;
}
+out: /* free if the candidate is valid */
+ if (!IS_ERR(candidate))
+ put_page(candidate);
+ return page;
}
- *_diff = 1;
return candidate;
}
int erofs_namei(struct inode *dir,
- struct qstr *name,
- erofs_nid_t *nid, unsigned *d_type)
+ struct qstr *name,
+ erofs_nid_t *nid, unsigned int *d_type)
{
- int diff;
+ int ndirents;
struct page *page;
- u8 *data;
+ void *data;
struct erofs_dirent *de;
+ struct erofs_qstr qn;
if (unlikely(!dir->i_size))
return -ENOENT;
- diff = 1;
- page = find_target_block_classic(dir, name, &diff);
+ qn.name = name->name;
+ qn.end = name->name + name->len;
+
+ ndirents = 0;
+ page = find_target_block_classic(dir, &qn, &ndirents);
if (unlikely(IS_ERR(page)))
return PTR_ERR(page);
data = kmap_atomic(page);
/* the target page has been mapped */
- de = likely(diff) ?
- /* since the rest of the last page is 0 */
- find_target_dirent(name, data, EROFS_BLKSIZ) :
- (struct erofs_dirent *)data;
+ if (ndirents)
+ de = find_target_dirent(&qn, data, EROFS_BLKSIZ, ndirents);
+ else
+ de = (struct erofs_dirent *)data;
if (likely(!IS_ERR(de))) {
*nid = le64_to_cpu(de->nid);
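
find_target_dirent() and find_target_block_classic() both use a binary search that remembers how many leading bytes the lower and upper bounds already share with the target (startprfx/endprfx); since the table is sorted, every candidate between them shares at least min(startprfx, endprfx) bytes with the target, so each comparison may resume from that offset instead of byte 0. A standalone sketch of the technique over a plain sorted string table:

	static int prefix_binary_search(const char *const *names, int n,
					const char *target)
	{
		int head = 0, back = n - 1;
		unsigned int startprfx = 0, endprfx = 0;

		while (head <= back) {
			int mid = head + (back - head) / 2;
			unsigned int matched = startprfx < endprfx ?
					       startprfx : endprfx;
			const char *cand = names[mid];
			int diff;

			/* resume comparing after the known common prefix */
			while (target[matched] == cand[matched] &&
			       target[matched] != '\0')
				++matched;
			diff = (unsigned char)target[matched] -
			       (unsigned char)cand[matched];

			if (!diff)
				return mid;
			if (diff > 0) {
				head = mid + 1;
				startprfx = matched;
			} else {
				back = mid - 1;
				endprfx = matched;
			}
		}
		return -1;	/* not found */
	}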
diff --git a/drivers/staging/erofs/unzip_vle.c b/drivers/staging/erofs/unzip_vle.c
index 1279241..f44662d 100644
--- a/drivers/staging/erofs/unzip_vle.c
+++ b/drivers/staging/erofs/unzip_vle.c
@@ -57,15 +57,30 @@
Z_EROFS_VLE_WORK_SECONDARY,
Z_EROFS_VLE_WORK_PRIMARY,
/*
- * The current work has at least been linked with the following
- * processed chained works, which means if the processing page
- * is the tail partial page of the work, the current work can
- * safely use the whole page, as illustrated below:
- * +--------------+-------------------------------------------+
- * | tail page | head page (of the previous work) |
- * +--------------+-------------------------------------------+
- * /\ which belongs to the current work
- * [ (*) this page can be used for the current work itself. ]
+ * The current work was the tail of an existing chain, and the previous
+ * processed chained works are all decided to be hooked up to it.
+ * A new chain should be created for the remaining unprocessed works,
+ * therefore different from Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
+ * the next work cannot reuse the whole page in the following scenario:
+ * ________________________________________________________________
+ * | tail (partial) page | head (partial) page |
+ * | (belongs to the next work) | (belongs to the current work) |
+ * |_______PRIMARY_FOLLOWED_______|________PRIMARY_HOOKED___________|
+ */
+ Z_EROFS_VLE_WORK_PRIMARY_HOOKED,
+ /*
+ * The current work has been linked with the processed chained works,
+ * and could be also linked with the potential remaining works, which
+ * means if the processing page is the tail partial page of the work,
+ * the current work can safely use the whole page (since the next work
+ * is under control) for in-place decompression, as illustrated below:
+ * ________________________________________________________________
+ * | tail (partial) page | head (partial) page |
+ * | (of the current work) | (of the previous work) |
+ * | PRIMARY_FOLLOWED or | |
+ * |_____PRIMARY_HOOKED____|____________PRIMARY_FOLLOWED____________|
+ *
+ * [ (*) the above page can be used for the current work itself. ]
*/
Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED,
Z_EROFS_VLE_WORK_MAX
@@ -234,10 +249,10 @@
return ret ? 0 : -EAGAIN;
}
-static inline bool try_to_claim_workgroup(
- struct z_erofs_vle_workgroup *grp,
- z_erofs_vle_owned_workgrp_t *owned_head,
- bool *hosted)
+static enum z_erofs_vle_work_role
+try_to_claim_workgroup(struct z_erofs_vle_workgroup *grp,
+ z_erofs_vle_owned_workgrp_t *owned_head,
+ bool *hosted)
{
DBG_BUGON(*hosted == true);
@@ -251,6 +266,9 @@
*owned_head = grp;
*hosted = true;
+ /* lucky, I am the followee :) */
+ return Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
+
} else if (grp->next == Z_EROFS_VLE_WORKGRP_TAIL) {
/*
* type 2, link to the end of an existing open chain,
@@ -260,12 +278,11 @@
if (Z_EROFS_VLE_WORKGRP_TAIL != cmpxchg(&grp->next,
Z_EROFS_VLE_WORKGRP_TAIL, *owned_head))
goto retry;
-
*owned_head = Z_EROFS_VLE_WORKGRP_TAIL;
- } else
- return false; /* :( better luck next time */
+ return Z_EROFS_VLE_WORK_PRIMARY_HOOKED;
+ }
- return true; /* lucky, I am the followee :) */
+ return Z_EROFS_VLE_WORK_PRIMARY; /* :( better luck next time */
}
static struct z_erofs_vle_work *
@@ -337,12 +354,8 @@
*hosted = false;
if (!primary)
*role = Z_EROFS_VLE_WORK_SECONDARY;
- /* claim the workgroup if possible */
- else if (try_to_claim_workgroup(grp, owned_head, hosted))
- *role = Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED;
- else
- *role = Z_EROFS_VLE_WORK_PRIMARY;
-
+ else /* claim the workgroup if possible */
+ *role = try_to_claim_workgroup(grp, owned_head, hosted);
return work;
}
@@ -419,6 +432,9 @@
}
}
+#define builder_is_hooked(builder) \
+ ((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_HOOKED)
+
#define builder_is_followed(builder) \
((builder)->role >= Z_EROFS_VLE_WORK_PRIMARY_FOLLOWED)
@@ -583,7 +599,7 @@
struct z_erofs_vle_work_builder *const builder = &fe->builder;
const loff_t offset = page_offset(page);
- bool tight = builder_is_followed(builder);
+ bool tight = builder_is_hooked(builder);
struct z_erofs_vle_work *work = builder->work;
#ifdef EROFS_FS_HAS_MANAGED_CACHE
@@ -606,8 +622,12 @@
/* lucky, within the range of the current map_blocks */
if (offset + cur >= map->m_la &&
- offset + cur < map->m_la + map->m_llen)
+ offset + cur < map->m_la + map->m_llen) {
+ /* didn't get a valid unzip work previously (very rare) */
+ if (!builder->work)
+ goto restart_now;
goto hitted;
+ }
/* go ahead the next map_blocks */
debugln("%s: [out-of-range] pos %llu", __func__, offset + cur);
@@ -621,6 +641,7 @@
if (unlikely(err))
goto err_out;
+restart_now:
if (unlikely(!(map->m_flags & EROFS_MAP_MAPPED)))
goto hitted;
@@ -646,7 +667,7 @@
builder->role = Z_EROFS_VLE_WORK_PRIMARY;
#endif
- tight &= builder_is_followed(builder);
+ tight &= builder_is_hooked(builder);
work = builder->work;
hitted:
cur = end - min_t(unsigned, offset + end - map->m_la, end);
@@ -661,6 +682,9 @@
(tight ? Z_EROFS_PAGE_TYPE_EXCLUSIVE :
Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED));
+ if (cur)
+ tight &= builder_is_followed(builder);
+
retry:
err = z_erofs_vle_work_add_page(builder, page, page_type);
/* should allocate an additional staging page for pagevec */
@@ -901,11 +925,10 @@
if (llen > grp->llen)
llen = grp->llen;
- err = z_erofs_vle_unzip_fast_percpu(compressed_pages,
- clusterpages, pages, llen, work->pageofs,
- z_erofs_onlinepage_endio);
+ err = z_erofs_vle_unzip_fast_percpu(compressed_pages, clusterpages,
+ pages, llen, work->pageofs);
if (err != -ENOTSUPP)
- goto out_percpu;
+ goto out;
if (sparsemem_pages >= nr_pages)
goto skip_allocpage;
@@ -926,21 +949,7 @@
erofs_vunmap(vout, nr_pages);
out:
- for (i = 0; i < nr_pages; ++i) {
- page = pages[i];
- DBG_BUGON(page->mapping == NULL);
-
- /* recycle all individual staging pages */
- if (z_erofs_gather_if_stagingpage(page_pool, page))
- continue;
-
- if (unlikely(err < 0))
- SetPageError(page);
-
- z_erofs_onlinepage_endio(page);
- }
-
-out_percpu:
+ /* must handle all compressed pages before ending pages */
for (i = 0; i < clusterpages; ++i) {
page = compressed_pages[i];
@@ -954,6 +963,23 @@
WRITE_ONCE(compressed_pages[i], NULL);
}
+ for (i = 0; i < nr_pages; ++i) {
+ page = pages[i];
+ if (!page)
+ continue;
+
+ DBG_BUGON(page->mapping == NULL);
+
+ /* recycle all individual staging pages */
+ if (z_erofs_gather_if_stagingpage(page_pool, page))
+ continue;
+
+ if (unlikely(err < 0))
+ SetPageError(page);
+
+ z_erofs_onlinepage_endio(page);
+ }
+
if (pages == z_pagemap_global)
mutex_unlock(&z_pagemap_global_lock);
else if (unlikely(pages != pages_onstack))
diff --git a/drivers/staging/erofs/unzip_vle.h b/drivers/staging/erofs/unzip_vle.h
index 3316bc3..684ff06 100644
--- a/drivers/staging/erofs/unzip_vle.h
+++ b/drivers/staging/erofs/unzip_vle.h
@@ -218,8 +218,7 @@
extern int z_erofs_vle_unzip_fast_percpu(struct page **compressed_pages,
unsigned clusterpages, struct page **pages,
- unsigned outlen, unsigned short pageofs,
- void (*endio)(struct page *));
+ unsigned int outlen, unsigned short pageofs);
extern int z_erofs_vle_unzip_vmap(struct page **compressed_pages,
unsigned clusterpages, void *vaddr, unsigned llen,
diff --git a/drivers/staging/erofs/unzip_vle_lz4.c b/drivers/staging/erofs/unzip_vle_lz4.c
index 9cb35cd..055420e 100644
--- a/drivers/staging/erofs/unzip_vle_lz4.c
+++ b/drivers/staging/erofs/unzip_vle_lz4.c
@@ -105,8 +105,7 @@
unsigned clusterpages,
struct page **pages,
unsigned outlen,
- unsigned short pageofs,
- void (*endio)(struct page *))
+ unsigned short pageofs)
{
void *vin, *vout;
unsigned nr_pages, i, j;
@@ -128,31 +127,30 @@
ret = z_erofs_unzip_lz4(vin, vout + pageofs,
clusterpages * PAGE_SIZE, outlen);
- if (ret >= 0) {
- outlen = ret;
- ret = 0;
- }
+ if (ret < 0)
+ goto out;
+ ret = 0;
for (i = 0; i < nr_pages; ++i) {
j = min((unsigned)PAGE_SIZE - pageofs, outlen);
if (pages[i] != NULL) {
- if (ret < 0)
- SetPageError(pages[i]);
- else if (clusterpages == 1 && pages[i] == compressed_pages[0])
+ if (clusterpages == 1 &&
+ pages[i] == compressed_pages[0]) {
memcpy(vin + pageofs, vout + pageofs, j);
- else {
+ } else {
void *dst = kmap_atomic(pages[i]);
memcpy(dst + pageofs, vout + pageofs, j);
kunmap_atomic(dst);
}
- endio(pages[i]);
}
vout += PAGE_SIZE;
outlen -= j;
pageofs = 0;
}
+
+out:
preempt_enable();
if (clusterpages == 1)
diff --git a/drivers/staging/erofs/utils.c b/drivers/staging/erofs/utils.c
index dd2ac9d..2d96820 100644
--- a/drivers/staging/erofs/utils.c
+++ b/drivers/staging/erofs/utils.c
@@ -87,12 +87,21 @@
grp = (void *)((unsigned long)grp |
1UL << RADIX_TREE_EXCEPTIONAL_SHIFT);
- err = radix_tree_insert(&sbi->workstn_tree,
- grp->index, grp);
+ /*
+ * Bump up reference count before making this workgroup
+ * visible to other users in order to avoid potential UAF
+ * when not serialized by erofs_workstn_lock.
+ */
+ __erofs_workgroup_get(grp);
- if (!err) {
- __erofs_workgroup_get(grp);
- }
+ err = radix_tree_insert(&sbi->workstn_tree,
+ grp->index, grp);
+ if (unlikely(err))
+ /*
+ * it's safe to decrease since the workgroup isn't visible
+ * and refcount >= 2 (cannot be frozen).
+ */
+ __erofs_workgroup_put(grp);
erofs_workstn_unlock(sbi);
radix_tree_preload_end();
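
The reordering above is the publish-after-get pattern: take the extra
reference before the object becomes findable, so no other CPU can ever
observe it at a droppable refcount. A userspace model with C11 atomics
(all names illustrative; the real code must additionally cope with
freezing via negative refcounts):

    #include <stdatomic.h>

    struct obj { atomic_int refcount; };

    static int publish(struct obj *o, int (*tree_insert)(struct obj *))
    {
        atomic_fetch_add(&o->refcount, 1);  /* models __erofs_workgroup_get() */

        if (tree_insert(o)) {               /* models radix_tree_insert() failing */
            /* safe: the object never became visible and refcount >= 2 */
            atomic_fetch_sub(&o->refcount, 1);
            return -1;
        }
        return 0;                           /* visible, reference held */
    }
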
@@ -101,19 +110,99 @@
extern void erofs_workgroup_free_rcu(struct erofs_workgroup *grp);
+static void __erofs_workgroup_free(struct erofs_workgroup *grp)
+{
+ atomic_long_dec(&erofs_global_shrink_cnt);
+ erofs_workgroup_free_rcu(grp);
+}
+
int erofs_workgroup_put(struct erofs_workgroup *grp)
{
int count = atomic_dec_return(&grp->refcount);
if (count == 1)
atomic_long_inc(&erofs_global_shrink_cnt);
- else if (!count) {
- atomic_long_dec(&erofs_global_shrink_cnt);
- erofs_workgroup_free_rcu(grp);
- }
+ else if (!count)
+ __erofs_workgroup_free(grp);
return count;
}
+#ifdef EROFS_FS_HAS_MANAGED_CACHE
+/* for cache-managed case, customized reclaim paths exist */
+static void erofs_workgroup_unfreeze_final(struct erofs_workgroup *grp)
+{
+ erofs_workgroup_unfreeze(grp, 0);
+ __erofs_workgroup_free(grp);
+}
+
+bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *grp,
+ bool cleanup)
+{
+ void *entry;
+
+ /*
+ * with managed cache enabled, the refcount of workgroups
+ * themselves could be < 0 (frozen), so there is no guarantee
+ * that all refcounts are > 0.
+ */
+ if (!erofs_workgroup_try_to_freeze(grp, 1))
+ return false;
+
+ /*
+ * note that all cached pages should be unlinked
+ * before deleting it from the radix tree.
+ * Otherwise some cached pages of an orphan old workgroup
+ * could still be linked after the new one becomes available.
+ */
+ if (erofs_try_to_free_all_cached_pages(sbi, grp)) {
+ erofs_workgroup_unfreeze(grp, 1);
+ return false;
+ }
+
+ /*
+ * it is impossible to fail after the workgroup is frozen,
+ * however, in order to catch potential race conditions, add a
+ * DBG_BUGON to observe this in advance.
+ */
+ entry = radix_tree_delete(&sbi->workstn_tree, grp->index);
+ DBG_BUGON((void *)((unsigned long)entry &
+ ~RADIX_TREE_EXCEPTIONAL_ENTRY) != grp);
+
+ /*
+ * if managed cache is enabled, the last refcount
+ * should indicate the related workstation.
+ */
+ erofs_workgroup_unfreeze_final(grp);
+ return true;
+}
+
+#else
+/* for nocache case, no customized reclaim path at all */
+bool erofs_try_to_release_workgroup(struct erofs_sb_info *sbi,
+ struct erofs_workgroup *grp,
+ bool cleanup)
+{
+ int cnt = atomic_read(&grp->refcount);
+ void *entry;
+
+ DBG_BUGON(cnt <= 0);
+ DBG_BUGON(cleanup && cnt != 1);
+
+ if (cnt > 1)
+ return false;
+
+ entry = radix_tree_delete(&sbi->workstn_tree, grp->index);
+ DBG_BUGON((void *)((unsigned long)entry &
+ ~RADIX_TREE_EXCEPTIONAL_ENTRY) != grp);
+
+ /* (rarely) could be grabbed again when freeing */
+ erofs_workgroup_put(grp);
+ return true;
+}
+
+#endif
+
unsigned long erofs_shrink_workstation(struct erofs_sb_info *sbi,
unsigned long nr_shrink,
bool cleanup)
@@ -130,44 +219,16 @@
batch, first_index, PAGEVEC_SIZE);
for (i = 0; i < found; ++i) {
- int cnt;
struct erofs_workgroup *grp = (void *)
((unsigned long)batch[i] &
~RADIX_TREE_EXCEPTIONAL_ENTRY);
first_index = grp->index + 1;
- cnt = atomic_read(&grp->refcount);
- BUG_ON(cnt <= 0);
-
- if (cleanup)
- BUG_ON(cnt != 1);
-
-#ifndef EROFS_FS_HAS_MANAGED_CACHE
- else if (cnt > 1)
-#else
- if (!erofs_workgroup_try_to_freeze(grp, 1))
-#endif
+ /* try to shrink each valid workgroup */
+ if (!erofs_try_to_release_workgroup(sbi, grp, cleanup))
continue;
- if (radix_tree_delete(&sbi->workstn_tree,
- grp->index) != grp) {
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
-skip:
- erofs_workgroup_unfreeze(grp, 1);
-#endif
- continue;
- }
-
-#ifdef EROFS_FS_HAS_MANAGED_CACHE
- if (erofs_try_to_free_all_cached_pages(sbi, grp))
- goto skip;
-
- erofs_workgroup_unfreeze(grp, 1);
-#endif
- /* (rarely) grabbed again when freeing */
- erofs_workgroup_put(grp);
-
++freed;
if (unlikely(!--nr_shrink))
break;
diff --git a/drivers/staging/erofs/xattr.c b/drivers/staging/erofs/xattr.c
index 0e9cfec..2db99cf 100644
--- a/drivers/staging/erofs/xattr.c
+++ b/drivers/staging/erofs/xattr.c
@@ -24,36 +24,77 @@
static inline void xattr_iter_end(struct xattr_iter *it, bool atomic)
{
- /* only init_inode_xattrs use non-atomic once */
+ /* the only user of kunmap() is 'init_inode_xattrs' */
if (unlikely(!atomic))
kunmap(it->page);
else
kunmap_atomic(it->kaddr);
+
unlock_page(it->page);
put_page(it->page);
}
-static void init_inode_xattrs(struct inode *inode)
+static inline void xattr_iter_end_final(struct xattr_iter *it)
{
+ if (!it->page)
+ return;
+
+ xattr_iter_end(it, true);
+}
+
+static int init_inode_xattrs(struct inode *inode)
+{
+ struct erofs_vnode *const vi = EROFS_V(inode);
struct xattr_iter it;
unsigned i;
struct erofs_xattr_ibody_header *ih;
struct erofs_sb_info *sbi;
- struct erofs_vnode *vi;
bool atomic_map;
+ int ret = 0;
- if (likely(inode_has_inited_xattr(inode)))
- return;
+ /* in the most common case, xattrs of this inode have already been initialized. */
+ if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+ return 0;
- vi = EROFS_V(inode);
- BUG_ON(!vi->xattr_isize);
+ if (wait_on_bit_lock(&vi->flags, EROFS_V_BL_XATTR_BIT, TASK_KILLABLE))
+ return -ERESTARTSYS;
+
+ /* someone has initialized xattrs for us? */
+ if (test_bit(EROFS_V_EA_INITED_BIT, &vi->flags))
+ goto out_unlock;
+
+ /*
+ * bypass all xattr operations if ->xattr_isize is not greater than
+ * sizeof(struct erofs_xattr_ibody_header), in detail:
+ * 1) it is too small to contain erofs_xattr_ibody_header, so
+ * ->xattr_isize should be 0 (it means no xattr);
+ * 2) it is just large enough for erofs_xattr_ibody_header, which is
+ * undefined on-disk right now (may be used later with a new sb feature).
+ */
+ if (vi->xattr_isize == sizeof(struct erofs_xattr_ibody_header)) {
+ errln("xattr_isize %d of nid %llu is not supported yet",
+ vi->xattr_isize, vi->nid);
+ ret = -ENOTSUPP;
+ goto out_unlock;
+ } else if (vi->xattr_isize < sizeof(struct erofs_xattr_ibody_header)) {
+ if (unlikely(vi->xattr_isize)) {
+ DBG_BUGON(1);
+ ret = -EIO;
+ goto out_unlock; /* xattr ondisk layout error */
+ }
+ ret = -ENOATTR;
+ goto out_unlock;
+ }
sbi = EROFS_I_SB(inode);
it.blkaddr = erofs_blknr(iloc(sbi, vi->nid) + vi->inode_isize);
it.ofs = erofs_blkoff(iloc(sbi, vi->nid) + vi->inode_isize);
it.page = erofs_get_inline_page(inode, it.blkaddr);
- BUG_ON(IS_ERR(it.page));
+ if (IS_ERR(it.page)) {
+ ret = PTR_ERR(it.page);
+ goto out_unlock;
+ }
/* read in shared xattr array (non-atomic, see kmalloc below) */
it.kaddr = kmap(it.page);
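
The rewritten init path above is the usual test/lock/retest once-init
idiom. A userspace model using a pthread mutex in place of
wait_on_bit_lock()/clear_and_wake_up_bit() (the names mirror the
EROFS_V_* bits but everything here is illustrative):

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdbool.h>

    struct node {
        atomic_bool ea_inited;     /* models EROFS_V_EA_INITED_BIT */
        pthread_mutex_t bl_xattr;  /* models the EROFS_V_BL_XATTR_BIT lock */
    };

    static int init_xattrs_once(struct node *n, int (*do_init)(struct node *))
    {
        int ret = 0;

        if (atomic_load(&n->ea_inited))     /* fast path: already initialized */
            return 0;

        pthread_mutex_lock(&n->bl_xattr);   /* models wait_on_bit_lock() */
        if (!atomic_load(&n->ea_inited)) {  /* recheck: someone may have won */
            ret = do_init(n);
            if (!ret)
                atomic_store(&n->ea_inited, true);
        }
        pthread_mutex_unlock(&n->bl_xattr); /* models clear_and_wake_up_bit() */
        return ret;
    }
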
@@ -62,9 +103,13 @@
ih = (struct erofs_xattr_ibody_header *)(it.kaddr + it.ofs);
vi->xattr_shared_count = ih->h_shared_count;
- vi->xattr_shared_xattrs = (unsigned *)kmalloc_array(
- vi->xattr_shared_count, sizeof(unsigned),
- GFP_KERNEL | __GFP_NOFAIL);
+ vi->xattr_shared_xattrs = kmalloc_array(vi->xattr_shared_count,
+ sizeof(uint), GFP_KERNEL);
+ if (!vi->xattr_shared_xattrs) {
+ xattr_iter_end(&it, atomic_map);
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
/* let's skip ibody header */
it.ofs += sizeof(struct erofs_xattr_ibody_header);
@@ -77,7 +122,12 @@
it.page = erofs_get_meta_page(inode->i_sb,
++it.blkaddr, S_ISDIR(inode->i_mode));
- BUG_ON(IS_ERR(it.page));
+ if (IS_ERR(it.page)) {
+ kfree(vi->xattr_shared_xattrs);
+ vi->xattr_shared_xattrs = NULL;
+ ret = PTR_ERR(it.page);
+ goto out_unlock;
+ }
it.kaddr = kmap_atomic(it.page);
atomic_map = true;
@@ -89,7 +139,11 @@
}
xattr_iter_end(&it, atomic_map);
- inode_set_inited_xattr(inode);
+ set_bit(EROFS_V_EA_INITED_BIT, &vi->flags);
+
+out_unlock:
+ clear_and_wake_up_bit(EROFS_V_BL_XATTR_BIT, &vi->flags);
+ return ret;
}
struct xattr_iter_handlers {
@@ -99,18 +153,25 @@
void (*value)(struct xattr_iter *, unsigned, char *, unsigned);
};
-static void xattr_iter_fixup(struct xattr_iter *it)
+static inline int xattr_iter_fixup(struct xattr_iter *it)
{
- if (unlikely(it->ofs >= EROFS_BLKSIZ)) {
- xattr_iter_end(it, true);
+ if (it->ofs < EROFS_BLKSIZ)
+ return 0;
- it->blkaddr += erofs_blknr(it->ofs);
- it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
- BUG_ON(IS_ERR(it->page));
+ xattr_iter_end(it, true);
- it->kaddr = kmap_atomic(it->page);
- it->ofs = erofs_blkoff(it->ofs);
+ it->blkaddr += erofs_blknr(it->ofs);
+ it->page = erofs_get_meta_page(it->sb, it->blkaddr, false);
+ if (IS_ERR(it->page)) {
+ int err = PTR_ERR(it->page);
+
+ it->page = NULL;
+ return err;
}
+
+ it->kaddr = kmap_atomic(it->page);
+ it->ofs = erofs_blkoff(it->ofs);
+ return 0;
}
static int inline_xattr_iter_begin(struct xattr_iter *it,
@@ -132,21 +193,24 @@
it->ofs = erofs_blkoff(iloc(sbi, vi->nid) + inline_xattr_ofs);
it->page = erofs_get_inline_page(inode, it->blkaddr);
- BUG_ON(IS_ERR(it->page));
- it->kaddr = kmap_atomic(it->page);
+ if (IS_ERR(it->page))
+ return PTR_ERR(it->page);
+ it->kaddr = kmap_atomic(it->page);
return vi->xattr_isize - xattr_header_sz;
}
static int xattr_foreach(struct xattr_iter *it,
- struct xattr_iter_handlers *op, unsigned *tlimit)
+ const struct xattr_iter_handlers *op, unsigned int *tlimit)
{
struct erofs_xattr_entry entry;
unsigned value_sz, processed, slice;
int err;
/* 0. fixup blkaddr, ofs, ipage */
- xattr_iter_fixup(it);
+ err = xattr_iter_fixup(it);
+ if (err)
+ return err;
/*
* 1. read xattr entry to the memory,
@@ -178,7 +242,9 @@
if (it->ofs >= EROFS_BLKSIZ) {
BUG_ON(it->ofs > EROFS_BLKSIZ);
- xattr_iter_fixup(it);
+ err = xattr_iter_fixup(it);
+ if (err)
+ goto out;
it->ofs = 0;
}
@@ -210,7 +276,10 @@
while (processed < value_sz) {
if (it->ofs >= EROFS_BLKSIZ) {
BUG_ON(it->ofs > EROFS_BLKSIZ);
- xattr_iter_fixup(it);
+
+ err = xattr_iter_fixup(it);
+ if (err)
+ goto out;
it->ofs = 0;
}
@@ -270,7 +339,7 @@
memcpy(it->buffer + processed, buf, len);
}
-static struct xattr_iter_handlers find_xattr_handlers = {
+static const struct xattr_iter_handlers find_xattr_handlers = {
.entry = xattr_entrymatch,
.name = xattr_namematch,
.alloc_buffer = xattr_checkbuffer,
@@ -291,8 +360,11 @@
ret = xattr_foreach(&it->it, &find_xattr_handlers, &remaining);
if (ret >= 0)
break;
+
+ if (ret != -ENOATTR) /* -ENOMEM, -EIO, etc. */
+ break;
}
- xattr_iter_end(&it->it, true);
+ xattr_iter_end_final(&it->it);
return ret < 0 ? ret : it->buffer_size;
}
@@ -315,8 +387,10 @@
xattr_iter_end(&it->it, true);
it->it.page = erofs_get_meta_page(inode->i_sb,
- blkaddr, false);
- BUG_ON(IS_ERR(it->it.page));
+ blkaddr, false);
+ if (IS_ERR(it->it.page))
+ return PTR_ERR(it->it.page);
+
it->it.kaddr = kmap_atomic(it->it.page);
it->it.blkaddr = blkaddr;
}
@@ -324,9 +398,12 @@
ret = xattr_foreach(&it->it, &find_xattr_handlers, NULL);
if (ret >= 0)
break;
+
+ if (ret != -ENOATTR) /* -ENOMEM, -EIO, etc. */
+ break;
}
if (vi->xattr_shared_count)
- xattr_iter_end(&it->it, true);
+ xattr_iter_end_final(&it->it);
return ret < 0 ? ret : it->buffer_size;
}
@@ -351,7 +428,9 @@
if (unlikely(name == NULL))
return -EINVAL;
- init_inode_xattrs(inode);
+ ret = init_inode_xattrs(inode);
+ if (ret)
+ return ret;
it.index = index;
@@ -374,7 +453,6 @@
struct dentry *unused, struct inode *inode,
const char *name, void *buffer, size_t size)
{
- struct erofs_vnode *const vi = EROFS_V(inode);
struct erofs_sb_info *const sbi = EROFS_I_SB(inode);
switch (handler->flags) {
@@ -392,9 +470,6 @@
return -EINVAL;
}
- if (!vi->xattr_isize)
- return -ENOATTR;
-
return erofs_getxattr(inode, handler->flags, name, buffer, size);
}
@@ -494,7 +569,7 @@
return 1;
}
-static struct xattr_iter_handlers list_xattr_handlers = {
+static const struct xattr_iter_handlers list_xattr_handlers = {
.entry = xattr_entrylist,
.name = xattr_namelist,
.alloc_buffer = xattr_skipvalue,
@@ -516,7 +591,7 @@
if (ret < 0)
break;
}
- xattr_iter_end(&it->it, true);
+ xattr_iter_end_final(&it->it);
return ret < 0 ? ret : it->buffer_ofs;
}
@@ -538,8 +613,10 @@
xattr_iter_end(&it->it, true);
it->it.page = erofs_get_meta_page(inode->i_sb,
- blkaddr, false);
- BUG_ON(IS_ERR(it->it.page));
+ blkaddr, false);
+ if (IS_ERR(it->it.page))
+ return PTR_ERR(it->it.page);
+
it->it.kaddr = kmap_atomic(it->it.page);
it->it.blkaddr = blkaddr;
}
@@ -549,7 +626,7 @@
break;
}
if (vi->xattr_shared_count)
- xattr_iter_end(&it->it, true);
+ xattr_iter_end_final(&it->it);
return ret < 0 ? ret : it->buffer_ofs;
}
@@ -560,7 +637,9 @@
int ret;
struct listxattr_iter it;
- init_inode_xattrs(d_inode(dentry));
+ ret = init_inode_xattrs(d_inode(dentry));
+ if (ret)
+ return ret;
it.dentry = dentry;
it.buffer = buffer;
diff --git a/drivers/staging/wilc1000/linux_wlan.c b/drivers/staging/wilc1000/linux_wlan.c
index 3b8d237..649caae 100644
--- a/drivers/staging/wilc1000/linux_wlan.c
+++ b/drivers/staging/wilc1000/linux_wlan.c
@@ -1090,8 +1090,8 @@
vif->wilc = *wilc;
vif->ndev = ndev;
wl->vif[i] = vif;
- wl->vif_num = i;
- vif->idx = wl->vif_num;
+ wl->vif_num = i + 1;
+ vif->idx = i;
ndev->netdev_ops = &wilc_netdev_ops;
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 09bf6b4..1493d0f 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -187,6 +187,7 @@
xhci->quirks |= XHCI_SSIC_PORT_UNUSED;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI))
xhci->quirks |= XHCI_INTEL_USB_ROLE_SW;
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
diff --git a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig
index 1a0cf5d..f87c991 100644
--- a/drivers/usb/phy/Kconfig
+++ b/drivers/usb/phy/Kconfig
@@ -21,7 +21,7 @@
config FSL_USB2_OTG
bool "Freescale USB OTG Transceiver Driver"
- depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM && PM
+ depends on USB_EHCI_FSL && USB_FSL_USB2 && USB_OTG_FSM=y && PM
depends on USB_GADGET || !USB_GADGET # if USB_GADGET=m, this can't be 'y'
select USB_PHY
help
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index c0777a3..4c66edf 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -61,6 +61,7 @@
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ { USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
{ USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */
@@ -1353,8 +1354,13 @@
if (priv->partnum == CP210X_PARTNUM_CP2105)
req_type = REQTYPE_INTERFACE_TO_HOST;
+ result = usb_autopm_get_interface(serial->interface);
+ if (result)
+ return result;
+
result = cp210x_read_vendor_block(serial, req_type,
CP210X_READ_LATCH, &buf, sizeof(buf));
+ usb_autopm_put_interface(serial->interface);
if (result < 0)
return result;
@@ -1375,6 +1381,10 @@
buf.mask = BIT(gpio);
+ result = usb_autopm_get_interface(serial->interface);
+ if (result)
+ goto out;
+
if (priv->partnum == CP210X_PARTNUM_CP2105) {
result = cp210x_write_vendor_block(serial,
REQTYPE_HOST_TO_INTERFACE,
@@ -1392,6 +1402,8 @@
NULL, 0, USB_CTRL_SET_TIMEOUT);
}
+ usb_autopm_put_interface(serial->interface);
+out:
if (result < 0) {
dev_err(&serial->interface->dev, "failed to set GPIO value: %d\n",
result);
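
Both GPIO paths above gain the same autopm bracket: resume the interface
before issuing the vendor request, drop the PM reference afterwards, and
keep the error path symmetric through a single exit label. A generic
model of the bracket (illustrative names, not the cp210x code):

    struct dev { int pm_refs; };

    static int pm_get(struct dev *d) { d->pm_refs++; return 0; /* may fail */ }
    static void pm_put(struct dev *d) { d->pm_refs--; }

    static int vendor_request(struct dev *d, int (*io)(struct dev *))
    {
        int result = pm_get(d);  /* models usb_autopm_get_interface() */
        if (result)
            goto out;            /* never touch a possibly-suspended device */

        result = io(d);
        pm_put(d);               /* models usb_autopm_put_interface() */
    out:
        return result;
    }
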
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index b5cef32..1d8077e 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -1015,6 +1015,8 @@
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_BT_USB_PID) },
{ USB_DEVICE(CYPRESS_VID, CYPRESS_WICED_WL_USB_PID) },
{ USB_DEVICE(AIRBUS_DS_VID, AIRBUS_DS_P8GR) },
+ /* EZPrototypes devices */
+ { USB_DEVICE(EZPROTOTYPES_VID, HJELMSLUND_USB485_ISO_PID) },
{ } /* Terminating entry */
};
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index 975d026..b863bed 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -1309,6 +1309,12 @@
#define IONICS_PLUGCOMPUTER_PID 0x0102
/*
+ * EZPrototypes (PID reseller)
+ */
+#define EZPROTOTYPES_VID 0x1c40
+#define HJELMSLUND_USB485_ISO_PID 0x0477
+
+/*
* Dresden Elektronik Sensor Terminal Board
*/
#define DE_VID 0x1cf1 /* Vendor ID */
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index fb54434..faf833e 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -1148,6 +1148,8 @@
.driver_info = NCTRL(0) | RSVD(1) | RSVD(3) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_ME910_DUAL_MODEM),
.driver_info = NCTRL(0) | RSVD(3) },
+ { USB_DEVICE_INTERFACE_CLASS(TELIT_VENDOR_ID, 0x1102, 0xff), /* Telit ME910 (ECM) */
+ .driver_info = NCTRL(0) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910),
.driver_info = NCTRL(0) | RSVD(1) | RSVD(2) },
{ USB_DEVICE(TELIT_VENDOR_ID, TELIT_PRODUCT_LE910_USBCFG4),
diff --git a/drivers/vhost/vsock.c b/drivers/vhost/vsock.c
index fa93f67..e440f87 100644
--- a/drivers/vhost/vsock.c
+++ b/drivers/vhost/vsock.c
@@ -642,7 +642,7 @@
hash_del_rcu(&vsock->hash);
vsock->guest_cid = guest_cid;
- hash_add_rcu(vhost_vsock_hash, &vsock->hash, guest_cid);
+ hash_add_rcu(vhost_vsock_hash, &vsock->hash, vsock->guest_cid);
spin_unlock_bh(&vhost_vsock_lock);
return 0;
diff --git a/fs/aio.c b/fs/aio.c
index 44551d9..45d5ef8 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -1661,6 +1661,7 @@
struct poll_iocb *req = container_of(wait, struct poll_iocb, wait);
struct aio_kiocb *iocb = container_of(req, struct aio_kiocb, poll);
__poll_t mask = key_to_poll(key);
+ unsigned long flags;
req->woken = true;
@@ -1669,10 +1670,15 @@
if (!(mask & req->events))
return 0;
- /* try to complete the iocb inline if we can: */
- if (spin_trylock(&iocb->ki_ctx->ctx_lock)) {
+ /*
+ * Try to complete the iocb inline if we can. Use
+ * irqsave/irqrestore because not all filesystems (e.g. fuse)
+ * call this function with IRQs disabled and because IRQs
+ * have to be disabled before ctx_lock is obtained.
+ */
+ if (spin_trylock_irqsave(&iocb->ki_ctx->ctx_lock, flags)) {
list_del(&iocb->ki_list);
- spin_unlock(&iocb->ki_ctx->ctx_lock);
+ spin_unlock_irqrestore(&iocb->ki_ctx->ctx_lock, flags);
list_del_init(&req->wait.entry);
aio_poll_complete(iocb, mask);
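
The fix keeps the trylock-or-defer shape and only swaps the lock
primitive so IRQ state is saved and restored around the critical
section. A pthread model of the trylock-or-defer completion itself (IRQ
handling has no userspace equivalent and is omitted; names illustrative):

    #include <pthread.h>

    struct ctx { pthread_mutex_t lock; };

    static void complete_or_defer(struct ctx *c,
                                  void (*complete)(struct ctx *),
                                  void (*defer)(struct ctx *))
    {
        if (pthread_mutex_trylock(&c->lock) == 0) {
            complete(c);              /* fast path: complete inline */
            pthread_mutex_unlock(&c->lock);
        } else {
            defer(c);                 /* contended: punt to a work item */
        }
    }
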
diff --git a/fs/autofs/expire.c b/fs/autofs/expire.c
index d441244..28d9c2b 100644
--- a/fs/autofs/expire.c
+++ b/fs/autofs/expire.c
@@ -596,7 +596,6 @@
pkt.len = dentry->d_name.len;
memcpy(pkt.name, dentry->d_name.name, pkt.len);
pkt.name[pkt.len] = '\0';
- dput(dentry);
if (copy_to_user(pkt_p, &pkt, sizeof(struct autofs_packet_expire)))
ret = -EFAULT;
@@ -609,6 +608,8 @@
complete_all(&ino->expire_complete);
spin_unlock(&sbi->fs_lock);
+ dput(dentry);
+
return ret;
}
diff --git a/fs/autofs/inode.c b/fs/autofs/inode.c
index 846c052..3c14a8e 100644
--- a/fs/autofs/inode.c
+++ b/fs/autofs/inode.c
@@ -255,8 +255,10 @@
}
root_inode = autofs_get_inode(s, S_IFDIR | 0755);
root = d_make_root(root_inode);
- if (!root)
+ if (!root) {
+ ret = -ENOMEM;
goto fail_ino;
+ }
pipe = NULL;
root->d_fsdata = ino;
diff --git a/fs/buffer.c b/fs/buffer.c
index 6f1ae3a..c083c4b 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -200,6 +200,7 @@
struct buffer_head *head;
struct page *page;
int all_mapped = 1;
+ static DEFINE_RATELIMIT_STATE(last_warned, HZ, 1);
index = block >> (PAGE_SHIFT - bd_inode->i_blkbits);
page = find_get_page_flags(bd_mapping, index, FGP_ACCESSED);
@@ -227,15 +228,15 @@
* file io on the block device and getblk. It gets dealt with
* elsewhere, don't buffer_error if we had some unmapped buffers
*/
- if (all_mapped) {
- printk("__find_get_block_slow() failed. "
- "block=%llu, b_blocknr=%llu\n",
- (unsigned long long)block,
- (unsigned long long)bh->b_blocknr);
- printk("b_state=0x%08lx, b_size=%zu\n",
- bh->b_state, bh->b_size);
- printk("device %pg blocksize: %d\n", bdev,
- 1 << bd_inode->i_blkbits);
+ ratelimit_set_flags(&last_warned, RATELIMIT_MSG_ON_RELEASE);
+ if (all_mapped && __ratelimit(&last_warned)) {
+ printk("__find_get_block_slow() failed. block=%llu, "
+ "b_blocknr=%llu, b_state=0x%08lx, b_size=%zu, "
+ "device %pg blocksize: %d\n",
+ (unsigned long long)block,
+ (unsigned long long)bh->b_blocknr,
+ bh->b_state, bh->b_size, bdev,
+ 1 << bd_inode->i_blkbits);
}
out_unlock:
spin_unlock(&bd_mapping->private_lock);
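
DEFINE_RATELIMIT_STATE(last_warned, HZ, 1) above permits one message per
HZ interval. A self-contained userspace model of the interval/burst
bookkeeping (illustrative only; the kernel version also accounts for
suppressed callbacks):

    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>

    struct ratelimit { time_t begin; int interval; int burst; int printed; };

    static bool ratelimit_ok(struct ratelimit *rs)
    {
        time_t now = time(NULL);

        if (now - rs->begin >= rs->interval) {  /* new window: reset budget */
            rs->begin = now;
            rs->printed = 0;
        }
        return rs->printed++ < rs->burst;
    }

    int main(void)
    {
        struct ratelimit rs = { .interval = 1, .burst = 1 }; /* like HZ, 1 */

        for (int i = 0; i < 100000; i++)
            if (ratelimit_ok(&rs))
                printf("warning %d\n", i);  /* roughly one line per second */
        return 0;
    }
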
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index 1e5a117..a2d7017 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -2243,10 +2243,12 @@
{
int i;
- cifs_small_buf_release(rqst->rq_iov[0].iov_base);
- for (i = 1; i < rqst->rq_nvec; i++)
- if (rqst->rq_iov[i].iov_base != smb2_padding)
- kfree(rqst->rq_iov[i].iov_base);
+ if (rqst && rqst->rq_iov) {
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base);
+ for (i = 1; i < rqst->rq_nvec; i++)
+ if (rqst->rq_iov[i].iov_base != smb2_padding)
+ kfree(rqst->rq_iov[i].iov_base);
+ }
}
int
@@ -2535,7 +2537,8 @@
void
SMB2_close_free(struct smb_rqst *rqst)
{
- cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+ if (rqst && rqst->rq_iov)
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
int
@@ -2685,7 +2688,8 @@
void
SMB2_query_info_free(struct smb_rqst *rqst)
{
- cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
+ if (rqst && rqst->rq_iov)
+ cifs_small_buf_release(rqst->rq_iov[0].iov_base); /* request */
}
static int
diff --git a/fs/cifs/smb2pdu.h b/fs/cifs/smb2pdu.h
index 8fb7887..437257d 100644
--- a/fs/cifs/smb2pdu.h
+++ b/fs/cifs/smb2pdu.h
@@ -84,8 +84,8 @@
#define NUMBER_OF_SMB2_COMMANDS 0x0013
-/* 4 len + 52 transform hdr + 64 hdr + 56 create rsp */
-#define MAX_SMB2_HDR_SIZE 0x00b0
+/* 52 transform hdr + 64 hdr + 88 create rsp */
+#define MAX_SMB2_HDR_SIZE 204
#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe)
#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
diff --git a/fs/drop_caches.c b/fs/drop_caches.c
index 8237701..d31b6c7 100644
--- a/fs/drop_caches.c
+++ b/fs/drop_caches.c
@@ -21,8 +21,13 @@
spin_lock(&sb->s_inode_list_lock);
list_for_each_entry(inode, &sb->s_inodes, i_sb_list) {
spin_lock(&inode->i_lock);
+ /*
+ * We must skip inodes in unusual state. We may also skip
+ * inodes without pages but we deliberately won't in case
+ * we need to reschedule to avoid softlockups.
+ */
if ((inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW)) ||
- (inode->i_mapping->nrpages == 0)) {
+ (inode->i_mapping->nrpages == 0 && !need_resched())) {
spin_unlock(&inode->i_lock);
continue;
}
@@ -30,6 +35,7 @@
spin_unlock(&inode->i_lock);
spin_unlock(&sb->s_inode_list_lock);
+ cond_resched();
invalidate_mapping_pages(inode->i_mapping, 0, -1);
iput(toput_inode);
toput_inode = inode;
diff --git a/fs/exec.c b/fs/exec.c
index c7e3417..77c03ce 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -929,7 +929,7 @@
bytes = kernel_read(file, *buf + pos, i_size - pos, &pos);
if (bytes < 0) {
ret = bytes;
- goto out;
+ goto out_free;
}
if (bytes == 0)
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 4614ee2..9d566e6 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -107,7 +107,7 @@
static wait_queue_head_t *glock_waitqueue(struct lm_lockname *name)
{
- u32 hash = jhash2((u32 *)name, sizeof(*name) / 4, 0);
+ u32 hash = jhash2((u32 *)name, ht_parms.key_len / 4, 0);
return glock_wait_table + hash_32(hash, GLOCK_WAIT_TABLE_BITS);
}
diff --git a/fs/iomap.c b/fs/iomap.c
index e57fb1e..fac4520 100644
--- a/fs/iomap.c
+++ b/fs/iomap.c
@@ -117,6 +117,12 @@
atomic_set(&iop->read_count, 0);
atomic_set(&iop->write_count, 0);
bitmap_zero(iop->uptodate, PAGE_SIZE / SECTOR_SIZE);
+
+ /*
+ * migrate_page_move_mapping() assumes that pages with private data have
+ * their count elevated by 1.
+ */
+ get_page(page);
set_page_private(page, (unsigned long)iop);
SetPagePrivate(page);
return iop;
@@ -133,6 +139,7 @@
WARN_ON_ONCE(atomic_read(&iop->write_count));
ClearPagePrivate(page);
set_page_private(page, 0);
+ put_page(page);
kfree(iop);
}
@@ -565,8 +572,10 @@
if (page_has_private(page)) {
ClearPagePrivate(page);
+ get_page(newpage);
set_page_private(newpage, page_private(page));
set_page_private(page, 0);
+ put_page(page);
SetPagePrivate(newpage);
}
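
The three hunks above enforce a single invariant: a page carrying
private data holds one extra reference, and migration moves that
reference along with the data. A compact model of the
attach/detach/migrate trio (illustrative types, not the kernel page
API):

    #include <stddef.h>

    struct page { int count; void *private; };

    static void attach_private(struct page *p, void *data)
    {
        p->count++;        /* models get_page(): migration expects this */
        p->private = data;
    }

    static void *detach_private(struct page *p)
    {
        void *data = p->private;

        p->private = NULL;
        p->count--;        /* models put_page(): balances the attach */
        return data;
    }

    static void migrate_private(struct page *oldp, struct page *newp)
    {
        newp->count++;     /* the reference follows the private data */
        newp->private = oldp->private;
        oldp->private = NULL;
        oldp->count--;
    }
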
@@ -1778,6 +1787,7 @@
loff_t pos = iocb->ki_pos, start = pos;
loff_t end = iocb->ki_pos + count - 1, ret = 0;
unsigned int flags = IOMAP_DIRECT;
+ bool wait_for_completion = is_sync_kiocb(iocb);
struct blk_plug plug;
struct iomap_dio *dio;
@@ -1797,7 +1807,6 @@
dio->end_io = end_io;
dio->error = 0;
dio->flags = 0;
- dio->wait_for_completion = is_sync_kiocb(iocb);
dio->submit.iter = iter;
dio->submit.waiter = current;
@@ -1852,7 +1861,7 @@
dio_warn_stale_pagecache(iocb->ki_filp);
ret = 0;
- if (iov_iter_rw(iter) == WRITE && !dio->wait_for_completion &&
+ if (iov_iter_rw(iter) == WRITE && !wait_for_completion &&
!inode->i_sb->s_dio_done_wq) {
ret = sb_init_dio_done_wq(inode->i_sb);
if (ret < 0)
@@ -1868,7 +1877,7 @@
if (ret <= 0) {
/* magic error code to fall back to buffered I/O */
if (ret == -ENOTBLK) {
- dio->wait_for_completion = true;
+ wait_for_completion = true;
ret = 0;
}
break;
@@ -1890,8 +1899,24 @@
if (dio->flags & IOMAP_DIO_WRITE_FUA)
dio->flags &= ~IOMAP_DIO_NEED_SYNC;
+ /*
+ * We are about to drop our additional submission reference, which
+ * might be the last reference to the dio. There are three
+ * different ways we can progress here:
+ *
+ * (a) If this is the last reference we will always complete and free
+ * the dio ourselves.
+ * (b) If this is not the last reference, and we serve an asynchronous
+ * iocb, we must never touch the dio after the decrement, the
+ * I/O completion handler will complete and free it.
+ * (c) If this is not the last reference, but we serve a synchronous
+ * iocb, the I/O completion handler will wake us up on the drop
+ * of the final reference, and we will complete and free it here
+ * after we got woken by the I/O completion handler.
+ */
+ dio->wait_for_completion = wait_for_completion;
if (!atomic_dec_and_test(&dio->ref)) {
- if (!dio->wait_for_completion)
+ if (!wait_for_completion)
return -EIOCBQUEUED;
for (;;) {
@@ -1908,9 +1933,7 @@
__set_current_state(TASK_RUNNING);
}
- ret = iomap_dio_complete(dio);
-
- return ret;
+ return iomap_dio_complete(dio);
out_free_dio:
kfree(dio);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 5ef2c71..6b666d1 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -1906,6 +1906,11 @@
size_t len;
char *end;
+ if (unlikely(!dev_name || !*dev_name)) {
+ dfprintk(MOUNT, "NFS: device name not specified\n");
+ return -EINVAL;
+ }
+
/* Is the host name protected with square brackets? */
if (*dev_name == '[') {
end = strchr(++dev_name, ']');
diff --git a/fs/overlayfs/copy_up.c b/fs/overlayfs/copy_up.c
index 1cc797a..e6b5d62 100644
--- a/fs/overlayfs/copy_up.c
+++ b/fs/overlayfs/copy_up.c
@@ -829,7 +829,7 @@
dput(parent);
dput(next);
}
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
return err;
}
diff --git a/fs/overlayfs/dir.c b/fs/overlayfs/dir.c
index b2aadd3..2e4af5f 100644
--- a/fs/overlayfs/dir.c
+++ b/fs/overlayfs/dir.c
@@ -567,7 +567,8 @@
override_cred->fsgid = inode->i_gid;
if (!attr->hardlink) {
err = security_dentry_create_files_as(dentry,
- attr->mode, &dentry->d_name, old_cred,
+ attr->mode, &dentry->d_name,
+ old_cred ? old_cred : current_cred(),
override_cred);
if (err) {
put_cred(override_cred);
@@ -583,7 +584,7 @@
err = ovl_create_over_whiteout(dentry, inode, attr);
}
out_revert_creds:
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
return err;
}
@@ -659,7 +660,7 @@
old_cred = ovl_override_creds(dentry->d_sb);
err = ovl_set_redirect(dentry, false);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
return err;
}
@@ -857,7 +858,7 @@
err = ovl_remove_upper(dentry, is_dir, &list);
else
err = ovl_remove_and_whiteout(dentry, &list);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
if (!err) {
if (is_dir)
clear_nlink(dentry->d_inode);
@@ -1225,7 +1226,7 @@
out_unlock:
unlock_rename(new_upperdir, old_upperdir);
out_revert_creds:
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
ovl_nlink_end(new, locked);
out_drop_write:
ovl_drop_write(old);
diff --git a/fs/overlayfs/file.c b/fs/overlayfs/file.c
index 986313d..da7d785 100644
--- a/fs/overlayfs/file.c
+++ b/fs/overlayfs/file.c
@@ -33,7 +33,7 @@
old_cred = ovl_override_creds(inode->i_sb);
realfile = open_with_fake_path(&file->f_path, file->f_flags | O_NOATIME,
realinode, current_cred());
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
pr_debug("open(%p[%pD2/%c], 0%o) -> (%p, 0%o)\n",
file, file, ovl_whatisit(inode, realinode), file->f_flags,
@@ -208,7 +208,7 @@
old_cred = ovl_override_creds(file_inode(file)->i_sb);
ret = vfs_iter_read(real.file, iter, &iocb->ki_pos,
ovl_iocb_to_rwf(iocb));
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
ovl_file_accessed(file);
@@ -244,7 +244,7 @@
ret = vfs_iter_write(real.file, iter, &iocb->ki_pos,
ovl_iocb_to_rwf(iocb));
file_end_write(real.file);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
/* Update size */
ovl_copyattr(ovl_inode_real(inode), inode);
@@ -271,7 +271,7 @@
if (file_inode(real.file) == ovl_inode_upper(file_inode(file))) {
old_cred = ovl_override_creds(file_inode(file)->i_sb);
ret = vfs_fsync_range(real.file, start, end, datasync);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
}
fdput(real);
@@ -295,7 +295,7 @@
old_cred = ovl_override_creds(file_inode(file)->i_sb);
ret = call_mmap(vma->vm_file, vma);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
if (ret) {
/* Drop reference count from new vm_file value */
@@ -323,7 +323,7 @@
old_cred = ovl_override_creds(file_inode(file)->i_sb);
ret = vfs_fallocate(real.file, mode, offset, len);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
/* Update size */
ovl_copyattr(ovl_inode_real(inode), inode);
@@ -345,7 +345,7 @@
old_cred = ovl_override_creds(file_inode(file)->i_sb);
ret = vfs_fadvise(real.file, offset, len, advice);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
fdput(real);
@@ -365,7 +365,7 @@
old_cred = ovl_override_creds(file_inode(file)->i_sb);
ret = vfs_ioctl(real.file, cmd, arg);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
fdput(real);
@@ -470,7 +470,7 @@
real_out.file, pos_out, len);
break;
}
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
/* Update size */
ovl_copyattr(ovl_inode_real(inode_out), inode_out);
diff --git a/fs/overlayfs/inode.c b/fs/overlayfs/inode.c
index 3b7ed5d..b3c6126 100644
--- a/fs/overlayfs/inode.c
+++ b/fs/overlayfs/inode.c
@@ -64,7 +64,7 @@
inode_lock(upperdentry->d_inode);
old_cred = ovl_override_creds(dentry->d_sb);
err = notify_change(upperdentry, attr, NULL);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
if (!err)
ovl_copyattr(upperdentry->d_inode, dentry->d_inode);
inode_unlock(upperdentry->d_inode);
@@ -260,7 +260,7 @@
stat->nlink = dentry->d_inode->i_nlink;
out:
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
return err;
}
@@ -294,7 +294,7 @@
mask |= MAY_READ;
}
err = inode_permission(realinode, mask);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
return err;
}
@@ -311,7 +311,7 @@
old_cred = ovl_override_creds(dentry->d_sb);
p = vfs_get_link(ovl_dentry_real(dentry), done);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
return p;
}
@@ -354,7 +354,7 @@
WARN_ON(flags != XATTR_REPLACE);
err = vfs_removexattr(realdentry, name);
}
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
/* copy c/mtime */
ovl_copyattr(d_inode(realdentry), inode);
@@ -375,7 +375,7 @@
old_cred = ovl_override_creds(dentry->d_sb);
res = vfs_getxattr(realdentry, name, value, size);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
return res;
}
@@ -399,7 +399,7 @@
old_cred = ovl_override_creds(dentry->d_sb);
res = vfs_listxattr(realdentry, list, size);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
if (res <= 0 || size == 0)
return res;
@@ -434,7 +434,7 @@
old_cred = ovl_override_creds(inode->i_sb);
acl = get_acl(realinode, type);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
return acl;
}
@@ -472,7 +472,7 @@
filemap_write_and_wait(realinode->i_mapping);
err = realinode->i_op->fiemap(realinode, fieinfo, start, len);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
return err;
}
diff --git a/fs/overlayfs/namei.c b/fs/overlayfs/namei.c
index efd3723..2fd199e 100644
--- a/fs/overlayfs/namei.c
+++ b/fs/overlayfs/namei.c
@@ -1069,7 +1069,7 @@
goto out_free_oe;
}
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
if (origin_path) {
dput(origin_path->dentry);
kfree(origin_path);
@@ -1096,7 +1096,7 @@
kfree(upperredirect);
out:
kfree(d.redirect);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
return ERR_PTR(err);
}
@@ -1150,7 +1150,7 @@
dput(this);
}
}
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
return positive;
}
diff --git a/fs/overlayfs/overlayfs.h b/fs/overlayfs/overlayfs.h
index a3c0d95..552a19a 100644
--- a/fs/overlayfs/overlayfs.h
+++ b/fs/overlayfs/overlayfs.h
@@ -208,6 +208,7 @@
void ovl_drop_write(struct dentry *dentry);
struct dentry *ovl_workdir(struct dentry *dentry);
const struct cred *ovl_override_creds(struct super_block *sb);
+void ovl_revert_creds(const struct cred *oldcred);
struct super_block *ovl_same_sb(struct super_block *sb);
int ovl_can_decode_fh(struct super_block *sb);
struct dentry *ovl_indexdir(struct super_block *sb);
diff --git a/fs/overlayfs/ovl_entry.h b/fs/overlayfs/ovl_entry.h
index ec23703..e38eea8 100644
--- a/fs/overlayfs/ovl_entry.h
+++ b/fs/overlayfs/ovl_entry.h
@@ -20,6 +20,7 @@
bool nfs_export;
int xino;
bool metacopy;
+ bool override_creds;
};
struct ovl_sb {
diff --git a/fs/overlayfs/readdir.c b/fs/overlayfs/readdir.c
index cc8303a..ec591b4 100644
--- a/fs/overlayfs/readdir.c
+++ b/fs/overlayfs/readdir.c
@@ -289,7 +289,7 @@
}
inode_unlock(dir->d_inode);
}
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
return err;
}
@@ -921,7 +921,7 @@
old_cred = ovl_override_creds(dentry->d_sb);
err = ovl_dir_read_merged(dentry, list, &root);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
if (err)
return err;
diff --git a/fs/overlayfs/super.c b/fs/overlayfs/super.c
index 0fb0a59..df77062 100644
--- a/fs/overlayfs/super.c
+++ b/fs/overlayfs/super.c
@@ -56,6 +56,11 @@
MODULE_PARM_DESC(ovl_xino_auto_def,
"Auto enable xino feature");
+static bool __read_mostly ovl_override_creds_def = true;
+module_param_named(override_creds, ovl_override_creds_def, bool, 0644);
+MODULE_PARM_DESC(override_creds,
+ "Use mounter's credentials for accesses");
+
static void ovl_entry_stack_free(struct ovl_entry *oe)
{
unsigned int i;
@@ -362,6 +367,9 @@
if (ofs->config.metacopy != ovl_metacopy_def)
seq_printf(m, ",metacopy=%s",
ofs->config.metacopy ? "on" : "off");
+ if (ofs->config.override_creds != ovl_override_creds_def)
+ seq_show_option(m, "override_creds",
+ ofs->config.override_creds ? "on" : "off");
return 0;
}
@@ -401,6 +409,8 @@
OPT_XINO_AUTO,
OPT_METACOPY_ON,
OPT_METACOPY_OFF,
+ OPT_OVERRIDE_CREDS_ON,
+ OPT_OVERRIDE_CREDS_OFF,
OPT_ERR,
};
@@ -419,6 +429,8 @@
{OPT_XINO_AUTO, "xino=auto"},
{OPT_METACOPY_ON, "metacopy=on"},
{OPT_METACOPY_OFF, "metacopy=off"},
+ {OPT_OVERRIDE_CREDS_ON, "override_creds=on"},
+ {OPT_OVERRIDE_CREDS_OFF, "override_creds=off"},
{OPT_ERR, NULL}
};
@@ -477,6 +489,7 @@
config->redirect_mode = kstrdup(ovl_redirect_mode_def(), GFP_KERNEL);
if (!config->redirect_mode)
return -ENOMEM;
+ config->override_creds = ovl_override_creds_def;
while ((p = ovl_next_opt(&opt)) != NULL) {
int token;
@@ -557,6 +570,14 @@
config->metacopy = false;
break;
+ case OPT_OVERRIDE_CREDS_ON:
+ config->override_creds = true;
+ break;
+
+ case OPT_OVERRIDE_CREDS_OFF:
+ config->override_creds = false;
+ break;
+
default:
pr_err("overlayfs: unrecognized mount option \"%s\" or missing value\n", p);
return -EINVAL;
@@ -1521,7 +1542,6 @@
ovl_dentry_lower(root_dentry), NULL);
sb->s_root = root_dentry;
-
return 0;
out_free_oe:
diff --git a/fs/overlayfs/util.c b/fs/overlayfs/util.c
index ace4fe4..470310e 100644
--- a/fs/overlayfs/util.c
+++ b/fs/overlayfs/util.c
@@ -40,9 +40,17 @@
{
struct ovl_fs *ofs = sb->s_fs_info;
+ if (!ofs->config.override_creds)
+ return NULL;
return override_creds(ofs->creator_cred);
}
+void ovl_revert_creds(const struct cred *old_cred)
+{
+ if (old_cred)
+ revert_creds(old_cred);
+}
+
struct super_block *ovl_same_sb(struct super_block *sb)
{
struct ovl_fs *ofs = sb->s_fs_info;
@@ -783,7 +791,7 @@
* value relative to the upper inode nlink in an upper inode xattr.
*/
err = ovl_set_nlink_upper(dentry);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
out:
if (err)
@@ -803,7 +811,7 @@
old_cred = ovl_override_creds(dentry->d_sb);
ovl_cleanup_index(dentry);
- revert_creds(old_cred);
+ ovl_revert_creds(old_cred);
}
mutex_unlock(&OVL_I(d_inode(dentry))->lock);
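
With the mount option plumbed through, ovl_override_creds() may now
return NULL and ovl_revert_creds() treats NULL as a no-op, so the
bracket at every call site stays unchanged. A minimal model of that
conditional bracket (illustrative; the real functions switch the task
credentials):

    #include <stddef.h>

    struct cred { int uid; };

    static const struct cred *maybe_override(const struct cred **current_cred,
                                             const struct cred *creator,
                                             int override_enabled)
    {
        const struct cred *old = *current_cred;

        if (!override_enabled)
            return NULL;          /* caller keeps its own credentials */
        *current_cred = creator;
        return old;
    }

    static void maybe_revert(const struct cred **current_cred,
                             const struct cred *old)
    {
        if (old)
            *current_cred = old;  /* NULL means nothing was overridden */
    }
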
diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
index 4d96a7c..cad2c60 100644
--- a/fs/proc/Kconfig
+++ b/fs/proc/Kconfig
@@ -100,7 +100,6 @@
config PROC_UID
bool "Include /proc/uid/ files"
- default y
depends on PROC_FS && RT_MUTEXES
help
Provides aggregated per-uid information under /proc/uid.
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index 8ae1094..e39bac9 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -256,7 +256,7 @@
inode = proc_get_inode(dir->i_sb, de);
if (!inode)
return ERR_PTR(-ENOMEM);
- d_set_d_op(dentry, &proc_misc_dentry_ops);
+ d_set_d_op(dentry, de->proc_dops);
return d_splice_alias(inode, dentry);
}
read_unlock(&proc_subdir_lock);
@@ -429,6 +429,8 @@
INIT_LIST_HEAD(&ent->pde_openers);
proc_set_user(ent, (*parent)->uid, (*parent)->gid);
+ ent->proc_dops = &proc_misc_dentry_ops;
+
out:
return ent;
}
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index c0c7abb..bacad3e 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -44,6 +44,7 @@
struct completion *pde_unload_completion;
const struct inode_operations *proc_iops;
const struct file_operations *proc_fops;
+ const struct dentry_operations *proc_dops;
union {
const struct seq_operations *seq_ops;
int (*single_show)(struct seq_file *, void *);
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index d5e0fcb..a7b1243 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -38,6 +38,22 @@
return maybe_get_net(PDE_NET(PDE(inode)));
}
+static int proc_net_d_revalidate(struct dentry *dentry, unsigned int flags)
+{
+ return 0;
+}
+
+static const struct dentry_operations proc_net_dentry_ops = {
+ .d_revalidate = proc_net_d_revalidate,
+ .d_delete = always_delete_dentry,
+};
+
+static void pde_force_lookup(struct proc_dir_entry *pde)
+{
+ /* /proc/net/ entries can be changed under us by setns(CLONE_NEWNET) */
+ pde->proc_dops = &proc_net_dentry_ops;
+}
+
static int seq_open_net(struct inode *inode, struct file *file)
{
unsigned int state_size = PDE(inode)->state_size;
@@ -90,6 +106,7 @@
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
+ pde_force_lookup(p);
p->proc_fops = &proc_net_seq_fops;
p->seq_ops = ops;
p->state_size = state_size;
@@ -133,6 +150,7 @@
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
+ pde_force_lookup(p);
p->proc_fops = &proc_net_seq_fops;
p->seq_ops = ops;
p->state_size = state_size;
@@ -181,6 +199,7 @@
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
+ pde_force_lookup(p);
p->proc_fops = &proc_net_single_fops;
p->single_show = show;
return proc_register(parent, p);
@@ -223,6 +242,7 @@
p = proc_create_reg(name, mode, &parent, data);
if (!p)
return NULL;
+ pde_force_lookup(p);
p->proc_fops = &proc_net_single_fops;
p->single_show = show;
p->write = write;
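
pde_force_lookup() works by installing a d_revalidate() that always
returns 0, declaring every cached dentry stale so each access re-runs
->lookup() against the current network namespace. A toy model of a
cache whose validate hook always fails (illustrative names):

    #include <stdbool.h>
    #include <stddef.h>

    struct entry { const char *name; bool (*revalidate)(struct entry *); };

    static bool never_valid(struct entry *e) { (void)e; return false; }

    /* models pde_force_lookup(): every future hit takes the slow path */
    static void force_lookup(struct entry *e) { e->revalidate = never_valid; }

    static struct entry *cached_lookup(struct entry *cached,
                                       struct entry *(*slow_lookup)(const char *))
    {
        if (cached && cached->revalidate && cached->revalidate(cached))
            return cached;  /* cache hit: entry still valid */
        /* forced slow path, like a fresh ->lookup() in the right netns */
        return slow_lookup(cached ? cached->name : NULL);
    }
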
diff --git a/include/drm/drm_cache.h b/include/drm/drm_cache.h
index bfe1639..97fc498 100644
--- a/include/drm/drm_cache.h
+++ b/include/drm/drm_cache.h
@@ -47,6 +47,24 @@
return false;
#elif defined(CONFIG_MIPS) && defined(CONFIG_CPU_LOONGSON3)
return false;
+#elif defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+ /*
+ * The DRM driver stack is designed to work with cache coherent devices
+ * only, but permits an optimization to be enabled in some cases, where
+ * for some buffers, both the CPU and the GPU use uncached mappings,
+ * removing the need for DMA snooping and allocation in the CPU caches.
+ *
+ * The use of uncached GPU mappings relies on the correct implementation
+ * of the PCIe NoSnoop TLP attribute by the platform, otherwise the GPU
+ * will use cached mappings nonetheless. On x86 platforms, this does not
+ * seem to matter, as uncached CPU mappings will snoop the caches in any
+ * case. However, on ARM and arm64, enabling this optimization on a
+ * platform where NoSnoop is ignored results in loss of coherency, which
+ * breaks correct operation of the device. Since we have no way of
+ * detecting whether NoSnoop works or not, just disable this
+ * optimization entirely for ARM and arm64.
+ */
+ return false;
#else
return true;
#endif
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index 7f78d26..a72efa0 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -634,4 +634,12 @@
int drm_dp_send_power_updown_phy(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port, bool power_up);
+int drm_dp_send_dpcd_write(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size, u8 *bytes);
+
+int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port,
+ int offset, int size, u8 *bytes);
+
#endif
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 6881973..dae9863 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -254,20 +254,12 @@
static struct freq_attr _name = \
__ATTR(_name, 0200, NULL, store_##_name)
-struct global_attr {
- struct attribute attr;
- ssize_t (*show)(struct kobject *kobj,
- struct attribute *attr, char *buf);
- ssize_t (*store)(struct kobject *a, struct attribute *b,
- const char *c, size_t count);
-};
-
#define define_one_global_ro(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0444, show_##_name, NULL)
#define define_one_global_rw(_name) \
-static struct global_attr _name = \
+static struct kobj_attribute _name = \
__ATTR(_name, 0644, show_##_name, store_##_name)
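
The point of switching to struct kobj_attribute is that the ->show()
and ->store() prototypes now match what the kobject core actually
passes, instead of being laundered through the private struct
global_attr. A userspace model of the macro and its matching callback
(show_boost and all other names here are hypothetical):

    #include <stdio.h>

    struct kobj_attribute {
        const char *name;
        int mode;
        long (*show)(void *kobj, struct kobj_attribute *attr, char *buf);
    };

    #define DEFINE_GLOBAL_RO(_name) \
        static struct kobj_attribute _name = { #_name, 0444, show_##_name }

    static long show_boost(void *kobj, struct kobj_attribute *attr, char *buf)
    {
        (void)kobj; (void)attr;
        return sprintf(buf, "%d\n", 1);  /* hypothetical attribute value */
    }
    DEFINE_GLOBAL_RO(boost);

    int main(void)
    {
        char buf[16];

        boost.show(NULL, &boost, buf);   /* caller and callback now agree */
        printf("%s", buf);
        return 0;
    }
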
diff --git a/include/linux/cpufreq_times.h b/include/linux/cpufreq_times.h
index 757bf0c..0eb6dc9 100644
--- a/include/linux/cpufreq_times.h
+++ b/include/linux/cpufreq_times.h
@@ -27,7 +27,8 @@
struct pid *pid, struct task_struct *p);
void cpufreq_acct_update_power(struct task_struct *p, u64 cputime);
void cpufreq_times_create_policy(struct cpufreq_policy *policy);
-void cpufreq_times_record_transition(struct cpufreq_freqs *freq);
+void cpufreq_times_record_transition(struct cpufreq_policy *policy,
+ unsigned int new_freq);
void cpufreq_task_times_remove_uids(uid_t uid_start, uid_t uid_end);
int single_uid_time_in_state_open(struct inode *inode, struct file *file);
#else
@@ -38,7 +39,7 @@
u64 cputime) {}
static inline void cpufreq_times_create_policy(struct cpufreq_policy *policy) {}
static inline void cpufreq_times_record_transition(
- struct cpufreq_freqs *freq) {}
+ struct cpufreq_policy *policy, unsigned int new_freq) {}
static inline void cpufreq_task_times_remove_uids(uid_t uid_start,
uid_t uid_end) {}
#endif /* CONFIG_CPU_FREQ_TIMES */
diff --git a/include/linux/hdcp_qseecom.h b/include/linux/hdcp_qseecom.h
index 96b24a1..34ffb9f 100644
--- a/include/linux/hdcp_qseecom.h
+++ b/include/linux/hdcp_qseecom.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
*/
#ifndef __HDCP_QSEECOM_H
@@ -11,6 +11,7 @@
enum hdcp2_app_cmd {
HDCP2_CMD_START,
+ HDCP2_CMD_START_AUTH,
HDCP2_CMD_STOP,
HDCP2_CMD_PROCESS_MSG,
HDCP2_CMD_TIMEOUT,
@@ -35,6 +36,8 @@
switch (cmd) {
case HDCP2_CMD_START:
return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START);
+ case HDCP2_CMD_START_AUTH:
+ return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_START_AUTH);
case HDCP2_CMD_STOP:
return HDCP_QSEECOM_ENUM_STR(HDCP2_CMD_STOP);
case HDCP2_CMD_PROCESS_MSG:
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 8bdbb5f..3188c0b 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -319,7 +319,7 @@
#define GITS_TYPER_PLPIS (1UL << 0)
#define GITS_TYPER_VLPIS (1UL << 1)
#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
-#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_TYPER_ITT_ENTRY_SIZE(r) ((((r) >> GITS_TYPER_ITT_ENTRY_SIZE_SHIFT) & 0xf) + 1)
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
diff --git a/include/linux/psi.h b/include/linux/psi.h
index 7006008..af892c2 100644
--- a/include/linux/psi.h
+++ b/include/linux/psi.h
@@ -4,6 +4,7 @@
#include <linux/jump_label.h>
#include <linux/psi_types.h>
#include <linux/sched.h>
+#include <linux/poll.h>
struct seq_file;
struct css_set;
@@ -26,6 +27,13 @@
int psi_cgroup_alloc(struct cgroup *cgrp);
void psi_cgroup_free(struct cgroup *cgrp);
void cgroup_move_task(struct task_struct *p, struct css_set *to);
+
+struct psi_trigger *psi_trigger_create(struct psi_group *group,
+ char *buf, size_t nbytes, enum psi_res res);
+void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *t);
+
+__poll_t psi_trigger_poll(void **trigger_ptr, struct file *file,
+ poll_table *wait);
#endif
#else /* CONFIG_PSI */
diff --git a/include/linux/psi_types.h b/include/linux/psi_types.h
index 4d1c1f6..07aaf9b 100644
--- a/include/linux/psi_types.h
+++ b/include/linux/psi_types.h
@@ -1,8 +1,11 @@
#ifndef _LINUX_PSI_TYPES_H
#define _LINUX_PSI_TYPES_H
+#include <linux/kthread.h>
#include <linux/seqlock.h>
#include <linux/types.h>
+#include <linux/kref.h>
+#include <linux/wait.h>
#ifdef CONFIG_PSI
@@ -44,6 +47,12 @@
NR_PSI_STATES = 6,
};
+enum psi_aggregators {
+ PSI_AVGS = 0,
+ PSI_POLL,
+ NR_PSI_AGGREGATORS,
+};
+
struct psi_group_cpu {
/* 1st cacheline updated by the scheduler */
@@ -65,7 +74,55 @@
/* 2nd cacheline updated by the aggregator */
/* Delta detection against the sampling buckets */
- u32 times_prev[NR_PSI_STATES] ____cacheline_aligned_in_smp;
+ u32 times_prev[NR_PSI_AGGREGATORS][NR_PSI_STATES]
+ ____cacheline_aligned_in_smp;
+};
+
+/* PSI growth tracking window */
+struct psi_window {
+ /* Window size in ns */
+ u64 size;
+
+ /* Start time of the current window in ns */
+ u64 start_time;
+
+ /* Value at the start of the window */
+ u64 start_value;
+
+ /* Value growth in the previous window */
+ u64 prev_growth;
+};
+
+struct psi_trigger {
+ /* PSI state being monitored by the trigger */
+ enum psi_states state;
+
+ /* User-specified threshold in ns */
+ u64 threshold;
+
+ /* List node inside triggers list */
+ struct list_head node;
+
+ /* Backpointer needed during trigger destruction */
+ struct psi_group *group;
+
+ /* Wait queue for polling */
+ wait_queue_head_t event_wait;
+
+ /* Pending event flag */
+ int event;
+
+ /* Tracking window */
+ struct psi_window win;
+
+ /*
+ * Time last event was generated. Used for rate-limiting
+ * events to one per window.
+ */
+ u64 last_event_time;
+
+ /* Refcounting to prevent premature destruction */
+ struct kref refcount;
};
struct psi_group {
@@ -79,11 +136,32 @@
u64 avg_total[NR_PSI_STATES - 1];
u64 avg_last_update;
u64 avg_next_update;
+
+ /* Aggregator work control */
struct delayed_work avgs_work;
/* Total stall times and sampled pressure averages */
- u64 total[NR_PSI_STATES - 1];
+ u64 total[NR_PSI_AGGREGATORS][NR_PSI_STATES - 1];
unsigned long avg[NR_PSI_STATES - 1][3];
+
+ /* Monitor work control */
+ atomic_t poll_scheduled;
+ struct kthread_worker __rcu *poll_kworker;
+ struct kthread_delayed_work poll_work;
+
+ /* Protects data used by the monitor */
+ struct mutex trigger_lock;
+
+ /* Configured polling triggers */
+ struct list_head triggers;
+ u32 nr_triggers[NR_PSI_STATES - 1];
+ u32 poll_states;
+ u64 poll_min_period;
+
+ /* Total stall times at the start of monitor activation */
+ u64 polling_total[NR_PSI_STATES - 1];
+ u64 polling_next_update;
+ u64 polling_until;
};
#else /* CONFIG_PSI */
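
struct psi_window above carries exactly the state needed to measure
stall growth per tracking window. A userspace model of that bookkeeping
with a simple roll-over at window end (the kernel additionally
extrapolates partial windows; the names mirror the struct fields but
the code is illustrative):

    #include <stdint.h>

    struct psi_window {
        uint64_t size;         /* window size in ns */
        uint64_t start_time;   /* start of the current window */
        uint64_t start_value;  /* total stall time at window start */
        uint64_t prev_growth;  /* growth over the previous window */
    };

    static uint64_t window_update(struct psi_window *win, uint64_t now,
                                  uint64_t value)
    {
        uint64_t growth = value - win->start_value;

        if (now - win->start_time >= win->size) { /* window elapsed: roll */
            win->prev_growth = growth;
            win->start_time = now;
            win->start_value = value;
            growth = 0;
        }
        return growth;  /* compared against the trigger's threshold */
    }
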
diff --git a/include/linux/qcom-geni-se.h b/include/linux/qcom-geni-se.h
index 9df8d9b..bb7faa6 100644
--- a/include/linux/qcom-geni-se.h
+++ b/include/linux/qcom-geni-se.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
/*
- * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _LINUX_QCOM_GENI_SE
@@ -37,9 +37,16 @@
* @m_ahb_clk: Handle to the primary AHB clock.
* @s_ahb_clk: Handle to the secondary AHB clock.
* @ab_list: List Head of Average bus bandwidth list.
+ * @ab_list_noc: List Head of Average DDR path bus
+ bandwidth list.
* @ab: Average bus bandwidth request value.
+ * @ab_noc: Average DDR path bus bandwidth request value.
* @ib_list: List Head of Instantaneous bus bandwidth list.
+ * @ib_list_noc: List Head of Instantaneous DDR path bus
+ bandwidth list.
* @ib: Instantaneous bus bandwidth request value.
+ * @ib_noc: Instantaneous DDR path bus bandwidth
+ request value.
* @geni_pinctrl: Handle to the pinctrl configuration.
* @geni_gpio_active: Handle to the default/active pinctrl state.
* @geni_gpi_sleep: Handle to the sleep pinctrl state.
@@ -51,9 +58,13 @@
struct clk *m_ahb_clk;
struct clk *s_ahb_clk;
struct list_head ab_list;
+ struct list_head ab_list_noc;
unsigned long ab;
+ unsigned long ab_noc;
struct list_head ib_list;
+ struct list_head ib_list_noc;
unsigned long ib;
+ unsigned long ib_noc;
struct pinctrl *geni_pinctrl;
struct pinctrl_state *geni_gpio_active;
struct pinctrl_state *geni_gpio_sleep;
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index 7ddfc65..4335bd7 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -184,6 +184,7 @@
struct clk *pclk;
struct clk *clk_ptp_ref;
unsigned int clk_ptp_rate;
+ unsigned int clk_ref_rate;
struct reset_control *stmmac_rst;
struct stmmac_axi *axi;
int has_gmac4;
diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h
index ec9d6bc..fabee6d 100644
--- a/include/net/bluetooth/bluetooth.h
+++ b/include/net/bluetooth/bluetooth.h
@@ -276,7 +276,7 @@
int bt_sock_wait_state(struct sock *sk, int state, unsigned long timeo);
int bt_sock_wait_ready(struct sock *sk, unsigned long flags);
-void bt_accept_enqueue(struct sock *parent, struct sock *sk);
+void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh);
void bt_accept_unlink(struct sock *sk);
struct sock *bt_accept_dequeue(struct sock *parent, struct socket *newsock);
diff --git a/include/net/icmp.h b/include/net/icmp.h
index 3ef2743..8665bf2 100644
--- a/include/net/icmp.h
+++ b/include/net/icmp.h
@@ -22,6 +22,7 @@
#include <net/inet_sock.h>
#include <net/snmp.h>
+#include <net/ip.h>
struct icmp_err {
int errno;
@@ -39,7 +40,13 @@
struct sk_buff;
struct net;
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info);
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ const struct ip_options *opt);
+static inline void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+{
+ __icmp_send(skb_in, type, code, info, &IPCB(skb_in)->opt);
+}
+
int icmp_rcv(struct sk_buff *skb);
void icmp_err(struct sk_buff *skb, u32 info);
int icmp_init(void);
diff --git a/include/net/ip.h b/include/net/ip.h
index ddaa2bb5..0693b82 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -641,6 +641,8 @@
}
void ip_options_fragment(struct sk_buff *skb);
+int __ip_options_compile(struct net *net, struct ip_options *opt,
+ struct sk_buff *skb, __be32 *info);
int ip_options_compile(struct net *net, struct ip_options *opt,
struct sk_buff *skb);
int ip_options_get(struct net *net, struct ip_options_rcu **optp,
@@ -690,7 +692,7 @@
int ip_misc_proc_init(void);
#endif
-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
struct netlink_ext_ack *extack);
#endif /* _IP_H */
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index a6d0009..c44da48 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -47,7 +47,10 @@
struct qdisc_skb_head {
struct sk_buff *head;
struct sk_buff *tail;
- __u32 qlen;
+ union {
+ u32 qlen;
+ atomic_t atomic_qlen;
+ };
spinlock_t lock;
};
@@ -384,27 +387,19 @@
BUILD_BUG_ON(sizeof(qcb->data) < sz);
}
-static inline int qdisc_qlen_cpu(const struct Qdisc *q)
-{
- return this_cpu_ptr(q->cpu_qstats)->qlen;
-}
-
static inline int qdisc_qlen(const struct Qdisc *q)
{
return q->q.qlen;
}
-static inline int qdisc_qlen_sum(const struct Qdisc *q)
+static inline u32 qdisc_qlen_sum(const struct Qdisc *q)
{
- __u32 qlen = q->qstats.qlen;
- int i;
+ u32 qlen = q->qstats.qlen;
- if (q->flags & TCQ_F_NOLOCK) {
- for_each_possible_cpu(i)
- qlen += per_cpu_ptr(q->cpu_qstats, i)->qlen;
- } else {
+ if (q->flags & TCQ_F_NOLOCK)
+ qlen += atomic_read(&q->q.atomic_qlen);
+ else
qlen += q->q.qlen;
- }
return qlen;
}
@@ -776,14 +771,14 @@
this_cpu_add(sch->cpu_qstats->backlog, qdisc_pkt_len(skb));
}
-static inline void qdisc_qstats_cpu_qlen_inc(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_inc(struct Qdisc *sch)
{
- this_cpu_inc(sch->cpu_qstats->qlen);
+ atomic_inc(&sch->q.atomic_qlen);
}
-static inline void qdisc_qstats_cpu_qlen_dec(struct Qdisc *sch)
+static inline void qdisc_qstats_atomic_qlen_dec(struct Qdisc *sch)
{
- this_cpu_dec(sch->cpu_qstats->qlen);
+ atomic_dec(&sch->q.atomic_qlen);
}
static inline void qdisc_qstats_cpu_requeues_inc(struct Qdisc *sch)
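
A reading note on the union above: qdiscs that run under the qdisc lock keep using q.qlen as before, while lock-free (TCQ_F_NOLOCK) qdiscs now share a single atomic counter, so a length snapshot becomes one atomic_read() instead of the old per-cpu walk. A minimal sketch, not part of the patch:

    /* Illustrative helper: snapshot the queue length for either
     * qdisc flavor after this change.
     */
    static inline u32 qlen_snapshot(const struct Qdisc *q)
    {
            if (q->flags & TCQ_F_NOLOCK)
                    return atomic_read(&q->q.atomic_qlen);
            return q->q.qlen;       /* serialized by the qdisc lock */
    }
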
diff --git a/include/soc/qcom/qmi_rmnet.h b/include/soc/qcom/qmi_rmnet.h
index 019ac14..f75e538 100644
--- a/include/soc/qcom/qmi_rmnet.h
+++ b/include/soc/qcom/qmi_rmnet.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _QMI_RMNET_H
@@ -20,6 +20,7 @@
void qmi_rmnet_qmi_exit(void *qmi_pt, void *port);
void qmi_rmnet_change_link(struct net_device *dev, void *port, void *tcm_pt);
void qmi_rmnet_enable_all_flows(struct net_device *dev);
+bool qmi_rmnet_all_flows_enabled(struct net_device *dev);
#else
static inline void qmi_rmnet_qmi_exit(void *qmi_pt, void *port)
{
@@ -34,6 +35,12 @@
qmi_rmnet_enable_all_flows(struct net_device *dev)
{
}
+
+static inline bool
+qmi_rmnet_all_flows_enabled(struct net_device *dev)
+{
+ return true;
+}
#endif
#ifdef CONFIG_QCOM_QMI_DFC
@@ -71,7 +78,7 @@
void qmi_rmnet_work_init(void *port);
void qmi_rmnet_work_exit(void *port);
void qmi_rmnet_work_maybe_restart(void *port);
-void qmi_rmnet_work_restart(void *port);
+void qmi_rmnet_set_dl_msg_active(void *port);
int qmi_rmnet_ps_ind_register(void *port,
struct qmi_rmnet_ps_ind *ps_ind);
@@ -88,18 +95,16 @@
static inline void qmi_rmnet_work_init(void *port)
{
}
-static inline void qmi_rmnet_work_restart(void *port)
-{
-
-}
static inline void qmi_rmnet_work_exit(void *port)
{
}
-
static inline void qmi_rmnet_work_maybe_restart(void *port)
{
}
+static inline void qmi_rmnet_set_dl_msg_active(void *port)
+{
+}
static inline int qmi_rmnet_ps_ind_register(struct rmnet_port *port,
struct qmi_rmnet_ps_ind *ps_ind)
diff --git a/include/soc/qcom/rmnet_qmi.h b/include/soc/qcom/rmnet_qmi.h
index a5289c8..9096b10 100644
--- a/include/soc/qcom/rmnet_qmi.h
+++ b/include/soc/qcom/rmnet_qmi.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#ifndef _RMNET_QMI_H
@@ -19,6 +19,7 @@
void rmnet_reset_qmi_pt(void *port);
void rmnet_init_qmi_pt(void *port, void *qmi);
void rmnet_enable_all_flows(void *port);
+bool rmnet_all_flows_enabled(void *port);
void rmnet_set_powersave_format(void *port);
void rmnet_clear_powersave_format(void *port);
void rmnet_get_packets(void *port, u64 *rx, u64 *tx);
@@ -57,6 +58,11 @@
{
}
+static inline bool rmnet_all_flows_enabled(void *port)
+{
+ return true;
+}
+
static inline void rmnet_set_port_format(void *port)
{
}
diff --git a/include/trace/events/dfc.h b/include/trace/events/dfc.h
index 4ba0fd4..cb62767 100644
--- a/include/trace/events/dfc.h
+++ b/include/trace/events/dfc.h
@@ -1,5 +1,5 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-/* Copyright (c) 2018, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
*/
#undef TRACE_SYSTEM
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index f5271bc..93333c0 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -119,7 +119,8 @@
#define DRM_MODE_FLAG_SUPPORTS_RGB (1<<23)
#define DRM_MODE_FLAG_SUPPORTS_YUV (1<<24)
-
+#define DRM_MODE_FLAG_VID_MODE_PANEL (1<<29)
+#define DRM_MODE_FLAG_CMD_MODE_PANEL (1<<30)
#define DRM_MODE_FLAG_SEAMLESS (1<<31)
#define DRM_MODE_FLAG_ALL (DRM_MODE_FLAG_PHSYNC | \
@@ -136,6 +137,8 @@
DRM_MODE_FLAG_CLKDIV2 | \
DRM_MODE_FLAG_SUPPORTS_RGB | \
DRM_MODE_FLAG_SUPPORTS_YUV | \
+ DRM_MODE_FLAG_VID_MODE_PANEL | \
+ DRM_MODE_FLAG_CMD_MODE_PANEL | \
DRM_MODE_FLAG_3D_MASK)
/* DPMS flags */
diff --git a/include/uapi/media/cam_isp.h b/include/uapi/media/cam_isp.h
index 306450e..bba8eeb 100644
--- a/include/uapi/media/cam_isp.h
+++ b/include/uapi/media/cam_isp.h
@@ -96,6 +96,7 @@
#define CAM_ISP_GENERIC_BLOB_TYPE_CSID_CLOCK_CONFIG 4
#define CAM_ISP_GENERIC_BLOB_TYPE_FE_CONFIG 5
#define CAM_ISP_GENERIC_BLOB_TYPE_UBWC_CONFIG_V2 6
+#define CAM_ISP_GENERIC_BLOB_TYPE_IFE_CORE_CONFIG 7
/* Query devices */
/**
@@ -435,6 +436,37 @@
/* Acquire Device/HW v2 */
/**
+ * struct cam_isp_core_config - ISP core registers configuration
+ *
+ * @version: Version info
+ * @vid_ds16_r2pd: Enables Y and C merging PD output for video DS16
+ * @vid_ds4_r2pd: Enables Y and C merging PD output for video DS4
+ * @disp_ds16_r2pd: Enables Y and C merging PD output for disp DS16
+ * @disp_ds4_r2pd: Enables Y and C merging PD output for disp DS4
+ * @dsp_streaming_tap_point: Selects source for DSP streaming interface
+ * @ihist_src_sel: Selects input for IHIST module
+ * @hdr_be_src_sel: Selects input for HDR BE module
+ * @hdr_bhist_src_sel: Selects input for HDR BHIST module
+ * @input_mux_sel_pdaf: Selects input for PDAF
+ * @input_mux_sel_pp: Selects input for Pixel Pipe
+ * @reserved: Reserved
+ */
+struct cam_isp_core_config {
+ uint32_t version;
+ uint32_t vid_ds16_r2pd;
+ uint32_t vid_ds4_r2pd;
+ uint32_t disp_ds16_r2pd;
+ uint32_t disp_ds4_r2pd;
+ uint32_t dsp_streaming_tap_point;
+ uint32_t ihist_src_sel;
+ uint32_t hdr_be_src_sel;
+ uint32_t hdr_bhist_src_sel;
+ uint32_t input_mux_sel_pdaf;
+ uint32_t input_mux_sel_pp;
+ uint32_t reserved;
+} __attribute__((packed));
+
+/**
* struct cam_isp_acquire_hw_info - ISP acquire HW params
*
* @common_info_version : Version of common info struct used
diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c
index 03cc59e..cebadd6 100644
--- a/kernel/bpf/hashtab.c
+++ b/kernel/bpf/hashtab.c
@@ -677,7 +677,7 @@
}
if (htab_is_prealloc(htab)) {
- pcpu_freelist_push(&htab->freelist, &l->fnode);
+ __pcpu_freelist_push(&htab->freelist, &l->fnode);
} else {
atomic_dec(&htab->count);
l->htab = htab;
@@ -739,7 +739,7 @@
} else {
struct pcpu_freelist_node *l;
- l = pcpu_freelist_pop(&htab->freelist);
+ l = __pcpu_freelist_pop(&htab->freelist);
if (!l)
return ERR_PTR(-E2BIG);
l_new = container_of(l, struct htab_elem, fnode);
diff --git a/kernel/bpf/percpu_freelist.c b/kernel/bpf/percpu_freelist.c
index 673fa6f..0c1b4ba 100644
--- a/kernel/bpf/percpu_freelist.c
+++ b/kernel/bpf/percpu_freelist.c
@@ -28,8 +28,8 @@
free_percpu(s->freelist);
}
-static inline void __pcpu_freelist_push(struct pcpu_freelist_head *head,
- struct pcpu_freelist_node *node)
+static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
+ struct pcpu_freelist_node *node)
{
raw_spin_lock(&head->lock);
node->next = head->first;
@@ -37,12 +37,22 @@
raw_spin_unlock(&head->lock);
}
-void pcpu_freelist_push(struct pcpu_freelist *s,
+void __pcpu_freelist_push(struct pcpu_freelist *s,
struct pcpu_freelist_node *node)
{
struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
- __pcpu_freelist_push(head, node);
+ ___pcpu_freelist_push(head, node);
+}
+
+void pcpu_freelist_push(struct pcpu_freelist *s,
+ struct pcpu_freelist_node *node)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ __pcpu_freelist_push(s, node);
+ local_irq_restore(flags);
}
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
@@ -63,7 +73,7 @@
for_each_possible_cpu(cpu) {
again:
head = per_cpu_ptr(s->freelist, cpu);
- __pcpu_freelist_push(head, buf);
+ ___pcpu_freelist_push(head, buf);
i++;
buf += elem_size;
if (i == nr_elems)
@@ -74,14 +84,12 @@
local_irq_restore(flags);
}
-struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
{
struct pcpu_freelist_head *head;
struct pcpu_freelist_node *node;
- unsigned long flags;
int orig_cpu, cpu;
- local_irq_save(flags);
orig_cpu = cpu = raw_smp_processor_id();
while (1) {
head = per_cpu_ptr(s->freelist, cpu);
@@ -89,16 +97,25 @@
node = head->first;
if (node) {
head->first = node->next;
- raw_spin_unlock_irqrestore(&head->lock, flags);
+ raw_spin_unlock(&head->lock);
return node;
}
raw_spin_unlock(&head->lock);
cpu = cpumask_next(cpu, cpu_possible_mask);
if (cpu >= nr_cpu_ids)
cpu = 0;
- if (cpu == orig_cpu) {
- local_irq_restore(flags);
+ if (cpu == orig_cpu)
return NULL;
- }
}
}
+
+struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)
+{
+ struct pcpu_freelist_node *ret;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ ret = __pcpu_freelist_pop(s);
+ local_irq_restore(flags);
+ return ret;
+}
diff --git a/kernel/bpf/percpu_freelist.h b/kernel/bpf/percpu_freelist.h
index 3049aae..c396011 100644
--- a/kernel/bpf/percpu_freelist.h
+++ b/kernel/bpf/percpu_freelist.h
@@ -22,8 +22,12 @@
struct pcpu_freelist_node *next;
};
+/* pcpu_freelist_* do spin_lock_irqsave. */
void pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *);
+/* __pcpu_freelist_* do spin_lock only. Caller must disable irqs. */
+void __pcpu_freelist_push(struct pcpu_freelist *, struct pcpu_freelist_node *);
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *);
void pcpu_freelist_populate(struct pcpu_freelist *s, void *buf, u32 elem_size,
u32 nr_elems);
int pcpu_freelist_init(struct pcpu_freelist *);
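
The two header comments above carry the new locking contract; a minimal usage sketch under that contract (illustrative, with s assumed to be a struct pcpu_freelist *):

    /* Context with IRQs already disabled, e.g. under a hashtab
     * bucket lock: use the raw variants directly.
     */
    unsigned long flags;
    struct pcpu_freelist_node *node;

    local_irq_save(flags);
    node = __pcpu_freelist_pop(s);
    if (node)
            __pcpu_freelist_push(s, node);
    local_irq_restore(flags);

    /* Ordinary process context: the plain wrappers disable and
     * restore IRQs themselves.
     */
    node = pcpu_freelist_pop(s);
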
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 382c09d..cc40b8b 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -701,8 +701,13 @@
if (bpf_map_is_dev_bound(map)) {
err = bpf_map_offload_lookup_elem(map, key, value);
- } else if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
- map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
+ goto done;
+ }
+
+ preempt_disable();
+ this_cpu_inc(bpf_prog_active);
+ if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
+ map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
err = bpf_percpu_hash_copy(map, key, value);
} else if (map->map_type == BPF_MAP_TYPE_PERCPU_ARRAY) {
err = bpf_percpu_array_copy(map, key, value);
@@ -722,7 +727,10 @@
rcu_read_unlock();
err = ptr ? 0 : -ENOENT;
}
+ this_cpu_dec(bpf_prog_active);
+ preempt_enable();
+done:
if (err)
goto free_value;
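
As I read this hunk, bumping the per-cpu bpf_prog_active counter around the per-cpu copies keeps a tracing BPF program from running on this CPU and re-entering the map internals, whose freelist locks (per the percpu_freelist change above) no longer disable IRQs on their own. Condensed sketch of the guard, with error handling elided:

    preempt_disable();
    this_cpu_inc(bpf_prog_active);  /* tracing programs check this */
    err = bpf_percpu_hash_copy(map, key, value);
    this_cpu_dec(bpf_prog_active);
    preempt_enable();
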
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 4d81be2..bcb42aa 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -6035,7 +6035,8 @@
u32 off_reg;
aux = &env->insn_aux_data[i + delta];
- if (!aux->alu_state)
+ if (!aux->alu_state ||
+ aux->alu_state == BPF_ALU_NON_POINTER)
continue;
isneg = aux->alu_state & BPF_ALU_NEG_VALUE;
diff --git a/kernel/cgroup/cgroup.c b/kernel/cgroup/cgroup.c
index 4d631cb..eba5cab 100644
--- a/kernel/cgroup/cgroup.c
+++ b/kernel/cgroup/cgroup.c
@@ -3430,7 +3430,65 @@
{
return psi_show(seq, &seq_css(seq)->cgroup->psi, PSI_CPU);
}
-#endif
+
+static ssize_t cgroup_pressure_write(struct kernfs_open_file *of, char *buf,
+ size_t nbytes, enum psi_res res)
+{
+ struct psi_trigger *new;
+ struct cgroup *cgrp;
+
+ cgrp = cgroup_kn_lock_live(of->kn, false);
+ if (!cgrp)
+ return -ENODEV;
+
+ cgroup_get(cgrp);
+ cgroup_kn_unlock(of->kn);
+
+ new = psi_trigger_create(&cgrp->psi, buf, nbytes, res);
+ if (IS_ERR(new)) {
+ cgroup_put(cgrp);
+ return PTR_ERR(new);
+ }
+
+ psi_trigger_replace(&of->priv, new);
+
+ cgroup_put(cgrp);
+
+ return nbytes;
+}
+
+static ssize_t cgroup_io_pressure_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes,
+ loff_t off)
+{
+ return cgroup_pressure_write(of, buf, nbytes, PSI_IO);
+}
+
+static ssize_t cgroup_memory_pressure_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes,
+ loff_t off)
+{
+ return cgroup_pressure_write(of, buf, nbytes, PSI_MEM);
+}
+
+static ssize_t cgroup_cpu_pressure_write(struct kernfs_open_file *of,
+ char *buf, size_t nbytes,
+ loff_t off)
+{
+ return cgroup_pressure_write(of, buf, nbytes, PSI_CPU);
+}
+
+static __poll_t cgroup_pressure_poll(struct kernfs_open_file *of,
+ poll_table *pt)
+{
+ return psi_trigger_poll(&of->priv, of->file, pt);
+}
+
+static void cgroup_pressure_release(struct kernfs_open_file *of)
+{
+ psi_trigger_replace(&of->priv, NULL);
+}
+#endif /* CONFIG_PSI */
static int cgroup_file_open(struct kernfs_open_file *of)
{
@@ -4584,18 +4642,27 @@
.name = "io.pressure",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cgroup_io_pressure_show,
+ .write = cgroup_io_pressure_write,
+ .poll = cgroup_pressure_poll,
+ .release = cgroup_pressure_release,
},
{
.name = "memory.pressure",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cgroup_memory_pressure_show,
+ .write = cgroup_memory_pressure_write,
+ .poll = cgroup_pressure_poll,
+ .release = cgroup_pressure_release,
},
{
.name = "cpu.pressure",
.flags = CFTYPE_NOT_ON_ROOT,
.seq_show = cgroup_cpu_pressure_show,
+ .write = cgroup_cpu_pressure_write,
+ .poll = cgroup_pressure_poll,
+ .release = cgroup_pressure_release,
},
-#endif
+#endif /* CONFIG_PSI */
{ } /* terminate */
};
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0709f85..c89f8ea 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -469,18 +469,18 @@
void __user *buffer, size_t *lenp,
loff_t *ppos)
{
- int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-
- if (ret || !write)
- return ret;
-
+ int ret;
+ int perf_cpu = sysctl_perf_cpu_time_max_percent;
/*
* If throttling is disabled don't allow the write:
*/
- if (sysctl_perf_cpu_time_max_percent == 100 ||
- sysctl_perf_cpu_time_max_percent == 0)
+ if (write && (perf_cpu == 100 || perf_cpu == 0))
return -EINVAL;
+ ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
+ if (ret || !write)
+ return ret;
+
max_samples_per_tick = DIV_ROUND_UP(sysctl_perf_event_sample_rate, HZ);
perf_sample_period_ns = NSEC_PER_SEC / sysctl_perf_event_sample_rate;
update_perf_cpu_limits();
diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c
index a2b3d9d..e521950 100644
--- a/kernel/irq/chip.c
+++ b/kernel/irq/chip.c
@@ -1376,6 +1376,10 @@
int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on)
{
data = data->parent_data;
+
+ if (data->chip->flags & IRQCHIP_SKIP_SET_WAKE)
+ return 0;
+
if (data->chip->irq_set_wake)
return data->chip->irq_set_wake(data, on);
diff --git a/kernel/relay.c b/kernel/relay.c
index 04f2486..9e0f523 100644
--- a/kernel/relay.c
+++ b/kernel/relay.c
@@ -428,6 +428,8 @@
dentry = chan->cb->create_buf_file(tmpname, chan->parent,
S_IRUSR, buf,
&chan->is_global);
+ if (IS_ERR(dentry))
+ dentry = NULL;
kfree(tmpname);
@@ -461,7 +463,7 @@
dentry = chan->cb->create_buf_file(NULL, NULL,
S_IRUSR, buf,
&chan->is_global);
- if (WARN_ON(dentry))
+ if (IS_ERR_OR_NULL(dentry))
goto free_buf;
}
diff --git a/kernel/sched/psi.c b/kernel/sched/psi.c
index 1b99eef..e88918e 100644
--- a/kernel/sched/psi.c
+++ b/kernel/sched/psi.c
@@ -4,6 +4,9 @@
* Copyright (c) 2018 Facebook, Inc.
* Author: Johannes Weiner <hannes@cmpxchg.org>
*
+ * Polling support by Suren Baghdasaryan <surenb@google.com>
+ * Copyright (c) 2018 Google, Inc.
+ *
* When CPU, memory and IO are contended, tasks experience delays that
* reduce throughput and introduce latencies into the workload. Memory
* and IO contention, in addition, can cause a full loss of forward
@@ -129,9 +132,13 @@
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/seqlock.h>
+#include <linux/uaccess.h>
#include <linux/cgroup.h>
#include <linux/module.h>
#include <linux/sched.h>
+#include <linux/ctype.h>
+#include <linux/file.h>
+#include <linux/poll.h>
#include <linux/psi.h>
#include "sched.h"
@@ -156,6 +163,11 @@
#define EXP_60s 1981 /* 1/exp(2s/60s) */
#define EXP_300s 2034 /* 1/exp(2s/300s) */
+/* PSI trigger definitions */
+#define WINDOW_MIN_US 500000 /* Min window size is 500ms */
+#define WINDOW_MAX_US 10000000 /* Max window size is 10s */
+#define UPDATES_PER_WINDOW 10 /* 10 updates per window */
+
/* Sampling frequency in nanoseconds */
static u64 psi_period __read_mostly;
@@ -176,6 +188,17 @@
group->avg_next_update = sched_clock() + psi_period;
INIT_DELAYED_WORK(&group->avgs_work, psi_avgs_work);
mutex_init(&group->avgs_lock);
+ /* Init trigger-related members */
+ atomic_set(&group->poll_scheduled, 0);
+ mutex_init(&group->trigger_lock);
+ INIT_LIST_HEAD(&group->triggers);
+ memset(group->nr_triggers, 0, sizeof(group->nr_triggers));
+ group->poll_states = 0;
+ group->poll_min_period = U32_MAX;
+ memset(group->polling_total, 0, sizeof(group->polling_total));
+ group->polling_next_update = ULLONG_MAX;
+ group->polling_until = 0;
+ rcu_assign_pointer(group->poll_kworker, NULL);
}
void __init psi_init(void)
@@ -210,7 +233,8 @@
}
}
-static void get_recent_times(struct psi_group *group, int cpu, u32 *times,
+static void get_recent_times(struct psi_group *group, int cpu,
+ enum psi_aggregators aggregator, u32 *times,
u32 *pchanged_states)
{
struct psi_group_cpu *groupc = per_cpu_ptr(group->pcpu, cpu);
@@ -245,8 +269,8 @@
if (state_mask & (1 << s))
times[s] += now - state_start;
- delta = times[s] - groupc->times_prev[s];
- groupc->times_prev[s] = times[s];
+ delta = times[s] - groupc->times_prev[aggregator][s];
+ groupc->times_prev[aggregator][s] = times[s];
times[s] = delta;
if (delta)
@@ -274,7 +298,9 @@
avg[2] = calc_load(avg[2], EXP_300s, pct);
}
-static void collect_percpu_times(struct psi_group *group, u32 *pchanged_states)
+static void collect_percpu_times(struct psi_group *group,
+ enum psi_aggregators aggregator,
+ u32 *pchanged_states)
{
u64 deltas[NR_PSI_STATES - 1] = { 0, };
unsigned long nonidle_total = 0;
@@ -295,7 +321,7 @@
u32 nonidle;
u32 cpu_changed_states;
- get_recent_times(group, cpu, times,
+ get_recent_times(group, cpu, aggregator, times,
&cpu_changed_states);
changed_states |= cpu_changed_states;
@@ -320,7 +346,8 @@
/* total= */
for (s = 0; s < NR_PSI_STATES - 1; s++)
- group->total[s] += div_u64(deltas[s], max(nonidle_total, 1UL));
+ group->total[aggregator][s] +=
+ div_u64(deltas[s], max(nonidle_total, 1UL));
if (pchanged_states)
*pchanged_states = changed_states;
@@ -352,7 +379,7 @@
for (s = 0; s < NR_PSI_STATES - 1; s++) {
u32 sample;
- sample = group->total[s] - group->avg_total[s];
+ sample = group->total[PSI_AVGS][s] - group->avg_total[s];
/*
* Due to the lockless sampling of the time buckets,
* recorded time deltas can slip into the next period,
@@ -394,7 +421,7 @@
now = sched_clock();
- collect_percpu_times(group, &changed_states);
+ collect_percpu_times(group, PSI_AVGS, &changed_states);
nonidle = changed_states & (1 << PSI_NONIDLE);
/*
* If there is task activity, periodically fold the per-cpu
@@ -414,6 +441,187 @@
mutex_unlock(&group->avgs_lock);
}
+/* Trigger tracking window manipulations */
+static void window_reset(struct psi_window *win, u64 now, u64 value,
+ u64 prev_growth)
+{
+ win->start_time = now;
+ win->start_value = value;
+ win->prev_growth = prev_growth;
+}
+
+/*
+ * PSI growth tracking window update and growth calculation routine.
+ *
+ * This approximates a sliding tracking window by interpolating
+ * partially elapsed windows using historical growth data from the
+ * previous intervals. This minimizes memory requirements (by not storing
+ * all the intermediate values in the previous window) and simplifies
+ * the calculations. It works well because the PSI signal changes only
+ * in the positive direction, and over relatively small window sizes
+ * the growth is close to linear.
+ */
+static u64 window_update(struct psi_window *win, u64 now, u64 value)
+{
+ u64 elapsed;
+ u64 growth;
+
+ elapsed = now - win->start_time;
+ growth = value - win->start_value;
+ /*
+ * After each tracking window passes, win->start_value and
+ * win->start_time get reset, and win->prev_growth stores
+ * the average per-window growth of the previous window.
+ * win->prev_growth is then used to interpolate additional
+ * growth from the previous window assuming it was linear.
+ */
+ if (elapsed > win->size)
+ window_reset(win, now, value, growth);
+ else {
+ u32 remaining;
+
+ remaining = win->size - elapsed;
+ growth += div_u64(win->prev_growth * remaining, win->size);
+ }
+
+ return growth;
+}
+
+static void init_triggers(struct psi_group *group, u64 now)
+{
+ struct psi_trigger *t;
+
+ list_for_each_entry(t, &group->triggers, node)
+ window_reset(&t->win, now,
+ group->total[PSI_POLL][t->state], 0);
+ memcpy(group->polling_total, group->total[PSI_POLL],
+ sizeof(group->polling_total));
+ group->polling_next_update = now + group->poll_min_period;
+}
+
+static u64 update_triggers(struct psi_group *group, u64 now)
+{
+ struct psi_trigger *t;
+ bool new_stall = false;
+ u64 *total = group->total[PSI_POLL];
+
+ /*
+ * On subsequent updates, calculate growth deltas and let
+ * watchers know when their specified thresholds are exceeded.
+ */
+ list_for_each_entry(t, &group->triggers, node) {
+ u64 growth;
+
+ /* Check for stall activity */
+ if (group->polling_total[t->state] == total[t->state])
+ continue;
+
+ /*
+ * Multiple triggers might be looking at the same state,
+ * remember to update group->polling_total[] once we've
+ * been through all of them. Also remember to extend the
+ * polling time if we see new stall activity.
+ */
+ new_stall = true;
+
+ /* Calculate growth since last update */
+ growth = window_update(&t->win, now, total[t->state]);
+ if (growth < t->threshold)
+ continue;
+
+ /* Limit event signaling to once per window */
+ if (now < t->last_event_time + t->win.size)
+ continue;
+
+ /* Generate an event */
+ if (cmpxchg(&t->event, 0, 1) == 0)
+ wake_up_interruptible(&t->event_wait);
+ t->last_event_time = now;
+ }
+
+ if (new_stall)
+ memcpy(group->polling_total, total,
+ sizeof(group->polling_total));
+
+ return now + group->poll_min_period;
+}
+
+/*
+ * Schedule polling if it's not already scheduled. It's safe to call this
+ * even from the hotpath: although kthread_queue_delayed_work takes the
+ * worker->lock spinlock, that lock is never contended because the
+ * poll_scheduled atomic prevents such competition.
+ */
+static void psi_schedule_poll_work(struct psi_group *group, unsigned long delay)
+{
+ struct kthread_worker *kworker;
+
+ /* Do not reschedule if already scheduled */
+ if (atomic_cmpxchg(&group->poll_scheduled, 0, 1) != 0)
+ return;
+
+ rcu_read_lock();
+
+ kworker = rcu_dereference(group->poll_kworker);
+ /*
+ * kworker might be NULL if psi_trigger_destroy races with
+ * psi_task_change (the hotpath), which can't use locks.
+ */
+ if (likely(kworker))
+ kthread_queue_delayed_work(kworker, &group->poll_work, delay);
+ else
+ atomic_set(&group->poll_scheduled, 0);
+
+ rcu_read_unlock();
+}
+
+static void psi_poll_work(struct kthread_work *work)
+{
+ struct kthread_delayed_work *dwork;
+ struct psi_group *group;
+ u32 changed_states;
+ u64 now;
+
+ dwork = container_of(work, struct kthread_delayed_work, work);
+ group = container_of(dwork, struct psi_group, poll_work);
+
+ atomic_set(&group->poll_scheduled, 0);
+
+ mutex_lock(&group->trigger_lock);
+
+ now = sched_clock();
+
+ collect_percpu_times(group, PSI_POLL, &changed_states);
+
+ if (changed_states & group->poll_states) {
+ /* Initialize trigger windows when entering polling mode */
+ if (now > group->polling_until)
+ init_triggers(group, now);
+
+ /*
+ * Keep the monitor active for at least the duration of the
+ * minimum tracking window as long as monitor states are
+ * changing.
+ */
+ group->polling_until = now +
+ group->poll_min_period * UPDATES_PER_WINDOW;
+ }
+
+ if (now > group->polling_until) {
+ group->polling_next_update = ULLONG_MAX;
+ goto out;
+ }
+
+ if (now >= group->polling_next_update)
+ group->polling_next_update = update_triggers(group, now);
+
+ psi_schedule_poll_work(group,
+ nsecs_to_jiffies(group->polling_next_update - now) + 1);
+
+out:
+ mutex_unlock(&group->trigger_lock);
+}
+
static void record_times(struct psi_group_cpu *groupc, int cpu,
bool memstall_tick)
{
@@ -460,8 +668,8 @@
groupc->times[PSI_NONIDLE] += delta;
}
-static void psi_group_change(struct psi_group *group, int cpu,
- unsigned int clear, unsigned int set)
+static u32 psi_group_change(struct psi_group *group, int cpu,
+ unsigned int clear, unsigned int set)
{
struct psi_group_cpu *groupc;
unsigned int t, m;
@@ -507,6 +715,8 @@
groupc->state_mask = state_mask;
write_seqcount_end(&groupc->seq);
+
+ return state_mask;
}
static struct psi_group *iterate_groups(struct task_struct *task, void **iter)
@@ -567,7 +777,11 @@
wake_clock = false;
while ((group = iterate_groups(task, &iter))) {
- psi_group_change(group, cpu, clear, set);
+ u32 state_mask = psi_group_change(group, cpu, clear, set);
+
+ if (state_mask & group->poll_states)
+ psi_schedule_poll_work(group, 1);
+
if (wake_clock && !delayed_work_pending(&group->avgs_work))
schedule_delayed_work(&group->avgs_work, PSI_FREQ);
}
@@ -668,6 +882,8 @@
cancel_delayed_work_sync(&cgroup->psi.avgs_work);
free_percpu(cgroup->psi.pcpu);
+ /* All triggers must be removed by now */
+ WARN_ONCE(cgroup->psi.poll_states, "psi: trigger leak\n");
}
/**
@@ -731,7 +947,7 @@
/* Update averages before reporting them */
mutex_lock(&group->avgs_lock);
now = sched_clock();
- collect_percpu_times(group, NULL);
+ collect_percpu_times(group, PSI_AVGS, NULL);
if (now >= group->avg_next_update)
group->avg_next_update = update_averages(group, now);
mutex_unlock(&group->avgs_lock);
@@ -743,7 +959,8 @@
for (w = 0; w < 3; w++)
avg[w] = group->avg[res * 2 + full][w];
- total = div_u64(group->total[res * 2 + full], NSEC_PER_USEC);
+ total = div_u64(group->total[PSI_AVGS][res * 2 + full],
+ NSEC_PER_USEC);
seq_printf(m, "%s avg10=%lu.%02lu avg60=%lu.%02lu avg300=%lu.%02lu total=%llu\n",
full ? "full" : "some",
@@ -786,25 +1003,270 @@
return single_open(file, psi_cpu_show, NULL);
}
+struct psi_trigger *psi_trigger_create(struct psi_group *group,
+ char *buf, size_t nbytes, enum psi_res res)
+{
+ struct psi_trigger *t;
+ enum psi_states state;
+ u32 threshold_us;
+ u32 window_us;
+
+ if (static_branch_likely(&psi_disabled))
+ return ERR_PTR(-EOPNOTSUPP);
+
+ if (sscanf(buf, "some %u %u", &threshold_us, &window_us) == 2)
+ state = PSI_IO_SOME + res * 2;
+ else if (sscanf(buf, "full %u %u", &threshold_us, &window_us) == 2)
+ state = PSI_IO_FULL + res * 2;
+ else
+ return ERR_PTR(-EINVAL);
+
+ if (state >= PSI_NONIDLE)
+ return ERR_PTR(-EINVAL);
+
+ if (window_us < WINDOW_MIN_US ||
+ window_us > WINDOW_MAX_US)
+ return ERR_PTR(-EINVAL);
+
+ /* Check threshold */
+ if (threshold_us == 0 || threshold_us > window_us)
+ return ERR_PTR(-EINVAL);
+
+ t = kmalloc(sizeof(*t), GFP_KERNEL);
+ if (!t)
+ return ERR_PTR(-ENOMEM);
+
+ t->group = group;
+ t->state = state;
+ t->threshold = threshold_us * NSEC_PER_USEC;
+ t->win.size = window_us * NSEC_PER_USEC;
+ window_reset(&t->win, 0, 0, 0);
+
+ t->event = 0;
+ t->last_event_time = 0;
+ init_waitqueue_head(&t->event_wait);
+ kref_init(&t->refcount);
+
+ mutex_lock(&group->trigger_lock);
+
+ if (!rcu_access_pointer(group->poll_kworker)) {
+ struct sched_param param = {
+ .sched_priority = MAX_RT_PRIO - 1,
+ };
+ struct kthread_worker *kworker;
+
+ kworker = kthread_create_worker(0, "psimon");
+ if (IS_ERR(kworker)) {
+ kfree(t);
+ mutex_unlock(&group->trigger_lock);
+ return ERR_CAST(kworker);
+ }
+ sched_setscheduler(kworker->task, SCHED_FIFO, &param);
+ kthread_init_delayed_work(&group->poll_work,
+ psi_poll_work);
+ rcu_assign_pointer(group->poll_kworker, kworker);
+ }
+
+ list_add(&t->node, &group->triggers);
+ group->poll_min_period = min(group->poll_min_period,
+ div_u64(t->win.size, UPDATES_PER_WINDOW));
+ group->nr_triggers[t->state]++;
+ group->poll_states |= (1 << t->state);
+
+ mutex_unlock(&group->trigger_lock);
+
+ return t;
+}
+
+static void psi_trigger_destroy(struct kref *ref)
+{
+ struct psi_trigger *t = container_of(ref, struct psi_trigger, refcount);
+ struct psi_group *group = t->group;
+ struct kthread_worker *kworker_to_destroy = NULL;
+
+ if (static_branch_likely(&psi_disabled))
+ return;
+
+ /*
+ * Wake up waiters to stop polling. This can happen if a cgroup is
+ * deleted out from under a polling process.
+ */
+ wake_up_interruptible(&t->event_wait);
+
+ mutex_lock(&group->trigger_lock);
+
+ if (!list_empty(&t->node)) {
+ struct psi_trigger *tmp;
+ u64 period = ULLONG_MAX;
+
+ list_del(&t->node);
+ group->nr_triggers[t->state]--;
+ if (!group->nr_triggers[t->state])
+ group->poll_states &= ~(1 << t->state);
+ /* reset min update period for the remaining triggers */
+ list_for_each_entry(tmp, &group->triggers, node)
+ period = min(period, div_u64(tmp->win.size,
+ UPDATES_PER_WINDOW));
+ group->poll_min_period = period;
+ /* Destroy poll_kworker when the last trigger is destroyed */
+ if (group->poll_states == 0) {
+ group->polling_until = 0;
+ kworker_to_destroy = rcu_dereference_protected(
+ group->poll_kworker,
+ lockdep_is_held(&group->trigger_lock));
+ rcu_assign_pointer(group->poll_kworker, NULL);
+ }
+ }
+
+ mutex_unlock(&group->trigger_lock);
+
+ /*
+ * Wait for RCU readers of both *trigger_ptr (from psi_trigger_replace)
+ * and poll_kworker to complete their read-side critical sections
+ * before destroying the trigger and, optionally, the poll_kworker.
+ */
+ synchronize_rcu();
+ /*
+ * Destroy the kworker after releasing trigger_lock to prevent a
+ * deadlock while waiting for psi_poll_work to acquire trigger_lock
+ */
+ if (kworker_to_destroy) {
+ kthread_cancel_delayed_work_sync(&group->poll_work);
+ kthread_destroy_worker(kworker_to_destroy);
+ }
+ kfree(t);
+}
+
+void psi_trigger_replace(void **trigger_ptr, struct psi_trigger *new)
+{
+ struct psi_trigger *old = *trigger_ptr;
+
+ if (static_branch_likely(&psi_disabled))
+ return;
+
+ rcu_assign_pointer(*trigger_ptr, new);
+ if (old)
+ kref_put(&old->refcount, psi_trigger_destroy);
+}
+
+__poll_t psi_trigger_poll(void **trigger_ptr,
+ struct file *file, poll_table *wait)
+{
+ __poll_t ret = DEFAULT_POLLMASK;
+ struct psi_trigger *t;
+
+ if (static_branch_likely(&psi_disabled))
+ return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
+
+ rcu_read_lock();
+
+ t = rcu_dereference(*(void __rcu __force **)trigger_ptr);
+ if (!t) {
+ rcu_read_unlock();
+ return DEFAULT_POLLMASK | EPOLLERR | EPOLLPRI;
+ }
+ kref_get(&t->refcount);
+
+ rcu_read_unlock();
+
+ poll_wait(file, &t->event_wait, wait);
+
+ if (cmpxchg(&t->event, 1, 0) == 1)
+ ret |= EPOLLPRI;
+
+ kref_put(&t->refcount, psi_trigger_destroy);
+
+ return ret;
+}
+
+static ssize_t psi_write(struct file *file, const char __user *user_buf,
+ size_t nbytes, enum psi_res res)
+{
+ char buf[32];
+ size_t buf_size;
+ struct seq_file *seq;
+ struct psi_trigger *new;
+
+ if (static_branch_likely(&psi_disabled))
+ return -EOPNOTSUPP;
+
+ buf_size = min(nbytes, (sizeof(buf) - 1));
+ if (copy_from_user(buf, user_buf, buf_size))
+ return -EFAULT;
+
+ buf[buf_size - 1] = '\0';
+
+ new = psi_trigger_create(&psi_system, buf, nbytes, res);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
+
+ seq = file->private_data;
+ /* Take seq->lock to protect seq->private from concurrent writes */
+ mutex_lock(&seq->lock);
+ psi_trigger_replace(&seq->private, new);
+ mutex_unlock(&seq->lock);
+
+ return nbytes;
+}
+
+static ssize_t psi_io_write(struct file *file, const char __user *user_buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return psi_write(file, user_buf, nbytes, PSI_IO);
+}
+
+static ssize_t psi_memory_write(struct file *file, const char __user *user_buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return psi_write(file, user_buf, nbytes, PSI_MEM);
+}
+
+static ssize_t psi_cpu_write(struct file *file, const char __user *user_buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return psi_write(file, user_buf, nbytes, PSI_CPU);
+}
+
+static __poll_t psi_fop_poll(struct file *file, poll_table *wait)
+{
+ struct seq_file *seq = file->private_data;
+
+ return psi_trigger_poll(&seq->private, file, wait);
+}
+
+static int psi_fop_release(struct inode *inode, struct file *file)
+{
+ struct seq_file *seq = file->private_data;
+
+ psi_trigger_replace(&seq->private, NULL);
+ return single_release(inode, file);
+}
+
static const struct file_operations psi_io_fops = {
.open = psi_io_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .write = psi_io_write,
+ .poll = psi_fop_poll,
+ .release = psi_fop_release,
};
static const struct file_operations psi_memory_fops = {
.open = psi_memory_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .write = psi_memory_write,
+ .poll = psi_fop_poll,
+ .release = psi_fop_release,
};
static const struct file_operations psi_cpu_fops = {
.open = psi_cpu_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = single_release,
+ .write = psi_cpu_write,
+ .poll = psi_fop_poll,
+ .release = psi_fop_release,
};
static int __init psi_proc_init(void)
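
A worked example of the window_update() interpolation above, with illustrative values: a 1s window whose previous incarnation grew by 100ms of stall, sampled 400ms into the current window after 70ms of new growth:

    /* All values illustrative; mirrors the arithmetic in
     * window_update().
     */
    u64 size = NSEC_PER_SEC;                /* win->size, 1s        */
    u64 prev_growth = 100 * NSEC_PER_MSEC;  /* previous window      */
    u64 elapsed = 400 * NSEC_PER_MSEC;      /* into current window  */
    u64 growth = 70 * NSEC_PER_MSEC;        /* new stall so far     */
    u64 remaining = size - elapsed;         /* 600ms left           */

    /* 70ms + 100ms * 600/1000 = 130ms over the sliding window */
    growth += div_u64(prev_growth * remaining, size);
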
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 8116c86..4d506f6 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -499,6 +499,12 @@
struct hrtimer_clock_base *base;
ktime_t expires;
+ /*
+ * Skip initializing cpu_base->next_timer to NULL here, as next_timer
+ * is not updated in the loop below when a timer is being excluded.
+ */
+ if (!exclude)
+ cpu_base->next_timer = NULL;
for_each_active_base(base, cpu_base, active) {
struct timerqueue_node *next;
struct hrtimer *timer;
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index 9864a35..6c28d51 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -1158,22 +1158,12 @@
int bpf_probe_register(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
- int err;
-
- mutex_lock(&bpf_event_mutex);
- err = __bpf_probe_register(btp, prog);
- mutex_unlock(&bpf_event_mutex);
- return err;
+ return __bpf_probe_register(btp, prog);
}
int bpf_probe_unregister(struct bpf_raw_event_map *btp, struct bpf_prog *prog)
{
- int err;
-
- mutex_lock(&bpf_event_mutex);
- err = tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
- mutex_unlock(&bpf_event_mutex);
- return err;
+ return tracepoint_probe_unregister(btp->tp, (void *)btp->bpf_func, prog);
}
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
diff --git a/kernel/trace/trace_events_filter.c b/kernel/trace/trace_events_filter.c
index 5574e86..5a1c64a 100644
--- a/kernel/trace/trace_events_filter.c
+++ b/kernel/trace/trace_events_filter.c
@@ -1301,7 +1301,7 @@
/* go past the last quote */
i++;
- } else if (isdigit(str[i])) {
+ } else if (isdigit(str[i]) || str[i] == '-') {
/* Make sure the field is not a string */
if (is_string_field(field)) {
@@ -1314,6 +1314,9 @@
goto err_free;
}
+ if (str[i] == '-')
+ i++;
+
/* We allow 0xDEADBEEF */
while (isalnum(str[i]))
i++;
diff --git a/lib/test_kmod.c b/lib/test_kmod.c
index d82d022..9cf7762 100644
--- a/lib/test_kmod.c
+++ b/lib/test_kmod.c
@@ -632,7 +632,7 @@
config->test_driver = NULL;
kfree_const(config->test_fs);
- config->test_driver = NULL;
+ config->test_fs = NULL;
}
static void kmod_config_free(struct kmod_test_device *test_dev)
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 37c5c51..2cf470a 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1287,11 +1287,13 @@
return PageBuddy(page) && page_order(page) >= pageblock_order;
}
-/* Return the start of the next active pageblock after a given page */
-static struct page *next_active_pageblock(struct page *page)
+/* Return the pfn of the start of the next active pageblock after a given pfn */
+static unsigned long next_active_pageblock(unsigned long pfn)
{
+ struct page *page = pfn_to_page(pfn);
+
/* Ensure the starting page is pageblock-aligned */
- BUG_ON(page_to_pfn(page) & (pageblock_nr_pages - 1));
+ BUG_ON(pfn & (pageblock_nr_pages - 1));
/* If the entire pageblock is free, move to the end of free page */
if (pageblock_free(page)) {
@@ -1299,16 +1301,16 @@
/* be careful. we don't have locks, page_order can be changed.*/
order = page_order(page);
if ((order < MAX_ORDER) && (order >= pageblock_order))
- return page + (1 << order);
+ return pfn + (1 << order);
}
- return page + pageblock_nr_pages;
+ return pfn + pageblock_nr_pages;
}
-static bool is_pageblock_removable_nolock(struct page *page)
+static bool is_pageblock_removable_nolock(unsigned long pfn)
{
+ struct page *page = pfn_to_page(pfn);
struct zone *zone;
- unsigned long pfn;
/*
* We have to be careful here because we are iterating over memory
@@ -1331,12 +1333,14 @@
/* Checks if this range of memory is likely to be hot-removable. */
bool is_mem_section_removable(unsigned long start_pfn, unsigned long nr_pages)
{
- struct page *page = pfn_to_page(start_pfn);
- struct page *end_page = page + nr_pages;
+ unsigned long end_pfn, pfn;
+
+ end_pfn = min(start_pfn + nr_pages,
+ zone_end_pfn(page_zone(pfn_to_page(start_pfn))));
/* Check the starting page of each pageblock within the range */
- for (; page < end_page; page = next_active_pageblock(page)) {
- if (!is_pageblock_removable_nolock(page))
+ for (pfn = start_pfn; pfn < end_pfn; pfn = next_active_pageblock(pfn)) {
+ if (!is_pageblock_removable_nolock(pfn))
return false;
cond_resched();
}
@@ -1372,6 +1376,9 @@
i++;
if (i == MAX_ORDER_NR_PAGES || pfn + i >= end_pfn)
continue;
+ /* Check if we got outside of the zone */
+ if (zone && !zone_spans_pfn(zone, pfn + i))
+ return 0;
page = pfn_to_page(pfn + i);
if (zone && page_zone(page) != zone)
return 0;
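
Why the walk switched from struct page pointers to pfns, under my reading of the hunk: with a sparse memmap, pointer arithmetic such as page + pageblock_nr_pages is only valid within one memory section, while pfn arithmetic stays correct across section (and, with the new zone_spans_pfn() check, zone) boundaries. Illustrative contrast:

    /* Old: pointer arithmetic, may step off the memmap between
     * sections.
     */
    page += pageblock_nr_pages;

    /* New: advance the pfn, convert only at the point of use. */
    pfn += pageblock_nr_pages;
    page = pfn_to_page(pfn);
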
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index e8090f0..ef0dec2 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -104,6 +104,9 @@
ret = cfg80211_get_station(real_netdev, neigh->addr, &sinfo);
+ /* free the TID stats immediately */
+ cfg80211_sinfo_release_content(&sinfo);
+
dev_put(real_netdev);
if (ret == -ENOENT) {
/* Node is not associated anymore! It would be
diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c
index 6fa61b8..a4d6d77 100644
--- a/net/bluetooth/af_bluetooth.c
+++ b/net/bluetooth/af_bluetooth.c
@@ -183,15 +183,25 @@
}
EXPORT_SYMBOL(bt_sock_unlink);
-void bt_accept_enqueue(struct sock *parent, struct sock *sk)
+void bt_accept_enqueue(struct sock *parent, struct sock *sk, bool bh)
{
BT_DBG("parent %p, sk %p", parent, sk);
sock_hold(sk);
- lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
+ if (bh)
+ bh_lock_sock_nested(sk);
+ else
+ lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
+
list_add_tail(&bt_sk(sk)->accept_q, &bt_sk(parent)->accept_q);
bt_sk(sk)->parent = parent;
- release_sock(sk);
+
+ if (bh)
+ bh_unlock_sock(sk);
+ else
+ release_sock(sk);
+
parent->sk_ack_backlog++;
}
EXPORT_SYMBOL(bt_accept_enqueue);
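
The new bh flag lets callers pick the locking variant that matches their context. The later hunks in this patch suggest the contract below; the annotations are mine, not from the patch:

    /* Process context (l2cap_sock.c): may sleep, take the socket
     * lock normally.
     */
    bt_accept_enqueue(parent, sk, false);

    /* Softirq context (rfcomm/sock.c, sco.c): cannot sleep, use
     * the bh spinlock variant.
     */
    bt_accept_enqueue(parent, sk, true);
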
diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
index 686bdc6..a3a2cd5 100644
--- a/net/bluetooth/l2cap_sock.c
+++ b/net/bluetooth/l2cap_sock.c
@@ -1252,7 +1252,7 @@
l2cap_sock_init(sk, parent);
- bt_accept_enqueue(parent, sk);
+ bt_accept_enqueue(parent, sk, false);
release_sock(parent);
diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
index d606e92..c044ff2 100644
--- a/net/bluetooth/rfcomm/sock.c
+++ b/net/bluetooth/rfcomm/sock.c
@@ -988,7 +988,7 @@
rfcomm_pi(sk)->channel = channel;
sk->sk_state = BT_CONFIG;
- bt_accept_enqueue(parent, sk);
+ bt_accept_enqueue(parent, sk, true);
/* Accept connection and return socket DLC */
*d = rfcomm_pi(sk)->dlc;
diff --git a/net/bluetooth/sco.c b/net/bluetooth/sco.c
index 8f0f927..a4ca55d 100644
--- a/net/bluetooth/sco.c
+++ b/net/bluetooth/sco.c
@@ -193,7 +193,7 @@
conn->sk = sk;
if (parent)
- bt_accept_enqueue(parent, sk);
+ bt_accept_enqueue(parent, sk, true);
}
static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
index 5e55cef..6693e20 100644
--- a/net/bridge/netfilter/ebtables.c
+++ b/net/bridge/netfilter/ebtables.c
@@ -2293,9 +2293,12 @@
xt_compat_lock(NFPROTO_BRIDGE);
- ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
- if (ret < 0)
- goto out_unlock;
+ if (tmp.nentries) {
+ ret = xt_compat_init_offsets(NFPROTO_BRIDGE, tmp.nentries);
+ if (ret < 0)
+ goto out_unlock;
+ }
+
ret = compat_copy_entries(entries_tmp, tmp.entries_size, &state);
if (ret < 0)
goto out_unlock;
diff --git a/net/core/filter.c b/net/core/filter.c
index fb0080e..bed9061 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -3909,10 +3909,12 @@
/* Only some socketops are supported */
switch (optname) {
case SO_RCVBUF:
+ val = min_t(u32, val, sysctl_rmem_max);
sk->sk_userlocks |= SOCK_RCVBUF_LOCK;
sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF);
break;
case SO_SNDBUF:
+ val = min_t(u32, val, sysctl_wmem_max);
sk->sk_userlocks |= SOCK_SNDBUF_LOCK;
sk->sk_sndbuf = max_t(int, val * 2, SOCK_MIN_SNDBUF);
break;
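
Presumably this clamp mirrors the one the regular setsockopt() path applies: without it, a large program-supplied value doubled into a signed int can exceed, or wrap past, the sysctl ceilings. Illustrative arithmetic, with made-up limits:

    u32 val = 8 * 1024 * 1024;                  /* requested: 8MB */
    /* say sysctl_rmem_max is 4MB: */
    val = min_t(u32, val, sysctl_rmem_max);     /* clamped: 4MB   */
    sk->sk_rcvbuf = max_t(int, val * 2, SOCK_MIN_RCVBUF); /* 8MB */
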
diff --git a/net/core/gen_stats.c b/net/core/gen_stats.c
index 188d693..e2fd8ba 100644
--- a/net/core/gen_stats.c
+++ b/net/core/gen_stats.c
@@ -256,7 +256,6 @@
for_each_possible_cpu(i) {
const struct gnet_stats_queue *qcpu = per_cpu_ptr(q, i);
- qstats->qlen = 0;
qstats->backlog += qcpu->backlog;
qstats->drops += qcpu->drops;
qstats->requeues += qcpu->requeues;
@@ -272,7 +271,6 @@
if (cpu) {
__gnet_stats_copy_queue_cpu(qstats, cpu);
} else {
- qstats->qlen = q->qlen;
qstats->backlog = q->backlog;
qstats->drops = q->drops;
qstats->requeues = q->requeues;
diff --git a/net/core/gro_cells.c b/net/core/gro_cells.c
index acf45dd..e095fb8 100644
--- a/net/core/gro_cells.c
+++ b/net/core/gro_cells.c
@@ -13,22 +13,36 @@
{
struct net_device *dev = skb->dev;
struct gro_cell *cell;
+ int res;
- if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev))
- return netif_rx(skb);
+ rcu_read_lock();
+ if (unlikely(!(dev->flags & IFF_UP)))
+ goto drop;
+
+ if (!gcells->cells || skb_cloned(skb) || netif_elide_gro(dev)) {
+ res = netif_rx(skb);
+ goto unlock;
+ }
cell = this_cpu_ptr(gcells->cells);
if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
+drop:
atomic_long_inc(&dev->rx_dropped);
kfree_skb(skb);
- return NET_RX_DROP;
+ res = NET_RX_DROP;
+ goto unlock;
}
__skb_queue_tail(&cell->napi_skbs, skb);
if (skb_queue_len(&cell->napi_skbs) == 1)
napi_schedule(&cell->napi);
- return NET_RX_SUCCESS;
+
+ res = NET_RX_SUCCESS;
+
+unlock:
+ rcu_read_unlock();
+ return res;
}
EXPORT_SYMBOL(gro_cells_receive);
diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
index bd67c4d..2aabb7e 100644
--- a/net/core/net-sysfs.c
+++ b/net/core/net-sysfs.c
@@ -1547,6 +1547,9 @@
error:
netdev_queue_update_kobjects(dev, txq, 0);
net_rx_queue_update_kobjects(dev, rxq, 0);
+#ifdef CONFIG_SYSFS
+ kset_unregister(dev->queues_kset);
+#endif
return error;
}
diff --git a/net/hsr/hsr_device.c b/net/hsr/hsr_device.c
index b8cd43c..a97bf32 100644
--- a/net/hsr/hsr_device.c
+++ b/net/hsr/hsr_device.c
@@ -94,9 +94,8 @@
&& (old_operstate != IF_OPER_UP)) {
/* Went up */
hsr->announce_count = 0;
- hsr->announce_timer.expires = jiffies +
- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
- add_timer(&hsr->announce_timer);
+ mod_timer(&hsr->announce_timer,
+ jiffies + msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL));
}
if ((hsr_dev->operstate != IF_OPER_UP) && (old_operstate == IF_OPER_UP))
@@ -332,6 +331,7 @@
{
struct hsr_priv *hsr;
struct hsr_port *master;
+ unsigned long interval;
hsr = from_timer(hsr, t, announce_timer);
@@ -343,18 +343,16 @@
hsr->protVersion);
hsr->announce_count++;
- hsr->announce_timer.expires = jiffies +
- msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
+ interval = msecs_to_jiffies(HSR_ANNOUNCE_INTERVAL);
} else {
send_hsr_supervision_frame(master, HSR_TLV_LIFE_CHECK,
hsr->protVersion);
- hsr->announce_timer.expires = jiffies +
- msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
+ interval = msecs_to_jiffies(HSR_LIFE_CHECK_INTERVAL);
}
if (is_admin_up(master->dev))
- add_timer(&hsr->announce_timer);
+ mod_timer(&hsr->announce_timer, jiffies + interval);
rcu_read_unlock();
}
@@ -486,7 +484,7 @@
res = hsr_add_port(hsr, hsr_dev, HSR_PT_MASTER);
if (res)
- return res;
+ goto err_add_port;
res = register_netdevice(hsr_dev);
if (res)
@@ -506,6 +504,8 @@
fail:
hsr_for_each_port(hsr, port)
hsr_del_port(port);
+err_add_port:
+ hsr_del_node(&hsr->self_node_db);
return res;
}
diff --git a/net/hsr/hsr_framereg.c b/net/hsr/hsr_framereg.c
index 286ceb4..9af16cb 100644
--- a/net/hsr/hsr_framereg.c
+++ b/net/hsr/hsr_framereg.c
@@ -124,6 +124,18 @@
return 0;
}
+void hsr_del_node(struct list_head *self_node_db)
+{
+ struct hsr_node *node;
+
+ rcu_read_lock();
+ node = list_first_or_null_rcu(self_node_db, struct hsr_node, mac_list);
+ rcu_read_unlock();
+ if (node) {
+ list_del_rcu(&node->mac_list);
+ kfree(node);
+ }
+}
/* Allocate an hsr_node and add it to node_db. 'addr' is the node's AddressA;
* seq_out is used to initialize filtering of outgoing duplicate frames
diff --git a/net/hsr/hsr_framereg.h b/net/hsr/hsr_framereg.h
index 370b459..531fd3d 100644
--- a/net/hsr/hsr_framereg.h
+++ b/net/hsr/hsr_framereg.h
@@ -16,6 +16,7 @@
struct hsr_node;
+void hsr_del_node(struct list_head *self_node_db);
struct hsr_node *hsr_add_node(struct list_head *node_db, unsigned char addr[],
u16 seq_out);
struct hsr_node *hsr_get_node(struct hsr_port *port, struct sk_buff *skb,
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 777fa3b..f0165c5 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -667,7 +667,8 @@
case CIPSO_V4_MAP_PASS:
return 0;
case CIPSO_V4_MAP_TRANS:
- if (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL)
+ if ((level < doi_def->map.std->lvl.cipso_size) &&
+ (doi_def->map.std->lvl.cipso[level] < CIPSO_V4_INV_LVL))
return 0;
break;
}
@@ -1735,13 +1736,26 @@
*/
void cipso_v4_error(struct sk_buff *skb, int error, u32 gateway)
{
+ unsigned char optbuf[sizeof(struct ip_options) + 40];
+ struct ip_options *opt = (struct ip_options *)optbuf;
+
if (ip_hdr(skb)->protocol == IPPROTO_ICMP || error != -EACCES)
return;
+ /*
+ * We might be called above the IP layer,
+ * so we cannot use icmp_send and IPCB here.
+ */
+
+ memset(opt, 0, sizeof(struct ip_options));
+ opt->optlen = ip_hdr(skb)->ihl*4 - sizeof(struct iphdr);
+ if (__ip_options_compile(dev_net(skb->dev), opt, skb, NULL))
+ return;
+
if (gateway)
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0);
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_NET_ANO, 0, opt);
else
- icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0);
+ __icmp_send(skb, ICMP_DEST_UNREACH, ICMP_HOST_ANO, 0, opt);
}
/**
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 958e185..dae743b 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -700,6 +700,10 @@
case RTA_GATEWAY:
cfg->fc_gw = nla_get_be32(attr);
break;
+ case RTA_VIA:
+ NL_SET_ERR_MSG(extack, "IPv4 does not support RTA_VIA attribute");
+ err = -EINVAL;
+ goto errout;
case RTA_PRIORITY:
cfg->fc_priority = nla_get_u32(attr);
break;
diff --git a/net/ipv4/icmp.c b/net/ipv4/icmp.c
index 695979b..ad75c46 100644
--- a/net/ipv4/icmp.c
+++ b/net/ipv4/icmp.c
@@ -570,7 +570,8 @@
* MUST reply to only the first fragment.
*/
-void icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info)
+void __icmp_send(struct sk_buff *skb_in, int type, int code, __be32 info,
+ const struct ip_options *opt)
{
struct iphdr *iph;
int room;
@@ -691,7 +692,7 @@
iph->tos;
mark = IP4_REPLY_MARK(net, skb_in->mark);
- if (ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in))
+ if (__ip_options_echo(net, &icmp_param.replyopts.opt.opt, skb_in, opt))
goto out_unlock;
@@ -742,7 +743,7 @@
local_bh_enable();
out:;
}
-EXPORT_SYMBOL(icmp_send);
+EXPORT_SYMBOL(__icmp_send);
static void icmp_socket_deliver(struct sk_buff *skb, u32 info)
diff --git a/net/ipv4/ip_input.c b/net/ipv4/ip_input.c
index 797c4ff..0680f87 100644
--- a/net/ipv4/ip_input.c
+++ b/net/ipv4/ip_input.c
@@ -307,11 +307,10 @@
}
static int ip_rcv_finish_core(struct net *net, struct sock *sk,
- struct sk_buff *skb)
+ struct sk_buff *skb, struct net_device *dev)
{
const struct iphdr *iph = ip_hdr(skb);
int (*edemux)(struct sk_buff *skb);
- struct net_device *dev = skb->dev;
struct rtable *rt;
int err;
@@ -400,6 +399,7 @@
static int ip_rcv_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
+ struct net_device *dev = skb->dev;
int ret;
/* if ingress device is enslaved to an L3 master device pass the
@@ -409,7 +409,7 @@
if (!skb)
return NET_RX_SUCCESS;
- ret = ip_rcv_finish_core(net, sk, skb);
+ ret = ip_rcv_finish_core(net, sk, skb, dev);
if (ret != NET_RX_DROP)
ret = dst_input(skb);
return ret;
@@ -549,6 +549,7 @@
INIT_LIST_HEAD(&sublist);
list_for_each_entry_safe(skb, next, head, list) {
+ struct net_device *dev = skb->dev;
struct dst_entry *dst;
skb_list_del_init(skb);
@@ -558,7 +559,7 @@
skb = l3mdev_ip_rcv(skb);
if (!skb)
continue;
- if (ip_rcv_finish_core(net, sk, skb) == NET_RX_DROP)
+ if (ip_rcv_finish_core(net, sk, skb, dev) == NET_RX_DROP)
continue;
dst = skb_dst(skb);
diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c
index ed194d4..32a3504 100644
--- a/net/ipv4/ip_options.c
+++ b/net/ipv4/ip_options.c
@@ -251,8 +251,9 @@
* If opt == NULL, then skb->data should point to IP header.
*/
-int ip_options_compile(struct net *net,
- struct ip_options *opt, struct sk_buff *skb)
+int __ip_options_compile(struct net *net,
+ struct ip_options *opt, struct sk_buff *skb,
+ __be32 *info)
{
__be32 spec_dst = htonl(INADDR_ANY);
unsigned char *pp_ptr = NULL;
@@ -468,11 +469,22 @@
return 0;
error:
- if (skb) {
- icmp_send(skb, ICMP_PARAMETERPROB, 0, htonl((pp_ptr-iph)<<24));
- }
+ if (info)
+ *info = htonl((pp_ptr-iph)<<24);
return -EINVAL;
}
+
+int ip_options_compile(struct net *net,
+ struct ip_options *opt, struct sk_buff *skb)
+{
+ int ret;
+ __be32 info;
+
+ ret = __ip_options_compile(net, opt, skb, &info);
+ if (ret != 0 && skb)
+ icmp_send(skb, ICMP_PARAMETERPROB, 0, info);
+ return ret;
+}
EXPORT_SYMBOL(ip_options_compile);
/*
diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
index 7f56944..40a7cd5 100644
--- a/net/ipv4/ip_vti.c
+++ b/net/ipv4/ip_vti.c
@@ -74,6 +74,33 @@
return 0;
}
+static int vti_input_ipip(struct sk_buff *skb, int nexthdr, __be32 spi,
+ int encap_type)
+{
+ struct ip_tunnel *tunnel;
+ const struct iphdr *iph = ip_hdr(skb);
+ struct net *net = dev_net(skb->dev);
+ struct ip_tunnel_net *itn = net_generic(net, vti_net_id);
+
+ tunnel = ip_tunnel_lookup(itn, skb->dev->ifindex, TUNNEL_NO_KEY,
+ iph->saddr, iph->daddr, 0);
+ if (tunnel) {
+ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+ goto drop;
+
+ XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
+
+ skb->dev = tunnel->dev;
+
+ return xfrm_input(skb, nexthdr, spi, encap_type);
+ }
+
+ return -EINVAL;
+drop:
+ kfree_skb(skb);
+ return 0;
+}
+
static int vti_rcv(struct sk_buff *skb)
{
XFRM_SPI_SKB_CB(skb)->family = AF_INET;
@@ -82,6 +109,14 @@
return vti_input(skb, ip_hdr(skb)->protocol, 0, 0);
}
+static int vti_rcv_ipip(struct sk_buff *skb)
+{
+ XFRM_SPI_SKB_CB(skb)->family = AF_INET;
+ XFRM_SPI_SKB_CB(skb)->daddroff = offsetof(struct iphdr, daddr);
+
+ return vti_input_ipip(skb, ip_hdr(skb)->protocol, ip_hdr(skb)->saddr, 0);
+}
+
static int vti_rcv_cb(struct sk_buff *skb, int err)
{
unsigned short family;
@@ -435,6 +470,12 @@
.priority = 100,
};
+static struct xfrm_tunnel ipip_handler __read_mostly = {
+ .handler = vti_rcv_ipip,
+ .err_handler = vti4_err,
+ .priority = 0,
+};
+
static int __net_init vti_init_net(struct net *net)
{
int err;
@@ -603,6 +644,13 @@
if (err < 0)
goto xfrm_proto_comp_failed;
+ msg = "ipip tunnel";
+ err = xfrm4_tunnel_register(&ipip_handler, AF_INET);
+ if (err < 0) {
+ pr_info("%s: cant't register tunnel\n",__func__);
+ goto xfrm_tunnel_failed;
+ }
+
msg = "netlink interface";
err = rtnl_link_register(&vti_link_ops);
if (err < 0)
@@ -612,6 +660,8 @@
rtnl_link_failed:
xfrm4_protocol_deregister(&vti_ipcomp4_protocol, IPPROTO_COMP);
+xfrm_tunnel_failed:
+ xfrm4_tunnel_deregister(&ipip_handler, AF_INET);
xfrm_proto_comp_failed:
xfrm4_protocol_deregister(&vti_ah4_protocol, IPPROTO_AH);
xfrm_proto_ah_failed:
diff --git a/net/ipv4/netlink.c b/net/ipv4/netlink.c
index f86bb4f..d8e3a1f 100644
--- a/net/ipv4/netlink.c
+++ b/net/ipv4/netlink.c
@@ -3,9 +3,10 @@
#include <linux/types.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
+#include <linux/in6.h>
#include <net/ip.h>
-int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto,
+int rtm_getroute_parse_ip_proto(struct nlattr *attr, u8 *ip_proto, u8 family,
struct netlink_ext_ack *extack)
{
*ip_proto = nla_get_u8(attr);
@@ -13,11 +14,19 @@
switch (*ip_proto) {
case IPPROTO_TCP:
case IPPROTO_UDP:
- case IPPROTO_ICMP:
return 0;
- default:
- NL_SET_ERR_MSG(extack, "Unsupported ip proto");
- return -EOPNOTSUPP;
+ case IPPROTO_ICMP:
+ if (family != AF_INET)
+ break;
+ return 0;
+#if IS_ENABLED(CONFIG_IPV6)
+ case IPPROTO_ICMPV6:
+ if (family != AF_INET6)
+ break;
+ return 0;
+#endif
}
+ NL_SET_ERR_MSG(extack, "Unsupported ip proto");
+ return -EOPNOTSUPP;
}
EXPORT_SYMBOL_GPL(rtm_getroute_parse_ip_proto);
diff --git a/net/ipv4/route.c b/net/ipv4/route.c
index 436b46c..7a556e4 100644
--- a/net/ipv4/route.c
+++ b/net/ipv4/route.c
@@ -1308,6 +1308,10 @@
if (fnhe->fnhe_daddr == daddr) {
rcu_assign_pointer(*fnhe_p, rcu_dereference_protected(
fnhe->fnhe_next, lockdep_is_held(&fnhe_lock)));
+ /* set fnhe_daddr to 0 to ensure it won't bind with
+ * new dsts in rt_bind_exception().
+ */
+ fnhe->fnhe_daddr = 0;
fnhe_flush_routes(fnhe);
kfree_rcu(fnhe, rcu);
break;
@@ -2155,12 +2159,13 @@
int our = 0;
int err = -EINVAL;
- if (in_dev)
- our = ip_check_mc_rcu(in_dev, daddr, saddr,
- ip_hdr(skb)->protocol);
+ if (!in_dev)
+ return err;
+ our = ip_check_mc_rcu(in_dev, daddr, saddr,
+ ip_hdr(skb)->protocol);
/* check l3 master if no match yet */
- if ((!in_dev || !our) && netif_is_l3_slave(dev)) {
+ if (!our && netif_is_l3_slave(dev)) {
struct in_device *l3_in_dev;
l3_in_dev = __in_dev_get_rcu(skb->dev);
@@ -2814,7 +2819,7 @@
if (tb[RTA_IP_PROTO]) {
err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
- &ip_proto, extack);
+ &ip_proto, AF_INET, extack);
if (err)
return err;
}
diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c
index c3387df..f66b2e6 100644
--- a/net/ipv4/syncookies.c
+++ b/net/ipv4/syncookies.c
@@ -216,7 +216,12 @@
refcount_set(&req->rsk_refcnt, 1);
tcp_sk(child)->tsoffset = tsoff;
sock_rps_save_rxhash(child, skb);
- inet_csk_reqsk_queue_add(sk, req, child);
+ if (!inet_csk_reqsk_queue_add(sk, req, child)) {
+ bh_unlock_sock(child);
+ sock_put(child);
+ child = NULL;
+ reqsk_put(req);
+ }
} else {
reqsk_free(req);
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index d561464..ca38aca 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -1901,6 +1901,11 @@
inq = tp->rcv_nxt - tp->copied_seq;
release_sock(sk);
}
+ /* After receiving a FIN, tell userspace to continue reading
+ * by returning a non-zero inq.
+ */
+ if (inq == 0 && sock_flag(sk, SOCK_DONE))
+ inq = 1;
return inq;
}
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 0e9fbdf..16f2c84 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -6493,7 +6493,13 @@
af_ops->send_synack(fastopen_sk, dst, &fl, req,
&foc, TCP_SYNACK_FASTOPEN);
/* Add the child socket directly into the accept queue */
- inet_csk_reqsk_queue_add(sk, req, fastopen_sk);
+ if (!inet_csk_reqsk_queue_add(sk, req, fastopen_sk)) {
+ reqsk_fastopen_remove(fastopen_sk, req, false);
+ bh_unlock_sock(fastopen_sk);
+ sock_put(fastopen_sk);
+ reqsk_put(req);
+ goto drop;
+ }
sk->sk_data_ready(sk);
bh_unlock_sock(fastopen_sk);
sock_put(fastopen_sk);
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 5f880b0..ce66c23 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -1646,15 +1646,8 @@
int tcp_filter(struct sock *sk, struct sk_buff *skb)
{
struct tcphdr *th = (struct tcphdr *)skb->data;
- unsigned int eaten = skb->len;
- int err;
- err = sk_filter_trim_cap(sk, skb, th->doff * 4);
- if (!err) {
- eaten -= skb->len;
- TCP_SKB_CB(skb)->end_seq -= eaten;
- }
- return err;
+ return sk_filter_trim_cap(sk, skb, th->doff * 4);
}
EXPORT_SYMBOL(tcp_filter);
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index 10aafea..35e7092 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1954,10 +1954,10 @@
static inline int ip6mr_forward2_finish(struct net *net, struct sock *sk, struct sk_buff *skb)
{
- __IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_OUTFORWDATAGRAMS);
- __IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
- IPSTATS_MIB_OUTOCTETS, skb->len);
+ IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_OUTFORWDATAGRAMS);
+ IP6_ADD_STATS(net, ip6_dst_idev(skb_dst(skb)),
+ IPSTATS_MIB_OUTOCTETS, skb->len);
return dst_output(net, sk, skb);
}
diff --git a/net/ipv6/route.c b/net/ipv6/route.c
index 7b832c3..509a49f 100644
--- a/net/ipv6/route.c
+++ b/net/ipv6/route.c
@@ -1282,18 +1282,29 @@
static void rt6_remove_exception(struct rt6_exception_bucket *bucket,
struct rt6_exception *rt6_ex)
{
+ struct fib6_info *from;
struct net *net;
if (!bucket || !rt6_ex)
return;
net = dev_net(rt6_ex->rt6i->dst.dev);
+ net->ipv6.rt6_stats->fib_rt_cache--;
+
+ /* completely purge the exception to allow releasing the held resources:
+ * some [sk] cache may keep the dst around for an unlimited time
+ */
+ from = rcu_dereference_protected(rt6_ex->rt6i->from,
+ lockdep_is_held(&rt6_exception_lock));
+ rcu_assign_pointer(rt6_ex->rt6i->from, NULL);
+ fib6_info_release(from);
+ dst_dev_put(&rt6_ex->rt6i->dst);
+
hlist_del_rcu(&rt6_ex->hlist);
dst_release(&rt6_ex->rt6i->dst);
kfree_rcu(rt6_ex, rcu);
WARN_ON_ONCE(!bucket->depth);
bucket->depth--;
- net->ipv6.rt6_stats->fib_rt_cache--;
}
/* Remove oldest rt6_ex in bucket and free the memory
@@ -1612,15 +1623,15 @@
static void rt6_update_exception_stamp_rt(struct rt6_info *rt)
{
struct rt6_exception_bucket *bucket;
- struct fib6_info *from = rt->from;
struct in6_addr *src_key = NULL;
struct rt6_exception *rt6_ex;
-
- if (!from ||
- !(rt->rt6i_flags & RTF_CACHE))
- return;
+ struct fib6_info *from;
rcu_read_lock();
+ from = rcu_dereference(rt->from);
+ if (!from || !(rt->rt6i_flags & RTF_CACHE))
+ goto unlock;
+
bucket = rcu_dereference(from->rt6i_exception_bucket);
#ifdef CONFIG_IPV6_SUBTREES
@@ -1639,6 +1650,7 @@
if (rt6_ex)
rt6_ex->stamp = jiffies;
+unlock:
rcu_read_unlock();
}
@@ -2796,20 +2808,24 @@
u32 tbid = l3mdev_fib_table(dev) ? : RT_TABLE_MAIN;
const struct in6_addr *gw_addr = &cfg->fc_gateway;
u32 flags = RTF_LOCAL | RTF_ANYCAST | RTF_REJECT;
+ struct fib6_info *from;
struct rt6_info *grt;
int err;
err = 0;
grt = ip6_nh_lookup_table(net, cfg, gw_addr, tbid, 0);
if (grt) {
+ rcu_read_lock();
+ from = rcu_dereference(grt->from);
if (!grt->dst.error &&
/* ignore match if it is the default route */
- grt->from && !ipv6_addr_any(&grt->from->fib6_dst.addr) &&
+ from && !ipv6_addr_any(&from->fib6_dst.addr) &&
(grt->rt6i_flags & flags || dev != grt->dst.dev)) {
NL_SET_ERR_MSG(extack,
"Nexthop has invalid gateway or device mismatch");
err = -EINVAL;
}
+ rcu_read_unlock();
ip6_rt_put(grt);
}
@@ -4189,6 +4205,10 @@
cfg->fc_gateway = nla_get_in6_addr(tb[RTA_GATEWAY]);
cfg->fc_flags |= RTF_GATEWAY;
}
+ if (tb[RTA_VIA]) {
+ NL_SET_ERR_MSG(extack, "IPv6 does not support RTA_VIA attribute");
+ goto errout;
+ }
if (tb[RTA_DST]) {
int plen = (rtm->rtm_dst_len + 7) >> 3;
@@ -4682,7 +4702,7 @@
table = rt->fib6_table->tb6_id;
else
table = RT6_TABLE_UNSPEC;
- rtm->rtm_table = table;
+ rtm->rtm_table = table < 256 ? table : RT_TABLE_COMPAT;
if (nla_put_u32(skb, RTA_TABLE, table))
goto nla_put_failure;
@@ -4883,7 +4903,8 @@
if (tb[RTA_IP_PROTO]) {
err = rtm_getroute_parse_ip_proto(tb[RTA_IP_PROTO],
- &fl6.flowi6_proto, extack);
+ &fl6.flowi6_proto, AF_INET6,
+ extack);
if (err)
goto errout;
}
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index da6d5a3..de9aa5c 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -778,8 +778,9 @@
pbw0 = tunnel->ip6rd.prefixlen >> 5;
pbi0 = tunnel->ip6rd.prefixlen & 0x1f;
- d = (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
- tunnel->ip6rd.relay_prefixlen;
+ d = tunnel->ip6rd.relay_prefixlen < 32 ?
+ (ntohl(v6dst->s6_addr32[pbw0]) << pbi0) >>
+ tunnel->ip6rd.relay_prefixlen : 0;
pbi1 = pbi0 - tunnel->ip6rd.relay_prefixlen;
if (pbi1 > 0)
@@ -1873,6 +1874,7 @@
err_reg_dev:
ipip6_dev_free(sitn->fb_tunnel_dev);
+ free_netdev(sitn->fb_tunnel_dev);
err_alloc_dev:
return err;
}
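
A note on the ip6rd hunk above: ntohl() yields a 32-bit value, and shifting a 32-bit value right by 32 or more positions is undefined behaviour in C, so a relay prefix length of exactly 32 previously produced an unpredictable d; with the added check that case is well defined as 0.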
diff --git a/net/l2tp/l2tp_ip6.c b/net/l2tp/l2tp_ip6.c
index 0ae6899..37a69df 100644
--- a/net/l2tp/l2tp_ip6.c
+++ b/net/l2tp/l2tp_ip6.c
@@ -674,9 +674,6 @@
if (flags & MSG_OOB)
goto out;
- if (addr_len)
- *addr_len = sizeof(*lsa);
-
if (flags & MSG_ERRQUEUE)
return ipv6_recv_error(sk, msg, len, addr_len);
@@ -706,6 +703,7 @@
lsa->l2tp_conn_id = 0;
if (ipv6_addr_type(&lsa->l2tp_addr) & IPV6_ADDR_LINKLOCAL)
lsa->l2tp_scope_id = inet6_iif(skb);
+ *addr_len = sizeof(*lsa);
}
if (np->rxopt.all)
diff --git a/net/mpls/af_mpls.c b/net/mpls/af_mpls.c
index 8fbe6cd..d5a4db5 100644
--- a/net/mpls/af_mpls.c
+++ b/net/mpls/af_mpls.c
@@ -1822,6 +1822,9 @@
goto errout;
break;
}
+ case RTA_GATEWAY:
+ NL_SET_ERR_MSG(extack, "MPLS does not support RTA_GATEWAY attribute");
+ goto errout;
case RTA_VIA:
{
if (nla_get_via(nla, &cfg->rc_via_alen,
diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
index 518364f..55a7731 100644
--- a/net/netfilter/ipvs/ip_vs_ctl.c
+++ b/net/netfilter/ipvs/ip_vs_ctl.c
@@ -2221,6 +2221,18 @@
u->udp_timeout);
#ifdef CONFIG_IP_VS_PROTO_TCP
+ if (u->tcp_timeout < 0 || u->tcp_timeout > (INT_MAX / HZ) ||
+ u->tcp_fin_timeout < 0 || u->tcp_fin_timeout > (INT_MAX / HZ)) {
+ return -EINVAL;
+ }
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_UDP
+ if (u->udp_timeout < 0 || u->udp_timeout > (INT_MAX / HZ))
+ return -EINVAL;
+#endif
+
+#ifdef CONFIG_IP_VS_PROTO_TCP
if (u->tcp_timeout) {
pd = ip_vs_proto_data_get(ipvs, IPPROTO_TCP);
pd->timeout_table[IP_VS_TCP_S_ESTABLISHED]
diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
index 277d02a..895171a 100644
--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1007,6 +1007,22 @@
}
if (nf_ct_key_equal(h, tuple, zone, net)) {
+ /* Tuple is taken already, so caller will need to find
+ * a new source port to use.
+ *
+ * Only exception:
+ * If the *original tuples* are identical, then both
+ * conntracks refer to the same flow.
+ * This is a rare situation, it can occur e.g. when
+ * more than one UDP packet is sent from same socket
+ * in different threads.
+ *
+ * Let nf_ct_resolve_clash() deal with this later.
+ */
+ if (nf_ct_tuple_equal(&ignored_conntrack->tuplehash[IP_CT_DIR_ORIGINAL].tuple,
+ &ct->tuplehash[IP_CT_DIR_ORIGINAL].tuple))
+ continue;
+
NF_CT_STAT_INC_ATOMIC(net, found);
rcu_read_unlock();
return 1;
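
The race described in the comment is easy to reproduce from user space: two threads issue the first sends of a flow on one unconnected UDP socket, so two skbs with identical tuples can traverse conntrack before either is confirmed. A hedged illustration (destination address and port are arbitrary examples):

#include <pthread.h>
#include <string.h>
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>

static int fd;
static struct sockaddr_in dst;

static void *send_one(void *unused)
{
	(void)unused;
	sendto(fd, "x", 1, 0, (struct sockaddr *)&dst, sizeof(dst));
	return NULL;
}

int main(void)
{
	pthread_t a, b;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	memset(&dst, 0, sizeof(dst));
	dst.sin_family = AF_INET;
	dst.sin_port = htons(5353);			/* example port */
	inet_pton(AF_INET, "192.0.2.1", &dst.sin_addr);	/* example address */

	pthread_create(&a, NULL, send_one, NULL);
	pthread_create(&b, NULL, send_one, NULL);
	pthread_join(a, NULL);
	pthread_join(b, NULL);
	return 0;
}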
diff --git a/net/netlabel/netlabel_kapi.c b/net/netlabel/netlabel_kapi.c
index ea7c670..ee3e5b6 100644
--- a/net/netlabel/netlabel_kapi.c
+++ b/net/netlabel/netlabel_kapi.c
@@ -903,7 +903,8 @@
(state == 0 && (byte & bitmask) == 0))
return bit_spot;
- bit_spot++;
+ if (++bit_spot >= bitmap_len)
+ return -1;
bitmask >>= 1;
if (bitmask == 0) {
byte = bitmap[++byte_offset];
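
The fix above bounds the walk at bitmap_len. An equivalent stand-alone version of the bounded search, kept deliberately simple (MSB-first bit order within each byte, as the NetLabel bitmaps use):

static int bitmap_walk(const unsigned char *bitmap, int len_bits,
		       int start, int state)
{
	int bit;

	for (bit = start; bit < len_bits; bit++) {
		int set = (bitmap[bit / 8] >> (7 - (bit % 8))) & 1;

		if (set == state)
			return bit;
	}
	return -1;	/* the unbounded version could run past the buffer */
}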
diff --git a/net/nfc/llcp_commands.c b/net/nfc/llcp_commands.c
index 6a196e4..d1fc019e 100644
--- a/net/nfc/llcp_commands.c
+++ b/net/nfc/llcp_commands.c
@@ -419,6 +419,10 @@
sock->service_name,
sock->service_name_len,
&service_name_tlv_length);
+ if (!service_name_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += service_name_tlv_length;
}
@@ -429,9 +433,17 @@
miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
&miux_tlv_length);
+ if (!miux_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += miux_tlv_length;
rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+ if (!rw_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += rw_tlv_length;
pr_debug("SKB size %d SN length %zu\n", size, sock->service_name_len);
@@ -484,9 +496,17 @@
miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&miux, 0,
&miux_tlv_length);
+ if (!miux_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += miux_tlv_length;
rw_tlv = nfc_llcp_build_tlv(LLCP_TLV_RW, &rw, 0, &rw_tlv_length);
+ if (!rw_tlv) {
+ err = -ENOMEM;
+ goto error_tlv;
+ }
size += rw_tlv_length;
skb = llcp_allocate_pdu(sock, LLCP_PDU_CC, size);
diff --git a/net/nfc/llcp_core.c b/net/nfc/llcp_core.c
index ef4026a..4fa0152 100644
--- a/net/nfc/llcp_core.c
+++ b/net/nfc/llcp_core.c
@@ -532,10 +532,10 @@
static int nfc_llcp_build_gb(struct nfc_llcp_local *local)
{
- u8 *gb_cur, *version_tlv, version, version_length;
- u8 *lto_tlv, lto_length;
- u8 *wks_tlv, wks_length;
- u8 *miux_tlv, miux_length;
+ u8 *gb_cur, version, version_length;
+ u8 lto_length, wks_length, miux_length;
+ u8 *version_tlv = NULL, *lto_tlv = NULL,
+ *wks_tlv = NULL, *miux_tlv = NULL;
__be16 wks = cpu_to_be16(local->local_wks);
u8 gb_len = 0;
int ret = 0;
@@ -543,17 +543,33 @@
version = LLCP_VERSION_11;
version_tlv = nfc_llcp_build_tlv(LLCP_TLV_VERSION, &version,
1, &version_length);
+ if (!version_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += version_length;
lto_tlv = nfc_llcp_build_tlv(LLCP_TLV_LTO, &local->lto, 1, <o_length);
+ if (!lto_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += lto_length;
pr_debug("Local wks 0x%lx\n", local->local_wks);
wks_tlv = nfc_llcp_build_tlv(LLCP_TLV_WKS, (u8 *)&wks, 2, &wks_length);
+ if (!wks_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += wks_length;
miux_tlv = nfc_llcp_build_tlv(LLCP_TLV_MIUX, (u8 *)&local->miux, 0,
&miux_length);
+ if (!miux_tlv) {
+ ret = -ENOMEM;
+ goto out;
+ }
gb_len += miux_length;
gb_len += ARRAY_SIZE(llcp_magic);
diff --git a/net/qrtr/Kconfig b/net/qrtr/Kconfig
index ee794d5..8b9d3c2 100644
--- a/net/qrtr/Kconfig
+++ b/net/qrtr/Kconfig
@@ -47,4 +47,12 @@
transport provides bulk endpoints to facilitate sending and receiving
IPC Router data packets.
+config QRTR_FIFO
+ tristate "FIFO IPC Router channels"
+ help
+ Say Y here to support FIFO-based IPC Router channels. The FIFO
+ transport layer enables IPC Router communication between two
+ virtual machines. The shared memory between the virtual machines
+ is allocated by the hypervisor, and the VMs signal each other
+ through virtualized interrupts.
+
endif # QRTR
diff --git a/net/qrtr/Makefile b/net/qrtr/Makefile
index d3c3a19..cae5493 100644
--- a/net/qrtr/Makefile
+++ b/net/qrtr/Makefile
@@ -9,3 +9,6 @@
obj-$(CONFIG_QRTR_USB) += qrtr-usb.o
qrtr-usb-y := usb.o
+
+obj-$(CONFIG_QRTR_FIFO) += qrtr-fifo.o
+qrtr-fifo-y := fifo.o
diff --git a/net/qrtr/fifo.c b/net/qrtr/fifo.c
new file mode 100644
index 0000000..0a494a6
--- /dev/null
+++ b/net/qrtr/fifo.c
@@ -0,0 +1,382 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/* Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/of_device.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/skbuff.h>
+#include <linux/sched.h>
+#include <microvisor/microvisor.h>
+
+#include "qrtr.h"
+
+#define FIFO_MAGIC_KEY 0x24495043 /* "$IPC" */
+#define FIFO_SIZE 0x4000
+#define FIFO_0_START 0x1000
+#define FIFO_1_START (FIFO_0_START + FIFO_SIZE)
+#define FIFO_MAGIC_IDX 0x0
+#define TAIL_0_IDX 0x1
+#define HEAD_0_IDX 0x2
+#define TAIL_1_IDX 0x3
+#define HEAD_1_IDX 0x4
+
+struct fifo_pipe {
+ __le32 *tail;
+ __le32 *head;
+
+ void *fifo;
+ size_t length;
+};
+
+/**
+ * struct qrtr_fifo_xprt - qrtr FIFO transport structure
+ * @ep: qrtr endpoint specific info.
+ * @tx_pipe: TX FIFO specific info.
+ * @rx_pipe: RX FIFO specific info.
+ * @fifo_base: Base of the shared FIFO.
+ * @fifo_size: FIFO Size.
+ * @tx_fifo_idx: TX FIFO index.
+ * @kcap: Register info to raise irq to other VM.
+ */
+struct qrtr_fifo_xprt {
+ struct qrtr_endpoint ep;
+ struct fifo_pipe tx_pipe;
+ struct fifo_pipe rx_pipe;
+ void *fifo_base;
+ size_t fifo_size;
+ int tx_fifo_idx;
+ okl4_kcap_t kcap;
+};
+
+static void qrtr_fifo_raise_virq(struct qrtr_fifo_xprt *xprtp);
+
+static size_t fifo_rx_avail(struct fifo_pipe *pipe)
+{
+ u32 head;
+ u32 tail;
+
+ head = le32_to_cpu(*pipe->head);
+ tail = le32_to_cpu(*pipe->tail);
+
+ if (head < tail)
+ return pipe->length - tail + head;
+
+ return head - tail;
+}
+
+static void fifo_rx_peak(struct fifo_pipe *pipe,
+ void *data, unsigned int offset, size_t count)
+{
+ size_t len;
+ u32 tail;
+
+ tail = le32_to_cpu(*pipe->tail);
+ tail += offset;
+ if (tail >= pipe->length)
+ tail -= pipe->length;
+
+ len = min_t(size_t, count, pipe->length - tail);
+ if (len)
+ memcpy_fromio(data, pipe->fifo + tail, len);
+
+ if (len != count)
+ memcpy_fromio(data + len, pipe->fifo, (count - len));
+}
+
+static void fifo_rx_advance(struct fifo_pipe *pipe, size_t count)
+{
+ u32 tail;
+
+ tail = le32_to_cpu(*pipe->tail);
+
+ tail += count;
+ if (tail > pipe->length)
+ tail -= pipe->length;
+
+ *pipe->tail = cpu_to_le32(tail);
+}
+
+static size_t fifo_tx_avail(struct fifo_pipe *pipe)
+{
+ u32 head;
+ u32 tail;
+ u32 avail;
+
+ head = le32_to_cpu(*pipe->head);
+ tail = le32_to_cpu(*pipe->tail);
+
+ if (tail <= head)
+ avail = pipe->length - head + tail;
+ else
+ avail = tail - head;
+
+ return avail;
+}
+
+static void fifo_tx_write(struct fifo_pipe *pipe,
+ const void *data, size_t count)
+{
+ size_t len;
+ u32 head;
+
+ head = le32_to_cpu(*pipe->head);
+
+ len = min_t(size_t, count, pipe->length - head);
+ if (len)
+ memcpy_toio(pipe->fifo + head, data, len);
+
+ if (len != count)
+ memcpy_toio(pipe->fifo, data + len, count - len);
+
+ head += count;
+ if (head >= pipe->length)
+ head -= pipe->length;
+
+ /* Ensure ordering of fifo and head update */
+ wmb();
+
+ *pipe->head = cpu_to_le32(head);
+}
+
+/* from qrtr to FIFO */
+static int xprt_write(struct qrtr_endpoint *ep, struct sk_buff *skb)
+{
+ struct qrtr_fifo_xprt *xprtp;
+ int rc;
+
+ xprtp = container_of(ep, struct qrtr_fifo_xprt, ep);
+
+ rc = skb_linearize(skb);
+ if (rc) {
+ kfree_skb(skb);
+ return rc;
+ }
+
+ if (fifo_tx_avail(&xprtp->tx_pipe) < skb->len) {
+ pr_err("No Space in FIFO\n");
+ return -EAGAIN;
+ }
+
+ fifo_tx_write(&xprtp->tx_pipe, skb->data, skb->len);
+ kfree_skb(skb);
+
+ qrtr_fifo_raise_virq(xprtp);
+
+ return 0;
+}
+
+static void xprt_read_data(struct qrtr_fifo_xprt *xprtp)
+{
+ int rc;
+ u32 hdr[8];
+ void *data;
+ size_t pkt_len;
+ size_t rx_avail;
+ size_t hdr_len = sizeof(hdr);
+
+ while (fifo_rx_avail(&xprtp->rx_pipe)) {
+ fifo_rx_peak(&xprtp->rx_pipe, &hdr, 0, hdr_len);
+ pkt_len = qrtr_peek_pkt_size((void *)&hdr);
+ if ((int)pkt_len < 0) {
+ pr_err("invalid pkt_len %zu\n", pkt_len);
+ break;
+ }
+
+ data = kzalloc(pkt_len, GFP_ATOMIC);
+ if (!data)
+ break;
+
+ rx_avail = fifo_rx_avail(&xprtp->rx_pipe);
+ if (rx_avail < pkt_len) {
+ pr_err_ratelimited("Not FULL pkt in FIFO %zu %zu\n",
+ rx_avail, pkt_len);
+ break;
+ }
+
+ fifo_rx_peak(&xprtp->rx_pipe, data, 0, pkt_len);
+ fifo_rx_advance(&xprtp->rx_pipe, pkt_len);
+
+ rc = qrtr_endpoint_post(&xprtp->ep, data, pkt_len);
+ if (rc == -EINVAL)
+ pr_err("invalid ipcrouter packet\n");
+ kfree(data);
+ data = NULL;
+ }
+}
+
+static void qrtr_fifo_raise_virq(struct qrtr_fifo_xprt *xprtp)
+{
+ okl4_error_t err;
+ unsigned long payload = 0xffff;
+
+ err = _okl4_sys_vinterrupt_raise(xprtp->kcap, payload);
+}
+
+static irqreturn_t qrtr_fifo_virq_handler(int irq, void *dev_id)
+{
+ xprt_read_data((struct qrtr_fifo_xprt *)dev_id);
+ return IRQ_HANDLED;
+}
+
+/**
+ * qrtr_fifo_config_init() - init FIFO xprt configs
+ *
+ * @xprtp: FIFO XPRT structure to initialize.
+ *
+ * This function initializes the FIFO XPRT's TX and RX pipe descriptors
+ * from the fixed shared-memory layout and resets the local indices.
+ */
+static void qrtr_fifo_config_init(struct qrtr_fifo_xprt *xprtp)
+{
+ __le32 *descs;
+
+ descs = xprtp->fifo_base;
+ descs[FIFO_MAGIC_IDX] = FIFO_MAGIC_KEY;
+
+ if (xprtp->tx_fifo_idx) {
+ xprtp->tx_pipe.tail = &descs[TAIL_0_IDX];
+ xprtp->tx_pipe.head = &descs[HEAD_0_IDX];
+ xprtp->tx_pipe.fifo = xprtp->fifo_base + FIFO_0_START;
+ xprtp->tx_pipe.length = FIFO_SIZE;
+
+ xprtp->rx_pipe.tail = &descs[TAIL_1_IDX];
+ xprtp->rx_pipe.head = &descs[HEAD_1_IDX];
+ xprtp->rx_pipe.fifo = xprtp->fifo_base + FIFO_1_START;
+ xprtp->rx_pipe.length = FIFO_SIZE;
+ } else {
+ xprtp->tx_pipe.tail = &descs[TAIL_1_IDX];
+ xprtp->tx_pipe.head = &descs[HEAD_1_IDX];
+ xprtp->tx_pipe.fifo = xprtp->fifo_base + FIFO_1_START;
+ xprtp->tx_pipe.length = FIFO_SIZE;
+
+ xprtp->rx_pipe.tail = &descs[TAIL_0_IDX];
+ xprtp->rx_pipe.head = &descs[HEAD_0_IDX];
+ xprtp->rx_pipe.fifo = xprtp->fifo_base + FIFO_0_START;
+ xprtp->rx_pipe.length = FIFO_SIZE;
+ }
+
+ /* Reset respective index */
+ *xprtp->tx_pipe.head = 0;
+ *xprtp->rx_pipe.tail = 0;
+}
+
+/**
+ * qrtr_fifo_xprt_probe() - Probe a FIFO xprt
+ *
+ * @pdev: Platform device corresponding to the FIFO xprt.
+ *
+ * @return: 0 on success, standard Linux error codes on error.
+ *
+ * This function is called when the underlying device tree driver registers
+ * a platform device, mapped to a FIFO transport.
+ */
+static int qrtr_fifo_xprt_probe(struct platform_device *pdev)
+{
+ int irq;
+ int ret;
+ struct resource *r;
+ struct device *parent;
+ struct qrtr_fifo_xprt *xprtp;
+ struct device_node *ipc_irq_np;
+ struct device_node *ipc_shm_np;
+ struct platform_device *ipc_shm_dev;
+
+ xprtp = devm_kzalloc(&pdev->dev, sizeof(*xprtp), GFP_KERNEL);
+ if (!xprtp)
+ return -ENOMEM;
+
+ parent = &pdev->dev;
+ ipc_irq_np = parent->of_node;
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0)
+ return -ENODEV;
+
+ ret = devm_request_irq(parent, irq, qrtr_fifo_virq_handler,
+ IRQF_TRIGGER_RISING, dev_name(parent),
+ xprtp);
+ if (ret < 0)
+ return -ENODEV;
+
+ /* this kcap is required to raise VIRQ */
+ ret = of_property_read_u32(ipc_irq_np, "reg", &xprtp->kcap);
+ if (ret < 0)
+ return -ENODEV;
+
+ ipc_shm_np = of_parse_phandle(ipc_irq_np, "qcom,ipc-shm", 0);
+ if (!ipc_shm_np)
+ return -ENODEV;
+
+ ipc_shm_dev = of_find_device_by_node(ipc_shm_np);
+ if (!ipc_shm_dev) {
+ of_node_put(ipc_shm_np);
+ return -ENODEV;
+ }
+
+ r = platform_get_resource(ipc_shm_dev, IORESOURCE_MEM, 0);
+ if (!r) {
+ pr_err("failed to get shared FIFO\n");
+ of_node_put(ipc_shm_np);
+ return -ENODEV;
+ }
+
+ xprtp->tx_fifo_idx = of_property_read_bool(ipc_shm_np,
+ "qcom,tx-is-first");
+ of_node_put(ipc_shm_np);
+
+ xprtp->fifo_size = resource_size(r);
+ xprtp->fifo_base = devm_ioremap_nocache(&pdev->dev, r->start,
+ resource_size(r));
+ if (!xprtp->fifo_base) {
+ pr_err("ioreamp_nocache() failed\n");
+ return -ENOMEM;
+ }
+ qrtr_fifo_config_init(xprtp);
+
+ xprtp->ep.xmit = xprt_write;
+ ret = qrtr_endpoint_register(&xprtp->ep, QRTR_EP_NID_AUTO);
+ if (ret)
+ return ret;
+
+ if (fifo_rx_avail(&xprtp->rx_pipe))
+ xprt_read_data(xprtp);
+
+ return 0;
+}
+
+static const struct of_device_id qrtr_fifo_xprt_match_table[] = {
+ { .compatible = "qcom,ipcr-fifo-xprt" },
+ {},
+};
+
+static struct platform_driver qrtr_fifo_xprt_driver = {
+ .probe = qrtr_fifo_xprt_probe,
+ .driver = {
+ .name = "qcom_fifo_qrtr",
+ .of_match_table = qrtr_fifo_xprt_match_table,
+ },
+};
+
+static int __init qrtr_fifo_xprt_init(void)
+{
+ int rc;
+
+ rc = platform_driver_register(&qrtr_fifo_xprt_driver);
+ if (rc) {
+ pr_err("driver register failed %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+module_init(qrtr_fifo_xprt_init);
+MODULE_DESCRIPTION("QTI IPC-router FIFO XPRT");
+MODULE_LICENSE("GPL v2");
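
The head/tail arithmetic in fifo_rx_avail() and fifo_tx_avail() above is a conventional single-producer/single-consumer ring. A compact user-space model of the two availability computations (a sketch, not the driver code):

#include <stddef.h>

struct ring { size_t head, tail, length; };	/* head: producer, tail: consumer */

static size_t ring_rx_avail(const struct ring *r)
{
	return r->head >= r->tail ? r->head - r->tail
				  : r->length - r->tail + r->head;
}

static size_t ring_tx_avail(const struct ring *r)
{
	return r->tail <= r->head ? r->length - r->head + r->tail
				  : r->tail - r->head;
}

Note that with head == tail the ring reads as empty while tx_avail() reports the full length, so a producer that writes exactly length bytes wraps head back onto tail and the data becomes invisible; real packets are far smaller than the 16 KiB FIFO, but the invariant is worth keeping in mind.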
diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c
index 6c32eb9..5c3d455 100644
--- a/net/qrtr/qrtr.c
+++ b/net/qrtr/qrtr.c
@@ -530,14 +530,12 @@
hdr->type = cpu_to_le32(type);
hdr->src_node_id = cpu_to_le32(from->sq_node);
hdr->src_port_id = cpu_to_le32(from->sq_port);
- if (to->sq_port == QRTR_PORT_CTRL) {
+ if (to->sq_node == QRTR_NODE_BCAST)
hdr->dst_node_id = cpu_to_le32(node->nid);
- hdr->dst_port_id = cpu_to_le32(QRTR_NODE_BCAST);
- } else {
+ else
hdr->dst_node_id = cpu_to_le32(to->sq_node);
- hdr->dst_port_id = cpu_to_le32(to->sq_port);
- }
+ hdr->dst_port_id = cpu_to_le32(to->sq_port);
hdr->size = cpu_to_le32(len);
hdr->confirm_rx = !!confirm_rx;
@@ -780,49 +778,44 @@
static struct qrtr_sock *qrtr_port_lookup(int port);
static void qrtr_port_put(struct qrtr_sock *ipc);
-static bool qrtr_must_forward(u32 src_nid, u32 dst_nid, u32 type)
+static bool qrtr_must_forward(struct qrtr_node *src,
+ struct qrtr_node *dst, u32 type)
{
- struct qrtr_node *dst;
- struct qrtr_node *src;
- bool ret = false;
-
- if (src_nid == qrtr_local_nid)
+ /* A node structure is not maintained for the local processor,
+ * so src is NULL in that case.
+ */
+ if (!src)
return true;
- if (type == QRTR_TYPE_HELLO || type == QRTR_TYPE_RESUME_TX)
- return ret;
+ if (!dst)
+ return false;
- dst = qrtr_node_lookup(dst_nid);
- src = qrtr_node_lookup(src_nid);
- if (!dst || !src)
- goto out;
- if (dst == src)
- goto out;
- if (dst->nid == QRTR_EP_NID_AUTO)
- goto out;
+ if (type == QRTR_TYPE_HELLO || type == QRTR_TYPE_RESUME_TX)
+ return false;
+
+ if (dst == src || dst->nid == QRTR_EP_NID_AUTO)
+ return false;
if (abs(dst->net_id - src->net_id) > 1)
- ret = true;
+ return true;
-out:
- qrtr_node_release(dst);
- qrtr_node_release(src);
-
- return ret;
+ return false;
}
static void qrtr_fwd_ctrl_pkt(struct sk_buff *skb)
{
struct qrtr_node *node;
+ struct qrtr_node *src;
struct qrtr_cb *cb = (struct qrtr_cb *)skb->cb;
+ src = qrtr_node_lookup(cb->src_node);
down_read(&qrtr_node_lock);
list_for_each_entry(node, &qrtr_all_epts, item) {
struct sockaddr_qrtr from;
struct sockaddr_qrtr to;
struct sk_buff *skbn;
- if (!qrtr_must_forward(cb->src_node, node->nid, cb->type))
+ if (!qrtr_must_forward(src, node, cb->type))
continue;
skbn = skb_clone(skb, GFP_KERNEL);
@@ -840,6 +833,7 @@
qrtr_node_enqueue(node, skbn, cb->type, &from, &to, 0);
}
up_read(&qrtr_node_lock);
+ qrtr_node_release(src);
}
static void qrtr_fwd_pkt(struct sk_buff *skb, struct qrtr_cb *cb)
@@ -992,7 +986,7 @@
struct sk_buff *skb;
list_for_each_entry(dst, &qrtr_all_epts, item) {
- if (!qrtr_must_forward(nid, dst->nid, QRTR_TYPE_DEL_PROC))
+ if (!qrtr_must_forward(src, dst, QRTR_TYPE_DEL_PROC))
continue;
skb = qrtr_alloc_ctrl_packet(&pkt);
@@ -1199,7 +1193,8 @@
sock_hold(&ipc->sk);
ipc->sk.sk_err = ENETRESET;
- ipc->sk.sk_error_report(&ipc->sk);
+ if (ipc->sk.sk_error_report)
+ ipc->sk.sk_error_report(&ipc->sk);
sock_put(&ipc->sk);
}
}
@@ -1300,7 +1295,8 @@
if (sk && sk->sk_err == ENETRESET) {
sock_hold(sk);
sk->sk_err = ENETRESET;
- sk->sk_error_report(sk);
+ if (sk->sk_error_report)
+ sk->sk_error_report(sk);
sock_put(sk);
kfree_skb(skb);
return 0;
@@ -1355,6 +1351,7 @@
struct sock *sk = sock->sk;
struct qrtr_ctrl_pkt *pkt;
struct qrtr_node *node;
+ struct qrtr_node *srv_node;
struct sk_buff *skb;
size_t plen;
u32 type = QRTR_TYPE_DATA;
@@ -1392,6 +1389,7 @@
}
node = NULL;
+ srv_node = NULL;
if (addr->sq_node == QRTR_NODE_BCAST) {
enqueue_fn = qrtr_bcast_enqueue;
if (addr->sq_port != QRTR_PORT_CTRL) {
@@ -1445,11 +1443,14 @@
/* drop new server cmds that are not forwardable to dst node*/
pkt = (struct qrtr_ctrl_pkt *)skb->data;
- if (!qrtr_must_forward(pkt->server.node, addr->sq_node, type)) {
+ srv_node = qrtr_node_lookup(pkt->server.node);
+ if (!qrtr_must_forward(srv_node, node, type)) {
rc = 0;
kfree_skb(skb);
+ qrtr_node_release(srv_node);
goto out_node;
}
+ qrtr_node_release(srv_node);
}
rc = enqueue_fn(node, skb, type, &ipc->us, addr, msg->msg_flags);
diff --git a/net/rxrpc/conn_client.c b/net/rxrpc/conn_client.c
index 521189f..6e419b1 100644
--- a/net/rxrpc/conn_client.c
+++ b/net/rxrpc/conn_client.c
@@ -353,7 +353,7 @@
* normally have to take channel_lock but we do this before anyone else
* can see the connection.
*/
- list_add_tail(&call->chan_wait_link, &candidate->waiting_calls);
+ list_add(&call->chan_wait_link, &candidate->waiting_calls);
if (cp->exclusive) {
call->conn = candidate;
@@ -432,7 +432,7 @@
call->conn = conn;
call->security_ix = conn->security_ix;
call->service_id = conn->service_id;
- list_add(&call->chan_wait_link, &conn->waiting_calls);
+ list_add_tail(&call->chan_wait_link, &conn->waiting_calls);
spin_unlock(&conn->channel_lock);
_leave(" = 0 [extant %d]", conn->debug_id);
return 0;
diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c
index 8525de81..334f3a05 100644
--- a/net/sched/act_ipt.c
+++ b/net/sched/act_ipt.c
@@ -199,8 +199,7 @@
err2:
kfree(tname);
err1:
- if (ret == ACT_P_CREATED)
- tcf_idr_release(*a, bind);
+ tcf_idr_release(*a, bind);
return err;
}
diff --git a/net/sched/act_skbedit.c b/net/sched/act_skbedit.c
index 73e44ce..86d90fc 100644
--- a/net/sched/act_skbedit.c
+++ b/net/sched/act_skbedit.c
@@ -191,8 +191,7 @@
params_new = kzalloc(sizeof(*params_new), GFP_KERNEL);
if (unlikely(!params_new)) {
- if (ret == ACT_P_CREATED)
- tcf_idr_release(*a, bind);
+ tcf_idr_release(*a, bind);
return -ENOMEM;
}
diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c
index 0f6601f..72d9c43 100644
--- a/net/sched/act_tunnel_key.c
+++ b/net/sched/act_tunnel_key.c
@@ -377,7 +377,8 @@
return ret;
release_tun_meta:
- dst_release(&metadata->dst);
+ if (metadata)
+ dst_release(&metadata->dst);
err_out:
if (exists)
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index 84893bc..09b3597 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -1213,6 +1213,24 @@
if (err < 0)
goto errout;
+ if (tb[TCA_FLOWER_FLAGS]) {
+ fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
+
+ if (!tc_flags_valid(fnew->flags)) {
+ err = -EINVAL;
+ goto errout;
+ }
+ }
+
+ err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
+ tp->chain->tmplt_priv, extack);
+ if (err)
+ goto errout;
+
+ err = fl_check_assign_mask(head, fnew, fold, mask);
+ if (err)
+ goto errout;
+
if (!handle) {
handle = 1;
err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
@@ -1223,37 +1241,19 @@
handle, GFP_KERNEL);
}
if (err)
- goto errout;
+ goto errout_mask;
fnew->handle = handle;
- if (tb[TCA_FLOWER_FLAGS]) {
- fnew->flags = nla_get_u32(tb[TCA_FLOWER_FLAGS]);
-
- if (!tc_flags_valid(fnew->flags)) {
- err = -EINVAL;
- goto errout_idr;
- }
- }
-
- err = fl_set_parms(net, tp, fnew, mask, base, tb, tca[TCA_RATE], ovr,
- tp->chain->tmplt_priv, extack);
- if (err)
- goto errout_idr;
-
- err = fl_check_assign_mask(head, fnew, fold, mask);
- if (err)
- goto errout_idr;
-
if (!tc_skip_sw(fnew->flags)) {
if (!fold && fl_lookup(fnew->mask, &fnew->mkey)) {
err = -EEXIST;
- goto errout_mask;
+ goto errout_idr;
}
err = rhashtable_insert_fast(&fnew->mask->ht, &fnew->ht_node,
fnew->mask->filter_ht_params);
if (err)
- goto errout_mask;
+ goto errout_idr;
}
if (!tc_skip_hw(fnew->flags)) {
@@ -1290,12 +1290,13 @@
kfree(mask);
return 0;
-errout_mask:
- fl_mask_put(head, fnew->mask, false);
-
errout_idr:
if (!fold)
idr_remove(&head->handle_idr, fnew->handle);
+
+errout_mask:
+ fl_mask_put(head, fnew->mask, false);
+
errout:
tcf_exts_destroy(&fnew->exts);
kfree(fnew);
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 69078c8..77b289d 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -68,7 +68,7 @@
skb = __skb_dequeue(&q->skb_bad_txq);
if (qdisc_is_percpu_stats(q)) {
qdisc_qstats_cpu_backlog_dec(q, skb);
- qdisc_qstats_cpu_qlen_dec(q);
+ qdisc_qstats_atomic_qlen_dec(q);
} else {
qdisc_qstats_backlog_dec(q, skb);
q->q.qlen--;
@@ -108,7 +108,7 @@
if (qdisc_is_percpu_stats(q)) {
qdisc_qstats_cpu_backlog_inc(q, skb);
- qdisc_qstats_cpu_qlen_inc(q);
+ qdisc_qstats_atomic_qlen_inc(q);
} else {
qdisc_qstats_backlog_inc(q, skb);
q->q.qlen++;
@@ -147,7 +147,7 @@
qdisc_qstats_cpu_requeues_inc(q);
qdisc_qstats_cpu_backlog_inc(q, skb);
- qdisc_qstats_cpu_qlen_inc(q);
+ qdisc_qstats_atomic_qlen_inc(q);
skb = next;
}
@@ -252,7 +252,7 @@
skb = __skb_dequeue(&q->gso_skb);
if (qdisc_is_percpu_stats(q)) {
qdisc_qstats_cpu_backlog_dec(q, skb);
- qdisc_qstats_cpu_qlen_dec(q);
+ qdisc_qstats_atomic_qlen_dec(q);
} else {
qdisc_qstats_backlog_dec(q, skb);
q->q.qlen--;
@@ -633,7 +633,7 @@
if (unlikely(err))
return qdisc_drop_cpu(skb, qdisc, to_free);
- qdisc_qstats_cpu_qlen_inc(qdisc);
+ qdisc_qstats_atomic_qlen_inc(qdisc);
/* Note: skb can not be used after skb_array_produce(),
* so we better not use qdisc_qstats_cpu_backlog_inc()
*/
@@ -658,7 +658,7 @@
if (likely(skb)) {
qdisc_qstats_cpu_backlog_dec(qdisc, skb);
qdisc_bstats_cpu_update(qdisc, skb);
- qdisc_qstats_cpu_qlen_dec(qdisc);
+ qdisc_qstats_atomic_qlen_dec(qdisc);
}
return skb;
@@ -702,7 +702,6 @@
struct gnet_stats_queue *q = per_cpu_ptr(qdisc->cpu_qstats, i);
q->backlog = 0;
- q->qlen = 0;
}
}
diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c
index 74c0f65..4dfe10b9 100644
--- a/net/sched/sch_netem.c
+++ b/net/sched/sch_netem.c
@@ -440,6 +440,7 @@
int nb = 0;
int count = 1;
int rc = NET_XMIT_SUCCESS;
+ int rc_drop = NET_XMIT_DROP;
/* Do not fool qdisc_drop_all() */
skb->prev = NULL;
@@ -479,6 +480,7 @@
q->duplicate = 0;
rootq->enqueue(skb2, rootq, to_free);
q->duplicate = dupsave;
+ rc_drop = NET_XMIT_SUCCESS;
}
/*
@@ -491,7 +493,7 @@
if (skb_is_gso(skb)) {
segs = netem_segment(skb, sch, to_free);
if (!segs)
- return NET_XMIT_DROP;
+ return rc_drop;
} else {
segs = skb;
}
@@ -514,8 +516,10 @@
1<<(prandom_u32() % 8);
}
- if (unlikely(sch->q.qlen >= sch->limit))
- return qdisc_drop_all(skb, sch, to_free);
+ if (unlikely(sch->q.qlen >= sch->limit)) {
+ qdisc_drop_all(skb, sch, to_free);
+ return rc_drop;
+ }
qdisc_qstats_backlog_inc(sch, skb);
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index e5e70cf..1b16250 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1884,6 +1884,7 @@
pr_debug("%s: aborting association:%p\n", __func__, asoc);
sctp_primitive_ABORT(net, asoc, chunk);
+ iov_iter_revert(&msg->msg_iter, msg_len);
return 0;
}
diff --git a/net/sctp/stream.c b/net/sctp/stream.c
index 2936ed1..3b47457 100644
--- a/net/sctp/stream.c
+++ b/net/sctp/stream.c
@@ -230,8 +230,6 @@
for (i = 0; i < stream->outcnt; i++)
SCTP_SO(stream, i)->state = SCTP_STREAM_OPEN;
- sched->init(stream);
-
in:
sctp_stream_interleave_init(stream);
if (!incnt)
diff --git a/net/smc/smc.h b/net/smc/smc.h
index 5721416..adbdf19 100644
--- a/net/smc/smc.h
+++ b/net/smc/smc.h
@@ -113,9 +113,9 @@
} __aligned(8);
enum smc_urg_state {
- SMC_URG_VALID, /* data present */
- SMC_URG_NOTYET, /* data pending */
- SMC_URG_READ /* data was already read */
+ SMC_URG_VALID = 1, /* data present */
+ SMC_URG_NOTYET = 2, /* data pending */
+ SMC_URG_READ = 3, /* data was already read */
};
struct smc_connection {
diff --git a/net/socket.c b/net/socket.c
index 7d2703f..7a0ddf8 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -587,6 +587,7 @@
if (inode)
inode_lock(inode);
sock->ops->release(sock);
+ sock->sk = NULL;
if (inode)
inode_unlock(inode);
sock->ops = NULL;
diff --git a/net/tipc/socket.c b/net/tipc/socket.c
index e1bdaf0..88c307e 100644
--- a/net/tipc/socket.c
+++ b/net/tipc/socket.c
@@ -377,11 +377,13 @@
#define tipc_wait_for_cond(sock_, timeo_, condition_) \
({ \
+ DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
struct sock *sk_; \
int rc_; \
\
while ((rc_ = !(condition_))) { \
- DEFINE_WAIT_FUNC(wait_, woken_wake_function); \
+ /* coupled with smp_wmb() in tipc_sk_proto_rcv() */ \
+ smp_rmb(); \
sk_ = (sock_)->sk; \
rc_ = tipc_sk_sock_err((sock_), timeo_); \
if (rc_) \
@@ -1318,7 +1320,7 @@
if (unlikely(!dest)) {
dest = &tsk->peer;
- if (!syn || dest->family != AF_TIPC)
+ if (!syn && dest->family != AF_TIPC)
return -EDESTADDRREQ;
}
@@ -1961,6 +1963,8 @@
return;
case SOCK_WAKEUP:
tipc_dest_del(&tsk->cong_links, msg_orignode(hdr), 0);
+ /* coupled with smp_rmb() in tipc_wait_for_cond() */
+ smp_wmb();
tsk->cong_link_cnt--;
wakeup = true;
break;
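
The smp_wmb()/smp_rmb() pair added here is the standard publish/observe ordering: the writer's update to the link state must be visible before the waiter re-evaluates its condition. The same contract in portable C11 form (a sketch; data and ready stand in for the link state and the condition):

#include <stdatomic.h>

static int data;
static atomic_int ready;

static void writer(void)
{
	data = 42;	/* cf. tipc_dest_del() updating the link state */
	atomic_store_explicit(&ready, 1, memory_order_release);	/* cf. smp_wmb() + store */
}

static int reader(void)
{
	while (!atomic_load_explicit(&ready, memory_order_acquire))	/* cf. smp_rmb() + re-check */
		;
	return data;	/* guaranteed to observe 42 */
}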
diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c
index c754f3a..f601933 100644
--- a/net/unix/af_unix.c
+++ b/net/unix/af_unix.c
@@ -888,7 +888,7 @@
addr->hash ^= sk->sk_type;
__unix_remove_socket(sk);
- u->addr = addr;
+ smp_store_release(&u->addr, addr);
__unix_insert_socket(&unix_socket_table[addr->hash], sk);
spin_unlock(&unix_table_lock);
err = 0;
@@ -1058,7 +1058,7 @@
err = 0;
__unix_remove_socket(sk);
- u->addr = addr;
+ smp_store_release(&u->addr, addr);
__unix_insert_socket(list, sk);
out_unlock:
@@ -1329,15 +1329,29 @@
RCU_INIT_POINTER(newsk->sk_wq, &newu->peer_wq);
otheru = unix_sk(other);
- /* copy address information from listening to new sock*/
- if (otheru->addr) {
- refcount_inc(&otheru->addr->refcnt);
- newu->addr = otheru->addr;
- }
+ /* copy address information from listening to new sock
+ *
+ * The contents of *(otheru->addr) and otheru->path
+ * are seen fully set up here, since we have found
+ * otheru in hash under unix_table_lock. Insertion
+ * into the hash chain we'd found it in had been done
+ * in an earlier critical section protected by
+ * unix_table_lock, the same one where we'd set
+ * *(otheru->addr) contents, as well as otheru->path
+ * and otheru->addr itself.
+ *
+ * Using smp_store_release() here to set newu->addr
+ * is enough to make those stores, as well as stores
+ * to newu->path, visible to anyone who gets newu->addr
+ * by smp_load_acquire(). IOW, the same guarantees
+ * as for unix_sock instances bound in unix_bind() or
+ * in unix_autobind().
+ */
if (otheru->path.dentry) {
path_get(&otheru->path);
newu->path = otheru->path;
}
+ refcount_inc(&otheru->addr->refcnt);
+ smp_store_release(&newu->addr, otheru->addr);
/* Set credentials */
copy_peercred(sk, other);
@@ -1451,7 +1465,7 @@
static int unix_getname(struct socket *sock, struct sockaddr *uaddr, int peer)
{
struct sock *sk = sock->sk;
- struct unix_sock *u;
+ struct unix_address *addr;
DECLARE_SOCKADDR(struct sockaddr_un *, sunaddr, uaddr);
int err = 0;
@@ -1466,19 +1480,15 @@
sock_hold(sk);
}
- u = unix_sk(sk);
- unix_state_lock(sk);
- if (!u->addr) {
+ addr = smp_load_acquire(&unix_sk(sk)->addr);
+ if (!addr) {
sunaddr->sun_family = AF_UNIX;
sunaddr->sun_path[0] = 0;
err = sizeof(short);
} else {
- struct unix_address *addr = u->addr;
-
err = addr->len;
memcpy(sunaddr, addr->name, addr->len);
}
- unix_state_unlock(sk);
sock_put(sk);
out:
return err;
@@ -2071,11 +2081,11 @@
static void unix_copy_addr(struct msghdr *msg, struct sock *sk)
{
- struct unix_sock *u = unix_sk(sk);
+ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
- if (u->addr) {
- msg->msg_namelen = u->addr->len;
- memcpy(msg->msg_name, u->addr->name, u->addr->len);
+ if (addr) {
+ msg->msg_namelen = addr->len;
+ memcpy(msg->msg_name, addr->name, addr->len);
}
}
@@ -2579,15 +2589,14 @@
if (!ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN))
return -EPERM;
- unix_state_lock(sk);
- path = unix_sk(sk)->path;
- if (!path.dentry) {
- unix_state_unlock(sk);
+ if (!smp_load_acquire(&unix_sk(sk)->addr))
return -ENOENT;
- }
+
+ path = unix_sk(sk)->path;
+ if (!path.dentry)
+ return -ENOENT;
path_get(&path);
- unix_state_unlock(sk);
fd = get_unused_fd_flags(O_CLOEXEC);
if (fd < 0)
@@ -2828,7 +2837,7 @@
(s->sk_state == TCP_ESTABLISHED ? SS_CONNECTING : SS_DISCONNECTING),
sock_i_ino(s));
- if (u->addr) {
+ if (u->addr) { // under unix_table_lock here
int i, len;
seq_putc(seq, ' ');
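
The idiom adopted throughout this file is pointer publication: fully initialize the object, make it reachable with a release store, and read it with an acquire load, so readers need no lock. Sketched with C11 atomics (the struct is illustrative):

#include <stdatomic.h>

struct addr {
	int len;
	char name[108];
};

static _Atomic(struct addr *) cur;	/* cf. unix_sk(sk)->addr */

static void publish(struct addr *a)
{
	/* all fields of *a are written before the release store */
	atomic_store_explicit(&cur, a, memory_order_release);	/* cf. smp_store_release() */
}

static struct addr *snapshot(void)
{
	/* pairs with the release store: the result is fully initialized */
	return atomic_load_explicit(&cur, memory_order_acquire);	/* cf. smp_load_acquire() */
}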
diff --git a/net/unix/diag.c b/net/unix/diag.c
index 384c84e..3183d9b 100644
--- a/net/unix/diag.c
+++ b/net/unix/diag.c
@@ -10,7 +10,8 @@
static int sk_diag_dump_name(struct sock *sk, struct sk_buff *nlskb)
{
- struct unix_address *addr = unix_sk(sk)->addr;
+ /* might or might not have unix_table_lock */
+ struct unix_address *addr = smp_load_acquire(&unix_sk(sk)->addr);
if (!addr)
return 0;
diff --git a/net/vmw_vsock/virtio_transport.c b/net/vmw_vsock/virtio_transport.c
index 5d3cce9..15eb5d3 100644
--- a/net/vmw_vsock/virtio_transport.c
+++ b/net/vmw_vsock/virtio_transport.c
@@ -75,6 +75,9 @@
{
struct virtio_vsock *vsock = virtio_vsock_get();
+ if (!vsock)
+ return VMADDR_CID_ANY;
+
return vsock->guest_cid;
}
@@ -584,10 +587,6 @@
virtio_vsock_update_guest_cid(vsock);
- ret = vsock_core_init(&virtio_transport.transport);
- if (ret < 0)
- goto out_vqs;
-
vsock->rx_buf_nr = 0;
vsock->rx_buf_max_nr = 0;
atomic_set(&vsock->queued_replies, 0);
@@ -618,8 +617,6 @@
mutex_unlock(&the_virtio_vsock_mutex);
return 0;
-out_vqs:
- vsock->vdev->config->del_vqs(vsock->vdev);
out:
kfree(vsock);
mutex_unlock(&the_virtio_vsock_mutex);
@@ -637,6 +634,9 @@
flush_work(&vsock->event_work);
flush_work(&vsock->send_pkt_work);
+ /* Reset all connected sockets when the device disappear */
+ vsock_for_each_connected_socket(virtio_vsock_reset_sock);
+
vdev->config->reset(vdev);
mutex_lock(&vsock->rx_lock);
@@ -669,7 +669,6 @@
mutex_lock(&the_virtio_vsock_mutex);
the_virtio_vsock = NULL;
- vsock_core_exit();
mutex_unlock(&the_virtio_vsock_mutex);
vdev->config->del_vqs(vdev);
@@ -702,14 +701,28 @@
virtio_vsock_workqueue = alloc_workqueue("virtio_vsock", 0, 0);
if (!virtio_vsock_workqueue)
return -ENOMEM;
+
ret = register_virtio_driver(&virtio_vsock_driver);
if (ret)
- destroy_workqueue(virtio_vsock_workqueue);
+ goto out_wq;
+
+ ret = vsock_core_init(&virtio_transport.transport);
+ if (ret)
+ goto out_vdr;
+
+ return 0;
+
+out_vdr:
+ unregister_virtio_driver(&virtio_vsock_driver);
+out_wq:
+ destroy_workqueue(virtio_vsock_workqueue);
return ret;
}
static void __exit virtio_vsock_exit(void)
{
+ vsock_core_exit();
unregister_virtio_driver(&virtio_vsock_driver);
destroy_workqueue(virtio_vsock_workqueue);
}
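
The reordering matters for device hot-unplug: with vsock_core_init() done once at module load rather than at probe time, removing the device no longer tears down the vsock core while sockets are alive; instead connected sockets are reset via vsock_for_each_connected_socket() and the core is only unregistered at module exit, with teardown in exact reverse order of setup.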
diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c
index fef473c..f7f53f9 100644
--- a/net/x25/af_x25.c
+++ b/net/x25/af_x25.c
@@ -679,8 +679,7 @@
struct sockaddr_x25 *addr = (struct sockaddr_x25 *)uaddr;
int len, i, rc = 0;
- if (!sock_flag(sk, SOCK_ZAPPED) ||
- addr_len != sizeof(struct sockaddr_x25) ||
+ if (addr_len != sizeof(struct sockaddr_x25) ||
addr->sx25_family != AF_X25) {
rc = -EINVAL;
goto out;
@@ -695,9 +694,13 @@
}
lock_sock(sk);
- x25_sk(sk)->source_addr = addr->sx25_addr;
- x25_insert_socket(sk);
- sock_reset_flag(sk, SOCK_ZAPPED);
+ if (sock_flag(sk, SOCK_ZAPPED)) {
+ x25_sk(sk)->source_addr = addr->sx25_addr;
+ x25_insert_socket(sk);
+ sock_reset_flag(sk, SOCK_ZAPPED);
+ } else {
+ rc = -EINVAL;
+ }
release_sock(sk);
SOCK_DEBUG(sk, "x25_bind: socket is bound\n");
out:
@@ -813,8 +816,13 @@
sock->state = SS_CONNECTED;
rc = 0;
out_put_neigh:
- if (rc)
+ if (rc) {
+ read_lock_bh(&x25_list_lock);
x25_neigh_put(x25->neighbour);
+ x25->neighbour = NULL;
+ read_unlock_bh(&x25_list_lock);
+ x25->state = X25_STATE_0;
+ }
out_put_route:
x25_route_put(rt);
out:
diff --git a/security/apparmor/domain.c b/security/apparmor/domain.c
index 08c88de..11975ec 100644
--- a/security/apparmor/domain.c
+++ b/security/apparmor/domain.c
@@ -1444,7 +1444,10 @@
new = aa_label_merge(label, target, GFP_KERNEL);
if (IS_ERR_OR_NULL(new)) {
info = "failed to build target label";
- error = PTR_ERR(new);
+ if (!new)
+ error = -ENOMEM;
+ else
+ error = PTR_ERR(new);
new = NULL;
perms.allow = 0;
goto audit;
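
The bug class here is common: IS_ERR_OR_NULL() accepts both encodings, but PTR_ERR(NULL) evaluates to 0, which callers treat as success. A small kernel-style sketch of the decoding the fix performs:

#include <linux/err.h>
#include <linux/errno.h>

static int errno_from_ptr(const void *p)
{
	if (!p)
		return -ENOMEM;		/* PTR_ERR(NULL) would be 0, i.e. "success" */
	if (IS_ERR(p))
		return PTR_ERR(p);	/* decode the encoded errno */
	return 0;
}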
diff --git a/security/lsm_audit.c b/security/lsm_audit.c
index f840010..33028c0 100644
--- a/security/lsm_audit.c
+++ b/security/lsm_audit.c
@@ -321,6 +321,7 @@
if (a->u.net->sk) {
struct sock *sk = a->u.net->sk;
struct unix_sock *u;
+ struct unix_address *addr;
int len = 0;
char *p = NULL;
@@ -351,14 +352,15 @@
#endif
case AF_UNIX:
u = unix_sk(sk);
+ addr = smp_load_acquire(&u->addr);
+ if (!addr)
+ break;
if (u->path.dentry) {
audit_log_d_path(ab, " path=", &u->path);
break;
}
- if (!u->addr)
- break;
- len = u->addr->len-sizeof(short);
- p = &u->addr->name->sun_path[0];
+ len = addr->len-sizeof(short);
+ p = &addr->name->sun_path[0];
audit_log_format(ab, " path=");
if (*p)
audit_log_untrustedstring(ab, p);
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index de4af8a..5636e89 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -474,7 +474,19 @@
/* Focusrite, SaffirePro 26 I/O */
SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000003, &saffirepro_26_spec),
/* Focusrite, SaffirePro 10 I/O */
- SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, 0x00000006, &saffirepro_10_spec),
+ {
+ // The combination of vendor_id and model_id is the same as
+ // the one of Liquid Saffire 56.
+ .match_flags = IEEE1394_MATCH_VENDOR_ID |
+ IEEE1394_MATCH_MODEL_ID |
+ IEEE1394_MATCH_SPECIFIER_ID |
+ IEEE1394_MATCH_VERSION,
+ .vendor_id = VEN_FOCUSRITE,
+ .model_id = 0x000006,
+ .specifier_id = 0x00a02d,
+ .version = 0x010001,
+ .driver_data = (kernel_ulong_t)&saffirepro_10_spec,
+ },
/* Focusrite, Saffire(no label and LE) */
SND_BEBOB_DEV_ENTRY(VEN_FOCUSRITE, MODEL_FOCUSRITE_SAFFIRE_BOTH,
&saffire_spec),
diff --git a/sound/firewire/motu/amdtp-motu.c b/sound/firewire/motu/amdtp-motu.c
index f0555a2..6c9b743 100644
--- a/sound/firewire/motu/amdtp-motu.c
+++ b/sound/firewire/motu/amdtp-motu.c
@@ -136,7 +136,9 @@
byte = (u8 *)buffer + p->pcm_byte_offset;
for (c = 0; c < channels; ++c) {
- *dst = (byte[0] << 24) | (byte[1] << 16) | byte[2];
+ *dst = (byte[0] << 24) |
+ (byte[1] << 16) |
+ (byte[2] << 8);
byte += 3;
dst++;
}
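
A worked example of the fix: for the 24-bit big-endian sample bytes 0x12 0x34 0x56, the old expression produced 0x12340056 (the third byte landed in the wrong lane), while the corrected one yields 0x12345600, the left-justified value expected for a 24-bit sample in a 32-bit frame.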
diff --git a/sound/hda/hdac_i915.c b/sound/hda/hdac_i915.c
index 617ff1a..27eb027 100644
--- a/sound/hda/hdac_i915.c
+++ b/sound/hda/hdac_i915.c
@@ -144,9 +144,9 @@
return -ENODEV;
if (!acomp->ops) {
request_module("i915");
- /* 10s timeout */
+ /* 60s timeout */
wait_for_completion_timeout(&bind_complete,
- msecs_to_jiffies(10 * 1000));
+ msecs_to_jiffies(60 * 1000));
}
if (!acomp->ops) {
dev_info(bus->dev, "couldn't bind with audio component\n");
diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c
index fead0ac..3cbd211 100644
--- a/sound/pci/hda/patch_conexant.c
+++ b/sound/pci/hda/patch_conexant.c
@@ -936,6 +936,9 @@
SND_PCI_QUIRK(0x103c, 0x8299, "HP 800 G3 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x829a, "HP 800 G3 DM", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x8455, "HP Z2 G4", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8456, "HP Z2 G4 SFF", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8457, "HP Z2 G4 mini", CXT_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x8458, "HP Z2 G4 mini premium", CXT_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1043, 0x138d, "Asus", CXT_FIXUP_HEADPHONE_MIC_PIN),
SND_PCI_QUIRK(0x152d, 0x0833, "OLPC XO-1.5", CXT_FIXUP_OLPC_XO),
SND_PCI_QUIRK(0x17aa, 0x20f2, "Lenovo T400", CXT_PINCFG_LENOVO_TP410),
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index bf1ffca..8772931 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -118,6 +118,7 @@
unsigned int has_alc5505_dsp:1;
unsigned int no_depop_delay:1;
unsigned int done_hp_init:1;
+ unsigned int no_shutup_pins:1;
/* for PLL fix */
hda_nid_t pll_nid;
@@ -476,6 +477,14 @@
set_eapd(codec, *p, on);
}
+static void alc_shutup_pins(struct hda_codec *codec)
+{
+ struct alc_spec *spec = codec->spec;
+
+ if (!spec->no_shutup_pins)
+ snd_hda_shutup_pins(codec);
+}
+
/* generic shutup callback;
* just turning off EAPD and a little pause for avoiding pop-noise
*/
@@ -486,7 +495,7 @@
alc_auto_setup_eapd(codec, false);
if (!spec->no_depop_delay)
msleep(200);
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
}
/* generic EAPD initialization */
@@ -814,7 +823,7 @@
if (spec && spec->shutup)
spec->shutup(codec);
else
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
}
static void alc_reboot_notify(struct hda_codec *codec)
@@ -2950,7 +2959,7 @@
(alc_get_coef0(codec) & 0x00ff) == 0x018) {
msleep(150);
}
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
}
static struct coef_fw alc282_coefs[] = {
@@ -3053,14 +3062,15 @@
if (hp_pin_sense)
msleep(85);
- snd_hda_codec_write(codec, hp_pin, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ if (!spec->no_shutup_pins)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
if (hp_pin_sense)
msleep(100);
alc_auto_setup_eapd(codec, false);
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
alc_write_coef_idx(codec, 0x78, coef78);
}
@@ -3166,15 +3176,16 @@
if (hp_pin_sense)
msleep(100);
- snd_hda_codec_write(codec, hp_pin, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ if (!spec->no_shutup_pins)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
if (hp_pin_sense)
msleep(100);
alc_auto_setup_eapd(codec, false);
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
alc_write_coef_idx(codec, 0x43, 0x9614);
}
@@ -3240,14 +3251,15 @@
/* NOTE: call this before clearing the pin, otherwise codec stalls */
alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
- snd_hda_codec_write(codec, hp_pin, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ if (!spec->no_shutup_pins)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
if (hp_pin_sense)
msleep(100);
alc_auto_setup_eapd(codec, false);
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
}
static void alc225_init(struct hda_codec *codec)
@@ -3334,7 +3346,7 @@
msleep(100);
alc_auto_setup_eapd(codec, false);
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
}
static void alc_default_init(struct hda_codec *codec)
@@ -3388,14 +3400,15 @@
if (hp_pin_sense)
msleep(85);
- snd_hda_codec_write(codec, hp_pin, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ if (!spec->no_shutup_pins)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
if (hp_pin_sense)
msleep(100);
alc_auto_setup_eapd(codec, false);
- snd_hda_shutup_pins(codec);
+ alc_shutup_pins(codec);
}
static void alc294_hp_init(struct hda_codec *codec)
@@ -3412,8 +3425,9 @@
msleep(100);
- snd_hda_codec_write(codec, hp_pin, 0,
- AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
+ if (!spec->no_shutup_pins)
+ snd_hda_codec_write(codec, hp_pin, 0,
+ AC_VERB_SET_PIN_WIDGET_CONTROL, 0x0);
alc_update_coef_idx(codec, 0x6f, 0x000f, 0);/* Set HP depop to manual mode */
alc_update_coefex_idx(codec, 0x58, 0x00, 0x8000, 0x8000); /* HP depop procedure start */
@@ -5007,16 +5021,12 @@
}
}
-static void alc_no_shutup(struct hda_codec *codec)
-{
-}
-
static void alc_fixup_no_shutup(struct hda_codec *codec,
const struct hda_fixup *fix, int action)
{
if (action == HDA_FIXUP_ACT_PRE_PROBE) {
struct alc_spec *spec = codec->spec;
- spec->shutup = alc_no_shutup;
+ spec->no_shutup_pins = 1;
}
}
@@ -5602,6 +5612,7 @@
ALC294_FIXUP_ASUS_SPK,
ALC293_FIXUP_SYSTEM76_MIC_NO_PRESENCE,
ALC285_FIXUP_LENOVO_PC_BEEP_IN_NOISE,
+ ALC255_FIXUP_ACER_HEADSET_MIC,
};
static const struct hda_fixup alc269_fixups[] = {
@@ -6546,6 +6557,16 @@
.chained = true,
.chain_id = ALC285_FIXUP_LENOVO_HEADPHONE_NOISE
},
+ [ALC255_FIXUP_ACER_HEADSET_MIC] = {
+ .type = HDA_FIXUP_PINS,
+ .v.pins = (const struct hda_pintbl[]) {
+ { 0x19, 0x03a11130 },
+ { 0x1a, 0x90a60140 }, /* use as internal mic */
+ { }
+ },
+ .chained = true,
+ .chain_id = ALC255_FIXUP_HEADSET_MODE_NO_HP_MIC
+ },
};
static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -6565,6 +6586,7 @@
SND_PCI_QUIRK(0x1025, 0x128f, "Acer Veriton Z6860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1290, "Acer Veriton Z4860G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x1025, 0x1291, "Acer Veriton Z4660G", ALC286_FIXUP_ACER_AIO_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x1025, 0x1330, "Acer TravelMate X514-51T", ALC255_FIXUP_ACER_HEADSET_MIC),
SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
SND_PCI_QUIRK(0x1028, 0x054b, "Dell XPS one 2710", ALC275_FIXUP_DELL_XPS),
SND_PCI_QUIRK(0x1028, 0x05bd, "Dell Latitude E6440", ALC292_FIXUP_DELL_E7X),
@@ -6596,6 +6618,7 @@
SND_PCI_QUIRK(0x1028, 0x0704, "Dell XPS 13 9350", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x0738, "Dell Precision 5820", ALC269_FIXUP_NO_SHUTUP),
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
@@ -6670,11 +6693,13 @@
SND_PCI_QUIRK(0x103c, 0x2336, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x2337, "HP", ALC269_FIXUP_HP_MUTE_LED_MIC1),
SND_PCI_QUIRK(0x103c, 0x221c, "HP EliteBook 755 G2", ALC280_FIXUP_HP_HEADSET_MIC),
+ SND_PCI_QUIRK(0x103c, 0x802e, "HP Z240 SFF", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x802f, "HP Z240", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x820d, "HP Pavilion 15", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x103c, 0x8256, "HP", ALC221_FIXUP_HP_FRONT_MIC),
SND_PCI_QUIRK(0x103c, 0x827e, "HP x360", ALC295_FIXUP_HP_X360),
- SND_PCI_QUIRK(0x103c, 0x82bf, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
- SND_PCI_QUIRK(0x103c, 0x82c0, "HP", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x82bf, "HP G3 mini", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
+ SND_PCI_QUIRK(0x103c, 0x82c0, "HP G3 mini premium", ALC221_FIXUP_HP_MIC_NO_PRESENCE),
SND_PCI_QUIRK(0x103c, 0x83b9, "HP Spectre x360", ALC269_FIXUP_HP_MUTE_LED_MIC3),
SND_PCI_QUIRK(0x1043, 0x103e, "ASUS X540SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x103f, "ASUS TX300", ALC282_FIXUP_ASUS_TX300),
@@ -6690,7 +6715,6 @@
SND_PCI_QUIRK(0x1043, 0x12e0, "ASUS X541SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x13b0, "ASUS Z550SA", ALC256_FIXUP_ASUS_MIC),
SND_PCI_QUIRK(0x1043, 0x1427, "Asus Zenbook UX31E", ALC269VB_FIXUP_ASUS_ZENBOOK),
- SND_PCI_QUIRK(0x1043, 0x14a1, "ASUS UX533FD", ALC294_FIXUP_ASUS_SPK),
SND_PCI_QUIRK(0x1043, 0x1517, "Asus Zenbook UX31A", ALC269VB_FIXUP_ASUS_ZENBOOK_UX31A),
SND_PCI_QUIRK(0x1043, 0x16e3, "ASUS UX50", ALC269_FIXUP_STEREO_DMIC),
SND_PCI_QUIRK(0x1043, 0x1a13, "Asus G73Jw", ALC269_FIXUP_ASUS_G73JW),
@@ -7306,6 +7330,10 @@
SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
{0x12, 0x90a60130},
{0x17, 0x90170110},
+ {0x21, 0x03211020}),
+ SND_HDA_PIN_QUIRK(0x10ec0294, 0x1043, "ASUS", ALC294_FIXUP_ASUS_SPK,
+ {0x12, 0x90a60130},
+ {0x17, 0x90170110},
{0x21, 0x04211020}),
SND_HDA_PIN_QUIRK(0x10ec0295, 0x1028, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE,
ALC295_STANDARD_PINS,
diff --git a/tools/bpf/bpftool/map.c b/tools/bpf/bpftool/map.c
index b455930..ec73d83 100644
--- a/tools/bpf/bpftool/map.c
+++ b/tools/bpf/bpftool/map.c
@@ -370,6 +370,20 @@
return argv + i;
}
+/* on per-CPU maps we must copy the provided value to all per-CPU value instances */
+static void fill_per_cpu_value(struct bpf_map_info *info, void *value)
+{
+ unsigned int i, n, step;
+
+ if (!map_is_per_cpu(info->type))
+ return;
+
+ n = get_possible_cpus();
+ step = round_up(info->value_size, 8);
+ for (i = 1; i < n; i++)
+ memcpy(value + i * step, value, info->value_size);
+}
+
static int parse_elem(char **argv, struct bpf_map_info *info,
void *key, void *value, __u32 key_size, __u32 value_size,
__u32 *flags, __u32 **value_fd)
@@ -449,6 +463,8 @@
argv = parse_bytes(argv, "value", value, value_size);
if (!argv)
return -1;
+
+ fill_per_cpu_value(info, value);
}
return parse_elem(argv, info, key, NULL, key_size, value_size,
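
For per-CPU maps, user space hands the kernel one value slot per possible CPU, each slot padded to 8 bytes, which is why the fill above strides by round_up(value_size, 8). A stand-alone sketch of building such a buffer (helper names are illustrative):

#include <stdlib.h>
#include <string.h>

#define ROUND_UP(x, a)	(((x) + (a) - 1) / (a) * (a))

/* one padded slot per possible CPU; 'one' is the single value to replicate */
static void *alloc_percpu_value(unsigned int ncpus, unsigned int value_size,
				const void *one)
{
	size_t step = ROUND_UP((size_t)value_size, 8);
	unsigned char *buf = calloc(ncpus, step);
	unsigned int i;

	if (!buf)
		return NULL;
	for (i = 0; i < ncpus; i++)
		memcpy(buf + i * step, one, value_size);
	return buf;
}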
diff --git a/tools/bpf/bpftool/prog.c b/tools/bpf/bpftool/prog.c
index 0de024a..bbba0d6 100644
--- a/tools/bpf/bpftool/prog.c
+++ b/tools/bpf/bpftool/prog.c
@@ -109,13 +109,14 @@
static int prog_fd_by_tag(unsigned char *tag)
{
- struct bpf_prog_info info = {};
- __u32 len = sizeof(info);
unsigned int id = 0;
int err;
int fd;
while (true) {
+ struct bpf_prog_info info = {};
+ __u32 len = sizeof(info);
+
err = bpf_prog_get_next_id(id, &id);
if (err) {
p_err("%s", strerror(errno));
diff --git a/tools/perf/builtin-script.c b/tools/perf/builtin-script.c
index 6c1e7ce..53c11fc 100644
--- a/tools/perf/builtin-script.c
+++ b/tools/perf/builtin-script.c
@@ -1589,13 +1589,8 @@
.force_header = false,
};
struct perf_evsel *ev2;
- static bool init;
u64 val;
- if (!init) {
- perf_stat__init_shadow_stats();
- init = true;
- }
if (!evsel->stats)
perf_evlist__alloc_stats(script->session->evlist, false);
if (evsel_script(evsel->leader)->gnum++ == 0)
@@ -1658,7 +1653,7 @@
return;
}
- if (PRINT_FIELD(TRACE)) {
+ if (PRINT_FIELD(TRACE) && sample->raw_data) {
event_format__fprintf(evsel->tp_format, sample->cpu,
sample->raw_data, sample->raw_size, fp);
}
@@ -2214,6 +2209,8 @@
signal(SIGINT, sig_handler);
+ perf_stat__init_shadow_stats();
+
/* override event processing functions */
if (script->show_task_events) {
script->tool.comm = process_comm_event;
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 22ab8e6..3f43aed 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -2263,19 +2263,30 @@
static bool perf_evlist__add_vfs_getname(struct perf_evlist *evlist)
{
- struct perf_evsel *evsel = perf_evsel__newtp("probe", "vfs_getname");
+ bool found = false;
+ struct perf_evsel *evsel, *tmp;
+ struct parse_events_error err = { .idx = 0, };
+ int ret = parse_events(evlist, "probe:vfs_getname*", &err);
- if (IS_ERR(evsel))
+ if (ret)
return false;
- if (perf_evsel__field(evsel, "pathname") == NULL) {
+ evlist__for_each_entry_safe(evlist, evsel, tmp) {
+ if (!strstarts(perf_evsel__name(evsel), "probe:vfs_getname"))
+ continue;
+
+ if (perf_evsel__field(evsel, "pathname")) {
+ evsel->handler = trace__vfs_getname;
+ found = true;
+ continue;
+ }
+
+ list_del_init(&evsel->node);
+ evsel->evlist = NULL;
perf_evsel__delete(evsel);
- return false;
}
- evsel->handler = trace__vfs_getname;
- perf_evlist__add(evlist, evsel);
- return true;
+ return found;
}
static struct perf_evsel *perf_evsel__new_pgfault(u64 config)
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 1ccbd33..383674f 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -134,7 +134,12 @@
if (!cpu_list)
return cpu_map__read_all_cpu_map();
- if (!isdigit(*cpu_list))
+ /*
+ * Must handle the case of an empty cpumap to cover
+ * the TOPOLOGY header for NUMA nodes with no CPU
+ * (e.g., because of CPU hotplug).
+ */
+ if (!isdigit(*cpu_list) && *cpu_list != '\0')
goto out;
while (isdigit(*cpu_list)) {
@@ -181,8 +186,10 @@
if (nr_cpus > 0)
cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
- else
+ else if (*cpu_list != '\0')
cpus = cpu_map__default_new();
+ else
+ cpus = cpu_map__dummy_new();
invalid:
free(tmp_cpus);
out:
diff --git a/tools/perf/util/symbol-elf.c b/tools/perf/util/symbol-elf.c
index 6e70cc0..a701a8a 100644
--- a/tools/perf/util/symbol-elf.c
+++ b/tools/perf/util/symbol-elf.c
@@ -87,6 +87,11 @@
return GELF_ST_TYPE(sym->st_info);
}
+static inline uint8_t elf_sym__visibility(const GElf_Sym *sym)
+{
+ return GELF_ST_VISIBILITY(sym->st_other);
+}
+
#ifndef STT_GNU_IFUNC
#define STT_GNU_IFUNC 10
#endif
@@ -111,7 +116,9 @@
return elf_sym__type(sym) == STT_NOTYPE &&
sym->st_name != 0 &&
sym->st_shndx != SHN_UNDEF &&
- sym->st_shndx != SHN_ABS;
+ sym->st_shndx != SHN_ABS &&
+ elf_sym__visibility(sym) != STV_HIDDEN &&
+ elf_sym__visibility(sym) != STV_INTERNAL;
}
static bool elf_sym__filter(GElf_Sym *sym)
diff --git a/tools/testing/selftests/bpf/bpf_util.h b/tools/testing/selftests/bpf/bpf_util.h
index 315a44f..84fd6f1 100644
--- a/tools/testing/selftests/bpf/bpf_util.h
+++ b/tools/testing/selftests/bpf/bpf_util.h
@@ -13,7 +13,7 @@
unsigned int start, end, possible_cpus = 0;
char buff[128];
FILE *fp;
- int n;
+ int len, n, i, j = 0;
fp = fopen(fcpu, "r");
if (!fp) {
@@ -21,17 +21,27 @@
exit(1);
}
- while (fgets(buff, sizeof(buff), fp)) {
- n = sscanf(buff, "%u-%u", &start, &end);
- if (n == 0) {
- printf("Failed to retrieve # possible CPUs!\n");
- exit(1);
- } else if (n == 1) {
- end = start;
- }
- possible_cpus = start == 0 ? end + 1 : 0;
- break;
+ if (!fgets(buff, sizeof(buff), fp)) {
+ printf("Failed to read %s!\n", fcpu);
+ exit(1);
}
+
+ len = strlen(buff);
+ for (i = 0; i <= len; i++) {
+ if (buff[i] == ',' || buff[i] == '\0') {
+ buff[i] = '\0';
+ n = sscanf(&buff[j], "%u-%u", &start, &end);
+ if (n <= 0) {
+ printf("Failed to retrieve # possible CPUs!\n");
+ exit(1);
+ } else if (n == 1) {
+ end = start;
+ }
+ possible_cpus += end - start + 1;
+ j = i + 1;
+ }
+ }
+
fclose(fp);
return possible_cpus;
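
For example, with /sys/devices/system/cpu/possible containing "0-3,5,7-8", the old code computed 4 (and 0 for any list not starting at CPU 0), while the new loop sums the ranges: 4 + 1 + 2 = 7 possible CPUs.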
diff --git a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
index bab13dd..0d26b5e 100755
--- a/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
+++ b/tools/testing/selftests/cpu-hotplug/cpu-on-off-test.sh
@@ -37,6 +37,10 @@
exit $ksft_skip
fi
+ present_cpus=`cat $SYSFS/devices/system/cpu/present`
+ present_max=${present_cpus##*-}
+ echo "present_cpus = $present_cpus present_max = $present_max"
+
echo -e "\t Cpus in online state: $online_cpus"
offline_cpus=`cat $SYSFS/devices/system/cpu/offline`
@@ -151,6 +155,8 @@
online_max=0
offline_cpus=0
offline_max=0
+present_cpus=0
+present_max=0
while getopts e:ahp: opt; do
case $opt in
@@ -190,9 +196,10 @@
online_cpu_expect_success $online_max
if [[ $offline_cpus -gt 0 ]]; then
- echo -e "\t offline to online to offline: cpu $offline_max"
- online_cpu_expect_success $offline_max
- offline_cpu_expect_success $offline_max
+ echo -e "\t offline to online to offline: cpu $present_max"
+ online_cpu_expect_success $present_max
+ offline_cpu_expect_success $present_max
+ online_cpu $present_max
fi
exit 0
else
diff --git a/tools/testing/selftests/firmware/fw_lib.sh b/tools/testing/selftests/firmware/fw_lib.sh
index 6c5f1b2..1cbb12e 100755
--- a/tools/testing/selftests/firmware/fw_lib.sh
+++ b/tools/testing/selftests/firmware/fw_lib.sh
@@ -91,7 +91,7 @@
if [ "$TEST_REQS_FW_SYSFS_FALLBACK" = "yes" ]; then
if [ ! "$HAS_FW_LOADER_USER_HELPER" = "yes" ]; then
echo "usermode helper disabled so ignoring test"
- exit $ksft_skip
+ exit 0
fi
fi
}
diff --git a/tools/testing/selftests/net/Makefile b/tools/testing/selftests/net/Makefile
index 919aa2a..9a3764a 100644
--- a/tools/testing/selftests/net/Makefile
+++ b/tools/testing/selftests/net/Makefile
@@ -18,6 +18,6 @@
KSFT_KHDR_INSTALL := 1
include ../lib.mk
-$(OUTPUT)/reuseport_bpf_numa: LDFLAGS += -lnuma
+$(OUTPUT)/reuseport_bpf_numa: LDLIBS += -lnuma
$(OUTPUT)/tcp_mmap: LDFLAGS += -lpthread
$(OUTPUT)/tcp_inq: LDFLAGS += -lpthread
diff --git a/tools/testing/selftests/netfilter/Makefile b/tools/testing/selftests/netfilter/Makefile
index 47ed6ce..c9ff2b4 100644
--- a/tools/testing/selftests/netfilter/Makefile
+++ b/tools/testing/selftests/netfilter/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
# Makefile for netfilter selftests
-TEST_PROGS := nft_trans_stress.sh
+TEST_PROGS := nft_trans_stress.sh nft_nat.sh
include ../lib.mk
diff --git a/tools/testing/selftests/netfilter/config b/tools/testing/selftests/netfilter/config
index 1017313..59caa8f 100644
--- a/tools/testing/selftests/netfilter/config
+++ b/tools/testing/selftests/netfilter/config
@@ -1,2 +1,2 @@
CONFIG_NET_NS=y
-NF_TABLES_INET=y
+CONFIG_NF_TABLES_INET=y
diff --git a/tools/testing/selftests/netfilter/nft_nat.sh b/tools/testing/selftests/netfilter/nft_nat.sh
new file mode 100755
index 0000000..8ec7668
--- /dev/null
+++ b/tools/testing/selftests/netfilter/nft_nat.sh
@@ -0,0 +1,762 @@
+#!/bin/bash
+#
+# This test is for basic NAT functionality: snat, dnat, redirect, masquerade.
+#
+
+# Kselftest framework requirement - SKIP code is 4.
+ksft_skip=4
+ret=0
+
+nft --version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without nft tool"
+ exit $ksft_skip
+fi
+
+ip -Version > /dev/null 2>&1
+if [ $? -ne 0 ];then
+ echo "SKIP: Could not run test without ip tool"
+ exit $ksft_skip
+fi
+
+ip netns add ns0
+ip netns add ns1
+ip netns add ns2
+
+ip link add veth0 netns ns0 type veth peer name eth0 netns ns1
+ip link add veth1 netns ns0 type veth peer name eth0 netns ns2
+
+ip -net ns0 link set lo up
+ip -net ns0 link set veth0 up
+ip -net ns0 addr add 10.0.1.1/24 dev veth0
+ip -net ns0 addr add dead:1::1/64 dev veth0
+
+ip -net ns0 link set veth1 up
+ip -net ns0 addr add 10.0.2.1/24 dev veth1
+ip -net ns0 addr add dead:2::1/64 dev veth1
+
+for i in 1 2; do
+ ip -net ns$i link set lo up
+ ip -net ns$i link set eth0 up
+ ip -net ns$i addr add 10.0.$i.99/24 dev eth0
+ ip -net ns$i route add default via 10.0.$i.1
+ ip -net ns$i addr add dead:$i::99/64 dev eth0
+ ip -net ns$i route add default via dead:$i::1
+done
+
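+# bad_counter <netns> <counter> <expected>: report a counter mismatch and dump it.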
+bad_counter()
+{
+ local ns=$1
+ local counter=$2
+ local expect=$3
+
+ echo "ERROR: $counter counter in $ns has unexpected value (expected $expect)" 1>&2
+ ip netns exec $ns nft list counter inet filter $counter 1>&2
+}
+
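+# One default-sized ping is 84 bytes on IPv4 (20 byte IP header + 8 byte
+# ICMP header + 56 byte payload) and 104 bytes on IPv6 (40 byte header);
+# the expected counter values below rely on this.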
+check_counters()
+{
+ local ns=$1
+ local lret=0
+
+ expect="packets 1 bytes 84"
+ cnt=$(ip netns exec $ns nft list counter inet filter ns0in | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter $ns ns0in "$expect"
+ lret=1
+ fi
+ cnt=$(ip netns exec $ns nft list counter inet filter ns0out | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter $ns ns0out "$expect"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 104"
+ cnt=$(ip netns exec $ns nft list counter inet filter ns0in6 | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter $ns ns0in6 "$expect"
+ lret=1
+ fi
+ cnt=$(ip netns exec $ns nft list counter inet filter ns0out6 | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter $ns ns0out6 "$expect"
+ lret=1
+ fi
+
+ return $lret
+}
+
+check_ns0_counters()
+{
+ local ns=$1
+ local lret=0
+
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns0in | grep -q "packets 0 bytes 0")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns0in "packets 0 bytes 0"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns0in6 | grep -q "packets 0 bytes 0")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns0in6 "packets 0 bytes 0"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns0out | grep -q "packets 0 bytes 0")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns0out "packets 0 bytes 0"
+ lret=1
+ fi
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns0out6 | grep -q "packets 0 bytes 0")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns0out6 "packets 0 bytes 0"
+ lret=1
+ fi
+
+ for dir in "in" "out" ; do
+ expect="packets 1 bytes 84"
+ cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 $ns$dir "$expect"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 104"
+ cnt=$(ip netns exec ns0 nft list counter inet filter ${ns}${dir}6 | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ${ns}${dir}6 "$expect"
+ lret=1
+ fi
+ done
+
+ return $lret
+}
+
+reset_counters()
+{
+ for i in 0 1 2;do
+ ip netns exec ns$i nft reset counters inet > /dev/null
+ done
+}
+
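+# DNAT ns0's locally generated traffic for ns1 to ns2 via an output-hook
+# rule and verify that only the ns2 counters advance.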
+test_local_dnat6()
+{
+ local lret=0
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+ chain output {
+ type nat hook output priority 0; policy accept;
+ ip6 daddr dead:1::99 dnat to dead:2::99
+ }
+}
+EOF
+ if [ $? -ne 0 ]; then
+ echo "SKIP: Could not add add ip6 dnat hook"
+ return $ksft_skip
+ fi
+
+ # ping netns1, expect rewrite to netns2
+ ip netns exec ns0 ping -q -c 1 dead:1::99 > /dev/null
+ if [ $? -ne 0 ]; then
+ lret=1
+ echo "ERROR: ping6 failed"
+ return $lret
+ fi
+
+ expect="packets 0 bytes 0"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 0 count in ns1
+ expect="packets 0 bytes 0"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 1 packet in ns2
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ test $lret -eq 0 && echo "PASS: ipv6 ping to ns1 was NATted to ns2"
+ ip netns exec ns0 nft flush chain ip6 nat output
+
+ return $lret
+}
+
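+# IPv4 version of the test above; additionally verifies that ns1 is
+# reachable again once the dnat rule has been flushed.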
+test_local_dnat()
+{
+ local lret=0
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+ chain output {
+ type nat hook output priority 0; policy accept;
+ ip daddr 10.0.1.99 dnat to 10.0.2.99
+ }
+}
+EOF
+ # ping netns1, expect rewrite to netns2
+ ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+ if [ $? -ne 0 ]; then
+ lret=1
+ echo "ERROR: ping failed"
+ return $lret
+ fi
+
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 0 count in ns1
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 1 packet in ns2
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ test $lret -eq 0 && echo "PASS: ping to ns1 was NATted to ns2"
+
+ ip netns exec ns0 nft flush chain ip nat output
+
+ reset_counters
+ ip netns exec ns0 ping -q -c 1 10.0.1.99 > /dev/null
+ if [ $? -ne 0 ]; then
+ lret=1
+ echo "ERROR: ping failed"
+ return $lret
+ fi
+
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 1 count in ns1
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # expect 0 packet in ns2
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns0$dir "$expect"
+ lret=1
+ fi
+ done
+
+ test $lret -eq 0 && echo "PASS: ping to ns1 OK after nat output chain flush"
+
+ return $lret
+}
+
+
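+# Masquerade ns2->ns1 traffic behind ns0's veth0 address and verify that
+# ns1 sees the pings as coming from ns0 rather than from ns2.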
+test_masquerade6()
+{
+ local lret=0
+
+ ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 via ipv6"
+ return 1
+ fi
+
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+# add masquerading rule
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ meta oif veth0 masquerade
+ }
+}
+EOF
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ipv6 masquerading"
+ lret=1
+ fi
+
+ # ns1 should have seen packets from ns0, due to masquerade
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # ns1 should not have seen packets from ns2, due to masquerade
+ expect="packets 0 bytes 0"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ ip netns exec ns0 nft flush chain ip6 nat postrouting
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not flush ip6 nat postrouting" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IPv6 masquerade for ns2"
+
+ return $lret
+}
+
+test_masquerade()
+{
+ local lret=0
+
+ ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: canot ping ns1 from ns2"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+# add masquerading rule
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+ chain postrouting {
+ type nat hook postrouting priority 0; policy accept;
+ meta oif veth0 masquerade
+ }
+}
+EOF
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ip masquerading"
+ lret=1
+ fi
+
+ # ns1 should have seen packets from ns0, due to masquerade
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns0${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns0$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # ns1 should not have seen packets from ns2, due to masquerade
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ ip netns exec ns0 nft flush chain ip nat postrouting
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not flush nat postrouting" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IP masquerade for ns2"
+
+ return $lret
+}
+
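+# Redirect ns2's icmpv6 pings for ns1 to ns0 itself (prerouting redirect)
+# and verify that ns0, not ns1, sees and answers them.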
+test_redirect6()
+{
+ local lret=0
+
+ ip netns exec ns0 sysctl net.ipv6.conf.all.forwarding=1 > /dev/null
+
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannnot ping ns1 from ns2 via ipv6"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+# add redirect rule
+ip netns exec ns0 nft -f - <<EOF
+table ip6 nat {
+ chain prerouting {
+ type nat hook prerouting priority 0; policy accept;
+ meta iif veth1 meta l4proto icmpv6 ip6 saddr dead:2::99 ip6 daddr dead:1::99 redirect
+ }
+}
+EOF
+ ip netns exec ns2 ping -q -c 1 dead:1::99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ip6 redirect"
+ lret=1
+ fi
+
+ # ns1 should have seen no packets from ns2, due to redirection
+ expect="packets 0 bytes 0"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # ns0 should have seen packets from ns2, due to redirect
+ expect="packets 1 bytes 104"
+ for dir in "in6" "out6" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ ip netns exec ns0 nft delete table ip6 nat
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not delete ip6 nat table" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IPv6 redirection for ns2"
+
+ return $lret
+}
+
+test_redirect()
+{
+ local lret=0
+
+ ip netns exec ns0 sysctl net.ipv4.conf.veth0.forwarding=1 > /dev/null
+ ip netns exec ns0 sysctl net.ipv4.conf.veth1.forwarding=1 > /dev/null
+
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2"
+ lret=1
+ fi
+
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+
+ cnt=$(ip netns exec ns2 nft list counter inet filter ns1${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns2 ns1$dir "$expect"
+ lret=1
+ fi
+ done
+
+ reset_counters
+
+# add redirect rule
+ip netns exec ns0 nft -f - <<EOF
+table ip nat {
+ chain prerouting {
+ type nat hook prerouting priority 0; policy accept;
+ meta iif veth1 ip protocol icmp ip saddr 10.0.2.99 ip daddr 10.0.1.99 redirect
+ }
+}
+EOF
+ ip netns exec ns2 ping -q -c 1 10.0.1.99 > /dev/null # ping ns2->ns1
+ if [ $? -ne 0 ] ; then
+ echo "ERROR: cannot ping ns1 from ns2 with active ip redirect"
+ lret=1
+ fi
+
+ # ns1 should have seen no packets from ns2, due to redirection
+ expect="packets 0 bytes 0"
+ for dir in "in" "out" ; do
+
+ cnt=$(ip netns exec ns1 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns1 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ # ns0 should have seen packets from ns2, due to redirect
+ expect="packets 1 bytes 84"
+ for dir in "in" "out" ; do
+ cnt=$(ip netns exec ns0 nft list counter inet filter ns2${dir} | grep -q "$expect")
+ if [ $? -ne 0 ]; then
+ bad_counter ns0 ns2$dir "$expect"
+ lret=1
+ fi
+ done
+
+ ip netns exec ns0 nft delete table ip nat
+ if [ $? -ne 0 ]; then
+ echo "ERROR: Could not delete nat table" 1>&2
+ lret=1
+ fi
+
+ test $lret -eq 0 && echo "PASS: IP redirection for ns2"
+
+ return $lret
+}
+
+
+# Install the same per-address counters and address->counter maps in all netns.
+for i in 0 1 2; do
+ip netns exec ns$i nft -f - <<EOF
+table inet filter {
+ counter ns0in {}
+ counter ns1in {}
+ counter ns2in {}
+
+ counter ns0out {}
+ counter ns1out {}
+ counter ns2out {}
+
+ counter ns0in6 {}
+ counter ns1in6 {}
+ counter ns2in6 {}
+
+ counter ns0out6 {}
+ counter ns1out6 {}
+ counter ns2out6 {}
+
+ map nsincounter {
+ type ipv4_addr : counter
+ elements = { 10.0.1.1 : "ns0in",
+ 10.0.2.1 : "ns0in",
+ 10.0.1.99 : "ns1in",
+ 10.0.2.99 : "ns2in" }
+ }
+
+ map nsincounter6 {
+ type ipv6_addr : counter
+ elements = { dead:1::1 : "ns0in6",
+ dead:2::1 : "ns0in6",
+ dead:1::99 : "ns1in6",
+ dead:2::99 : "ns2in6" }
+ }
+
+ map nsoutcounter {
+ type ipv4_addr : counter
+ elements = { 10.0.1.1 : "ns0out",
+ 10.0.2.1 : "ns0out",
+ 10.0.1.99: "ns1out",
+ 10.0.2.99: "ns2out" }
+ }
+
+ map nsoutcounter6 {
+ type ipv6_addr : counter
+ elements = { dead:1::1 : "ns0out6",
+ dead:2::1 : "ns0out6",
+ dead:1::99 : "ns1out6",
+ dead:2::99 : "ns2out6" }
+ }
+
+ chain input {
+ type filter hook input priority 0; policy accept;
+ counter name ip saddr map @nsincounter
+ icmpv6 type { "echo-request", "echo-reply" } counter name ip6 saddr map @nsincounter6
+ }
+ chain output {
+ type filter hook output priority 0; policy accept;
+ counter name ip daddr map @nsoutcounter
+ icmpv6 type { "echo-request", "echo-reply" } counter name ip6 daddr map @nsoutcounter6
+ }
+}
+EOF
+done
+
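+# Let addresses settle (e.g. IPv6 DAD) before the connectivity test.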
+sleep 3
+# test basic connectivity
+for i in 1 2; do
+ ip netns exec ns0 ping -c 1 -q 10.0.$i.99 > /dev/null
+ if [ $? -ne 0 ];then
+ echo "ERROR: Could not reach other namespace(s)" 1>&2
+ ret=1
+ fi
+
+ ip netns exec ns0 ping -c 1 -q dead:$i::99 > /dev/null
+ if [ $? -ne 0 ];then
+ echo "ERROR: Could not reach other namespace(s) via ipv6" 1>&2
+ ret=1
+ fi
+ check_counters ns$i
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+
+ check_ns0_counters ns$i
+ if [ $? -ne 0 ]; then
+ ret=1
+ fi
+ reset_counters
+done
+
+if [ $ret -eq 0 ];then
+ echo "PASS: netns routing/connectivity: ns0 can reach ns1 and ns2"
+fi
+
+reset_counters
+test_local_dnat
+test_local_dnat6
+
+reset_counters
+test_masquerade
+test_masquerade6
+
+reset_counters
+test_redirect
+test_redirect6
+
+for i in 0 1 2; do ip netns del ns$i;done
+
+exit $ret
diff --git a/tools/testing/selftests/proc/.gitignore b/tools/testing/selftests/proc/.gitignore
index 82121a8..29bac5e 100644
--- a/tools/testing/selftests/proc/.gitignore
+++ b/tools/testing/selftests/proc/.gitignore
@@ -10,4 +10,5 @@
/proc-uptime-002
/read
/self
+/setns-dcache
/thread-self
diff --git a/tools/testing/selftests/proc/Makefile b/tools/testing/selftests/proc/Makefile
index 1c12c34..434d033 100644
--- a/tools/testing/selftests/proc/Makefile
+++ b/tools/testing/selftests/proc/Makefile
@@ -14,6 +14,7 @@
TEST_GEN_PROGS += proc-uptime-002
TEST_GEN_PROGS += read
TEST_GEN_PROGS += self
+TEST_GEN_PROGS += setns-dcache
TEST_GEN_PROGS += thread-self
include ../lib.mk
diff --git a/tools/testing/selftests/proc/setns-dcache.c b/tools/testing/selftests/proc/setns-dcache.c
new file mode 100644
index 0000000..60ab197
--- /dev/null
+++ b/tools/testing/selftests/proc/setns-dcache.c
@@ -0,0 +1,129 @@
+/*
+ * Copyright © 2019 Alexey Dobriyan <adobriyan@gmail.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+/*
+ * Test that setns(CLONE_NEWNET) points to new /proc/net content even
+ * if old one is in dcache.
+ *
+ * FIXME /proc/net/unix is under CONFIG_UNIX which can be disabled.
+ */
+#undef NDEBUG
+#include <assert.h>
+#include <errno.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+
+static pid_t pid = -1;
+
+static void f(void)
+{
+ if (pid > 0) {
+ kill(pid, SIGTERM);
+ }
+}
+
+int main(void)
+{
+ int fd[2];
+ char _ = 0;
+ int nsfd;
+
+ atexit(f);
+
+ /* Check for privileges and syscall availability straight away. */
+ if (unshare(CLONE_NEWNET) == -1) {
+ if (errno == ENOSYS || errno == EPERM) {
+ return 4;
+ }
+ return 1;
+ }
+ /* Distinguisher between two otherwise empty net namespaces. */
+ if (socket(AF_UNIX, SOCK_STREAM, 0) == -1) {
+ return 1;
+ }
+
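+ /* The pipe lets the parent wait until the child has unshared its netns. */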
+ if (pipe(fd) == -1) {
+ return 1;
+ }
+
+ pid = fork();
+ if (pid == -1) {
+ return 1;
+ }
+
+ if (pid == 0) {
+ if (unshare(CLONE_NEWNET) == -1) {
+ return 1;
+ }
+
+ if (write(fd[1], &_, 1) != 1) {
+ return 1;
+ }
+
+ pause();
+
+ return 0;
+ }
+
+ if (read(fd[0], &_, 1) != 1) {
+ return 1;
+ }
+
+ {
+ char buf[64];
+ snprintf(buf, sizeof(buf), "/proc/%u/ns/net", pid);
+ nsfd = open(buf, O_RDONLY);
+ if (nsfd == -1) {
+ return 1;
+ }
+ }
+
+ /* Reliably pin dentry into dcache. */
+ (void)open("/proc/net/unix", O_RDONLY);
+
+ if (setns(nsfd, CLONE_NEWNET) == -1) {
+ return 1;
+ }
+
+ kill(pid, SIGTERM);
+ pid = 0;
+
+ {
+ char buf[4096];
+ ssize_t rv;
+ int fd;
+
+ fd = open("/proc/net/unix", O_RDONLY);
+ if (fd == -1) {
+ return 1;
+ }
+
+#define S "Num RefCount Protocol Flags Type St Inode Path\n"
+ rv = read(fd, buf, sizeof(buf));
+
+ assert(rv == strlen(S));
+ assert(memcmp(buf, S, strlen(S)) == 0);
+ }
+
+ return 0;
+}
diff --git a/tools/testing/selftests/timers/Makefile b/tools/testing/selftests/timers/Makefile
index c02683c..7656c7c 100644
--- a/tools/testing/selftests/timers/Makefile
+++ b/tools/testing/selftests/timers/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
CFLAGS += -O3 -Wl,-no-as-needed -Wall
-LDFLAGS += -lrt -lpthread -lm
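+# These are libraries, so they belong in LDLIBS, which follows the objects when linking.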
+LDLIBS += -lrt -lpthread -lm
# these are all "safe" tests that don't modify
# system time or require escalated privileges