Merge "thermal: tsens: Use the new threshold notification API"
diff --git a/AndroidKernel.mk b/AndroidKernel.mk
index 6ea10f7..7233829 100644
--- a/AndroidKernel.mk
+++ b/AndroidKernel.mk
@@ -46,7 +46,7 @@
ifeq ($(TARGET_KERNEL_CROSS_COMPILE_PREFIX),)
KERNEL_CROSS_COMPILE := arm-eabi-
else
-KERNEL_CROSS_COMPILE := $(shell pwd)/$(TARGET_TOOLS_PREFIX)
+KERNEL_CROSS_COMPILE := $(TARGET_KERNEL_CROSS_COMPILE_PREFIX)
endif
ifeq ($(TARGET_PREBUILT_KERNEL),)
diff --git a/Documentation/devicetree/bindings/net/macb.txt b/Documentation/devicetree/bindings/net/macb.txt
index 1506e94..d1f435c 100644
--- a/Documentation/devicetree/bindings/net/macb.txt
+++ b/Documentation/devicetree/bindings/net/macb.txt
@@ -10,6 +10,7 @@
Use "cdns,pc302-gem" for Picochip picoXcell pc302 and later devices based on
the Cadence GEM, or the generic form: "cdns,gem".
Use "atmel,sama5d2-gem" for the GEM IP (10/100) available on Atmel sama5d2 SoCs.
+ Use "atmel,sama5d3-macb" for the 10/100Mbit IP available on Atmel sama5d3 SoCs.
Use "atmel,sama5d3-gem" for the Gigabit IP available on Atmel sama5d3 SoCs.
Use "atmel,sama5d4-gem" for the GEM IP (10/100) available on Atmel sama5d4 SoCs.
Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
diff --git a/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt b/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt
index 6026756..138fb7e 100644
--- a/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt
+++ b/Documentation/devicetree/bindings/pci/msm_ep_pcie.txt
@@ -4,7 +4,8 @@
- compatible: should be "qcom,pcie-ep".
- reg: should contain PCIe register maps.
- reg-names: indicates various resources passed to driver by name.
- Should be "msi", "dm_core", "elbi", "parf", "phy", "mmio".
+ Should be "msi", "dm_core", "elbi", "parf", "phy", "mmio",
+ "tcsr_pcie_perst_en".
These correspond to different modules within the PCIe domain.
- #address-cells: Should provide a value of 0.
- interrupt-parent: Should be the PCIe device node itself here.
@@ -77,8 +78,11 @@
<0xbffff000 0x1000>,
<0xfc520000 0x2000>,
<0xfc526000 0x1000>,
- <0xfc527000 0x1000>;
- reg-names = "msi", "dm_core", "elbi", "parf", "phy", "mmio";
+ <0xfc527000 0x1000>,
+ <0x01fcb000 0x1000>;
+
+ reg-names = "msi", "dm_core", "elbi", "parf", "phy", "mmio",
+		"tcsr_pcie_perst_en";
#address-cells = <0>;
interrupt-parent = <&pcie_ep>;
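
For orientation, here is a minimal sketch of how an endpoint driver could pick up the new region via the documented reg-names entry. The helper below is hypothetical; only the resource name comes from this binding:

    #include <linux/platform_device.h>
    #include <linux/ioport.h>
    #include <linux/io.h>

    /* Hypothetical helper: map the TCSR PERST-enable region by name. */
    static void __iomem *ep_pcie_map_tcsr(struct platform_device *pdev)
    {
            struct resource *res;

            res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
                                               "tcsr_pcie_perst_en");
            if (!res)
                    return NULL;    /* region absent on older device trees */

            /* returns an ERR_PTR on mapping failure */
            return devm_ioremap_resource(&pdev->dev, res);
    }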
diff --git a/Documentation/devicetree/bindings/power/reset/msm-poweroff.txt b/Documentation/devicetree/bindings/power/reset/msm-poweroff.txt
index ce44ad3..1492616e 100644
--- a/Documentation/devicetree/bindings/power/reset/msm-poweroff.txt
+++ b/Documentation/devicetree/bindings/power/reset/msm-poweroff.txt
@@ -9,6 +9,10 @@
-compatible: "qcom,pshold"
-reg: Specifies the physical address of the ps-hold register
+Optional Properties:
+-qcom,force-warm-reboot: Issue a warm reboot even in cases where a hard
+	reboot would traditionally be issued.
+
Example:
restart@fc4ab000 {
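
A minimal sketch (assumed helper, not taken from this patch) of how a poweroff driver could consume the optional flag:

    #include <linux/of.h>

    /* Hypothetical: true when the DT opts in to the warm-reboot path. */
    static bool use_warm_reboot(const struct device_node *np)
    {
            return of_property_read_bool(np, "qcom,force-warm-reboot");
    }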
diff --git a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
index 6c9d8253..c517d3c 100644
--- a/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
+++ b/Documentation/devicetree/bindings/sound/qcom-audio-dev.txt
@@ -1643,6 +1643,16 @@
threshold values for different codecs. First parameter is V(voltage)
second one is i(current), third one is r (resistance). Depending on the
codec set corresponding element in array and set others to 0.
+- qcom,msm-linein-det-swh: This property distinguishes the linein jack
+  switch type on the target. The switch type is typically normally open or
+  normally closed; set this property to 0 for normally closed and 1 for
+  normally open.
+- qcom,msm-lineout-det-swh: This property distinguishes the lineout jack
+  switch type on the target. The switch type is typically normally open or
+  normally closed; set this property to 0 for normally closed and 1 for
+  normally open.
+- qcom,linein-det-gpio : GPIO on which linein jack insertion/removal interrupt is received.
+- qcom,lineout-det-gpio : GPIO on which lineout jack insertion/removal interrupt is received.
Example:
sound {
@@ -1703,6 +1713,10 @@
<&wsa881x_213>, <&wsa881x_214>;
qcom,wsa-aux-dev-prefix = "SpkrRight", "SpkrLeft",
"SpkrRight", "SpkrLeft";
+		qcom,msm-linein-det-swh = <1>;
+		qcom,msm-lineout-det-swh = <1>;
+ qcom,linein-det-gpio = <&tlmm 124 0>;
+ qcom,lineout-det-gpio = <&tlmm 125 0>;
};
* MSM8952 Slimbus ASoC Machine driver
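
As a hedged illustration, a machine driver could parse these properties roughly as follows. The helper name and error handling are assumptions; only the property strings come from the binding above:

    #include <linux/of.h>
    #include <linux/of_gpio.h>

    /* Hypothetical parse helper for the linein detection properties. */
    static int parse_linein_det(struct device_node *np, int *gpio, u32 *swh)
    {
            *gpio = of_get_named_gpio(np, "qcom,linein-det-gpio", 0);
            if (*gpio < 0)
                    return *gpio;

            /* 0 = normally closed, 1 = normally open (see above) */
            return of_property_read_u32(np, "qcom,msm-linein-det-swh", swh);
    }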
diff --git a/Documentation/devicetree/bindings/usb/msm-phy.txt b/Documentation/devicetree/bindings/usb/msm-phy.txt
index bcaa311..684df30 100644
--- a/Documentation/devicetree/bindings/usb/msm-phy.txt
+++ b/Documentation/devicetree/bindings/usb/msm-phy.txt
@@ -189,8 +189,9 @@
 - qcom,hold-reset: Indicates that the QUSB PHY should be held in the reset state.
- qcom,phy-clk-scheme: Should be one of "cml" or "cmos" if ref_clk_addr is provided.
- qcom,major-rev: provide major revision number to differentiate power up sequence. default is 2.0
- - pinctrl-names/pinctrl-0/1: The GPIOs configured as output function. Names represents "active"
- state when attached in host mode and "suspend" state when detached.
+ - pinctrl-names/pinctrl-0/1: The GPIOs configured as outputs. The names represent the "active" or
+   "atest_usb13_active" state when attached in host mode and the "suspend" or "atest_usb13_suspend"
+   state when detached.
- qcom,tune2-efuse-correction: The value to be adjusted from fused value for
improved rise/fall times.
- qcom,host-chirp-erratum: Indicates host chirp fix is required.
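
A sketch, under the assumption that the PHY driver selects these states with the standard pinctrl consumer API (the helper itself is hypothetical):

    #include <linux/err.h>
    #include <linux/pinctrl/consumer.h>

    /* Hypothetical: pick the pinctrl state matching the attach state. */
    static int qusb_phy_select_pins(struct device *dev, bool host_attached)
    {
            struct pinctrl *p = devm_pinctrl_get(dev);
            struct pinctrl_state *s;

            if (IS_ERR(p))
                    return PTR_ERR(p);

            s = pinctrl_lookup_state(p, host_attached ?
                                     "atest_usb13_active" :
                                     "atest_usb13_suspend");
            if (IS_ERR(s))
                    return PTR_ERR(s);

            return pinctrl_select_state(p, s);
    }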
diff --git a/Documentation/filesystems/f2fs.txt b/Documentation/filesystems/f2fs.txt
index 193a034..d9a0f69 100644
--- a/Documentation/filesystems/f2fs.txt
+++ b/Documentation/filesystems/f2fs.txt
@@ -154,6 +154,26 @@
enabled by default.
data_flush Enable data flushing before checkpoint in order to
                       persist data of regular files and symlinks.
+fault_injection=%d Enable fault injection in all supported types with
+ specified injection rate.
+fault_type=%d          Support configuring the fault injection type; it
+                       should be enabled together with the fault_injection
+                       option. The fault type values are shown below; single
+                       or combined types are supported.
+ Type_Name Type_Value
+ FAULT_KMALLOC 0x000000001
+ FAULT_KVMALLOC 0x000000002
+ FAULT_PAGE_ALLOC 0x000000004
+ FAULT_PAGE_GET 0x000000008
+ FAULT_ALLOC_BIO 0x000000010
+ FAULT_ALLOC_NID 0x000000020
+ FAULT_ORPHAN 0x000000040
+ FAULT_BLOCK 0x000000080
+ FAULT_DIR_DEPTH 0x000000100
+ FAULT_EVICT_INODE 0x000000200
+ FAULT_TRUNCATE 0x000000400
+ FAULT_IO 0x000000800
+ FAULT_CHECKPOINT 0x000001000
+ FAULT_DISCARD 0x000002000
mode=%s Control block allocation mode which supports "adaptive"
and "lfs". In "lfs" mode, there should be no random
writes towards main area.
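
Since the fault type values added above are bit flags, a combined type is simply the bitwise OR of the individual values. A small illustration (the mount line is an assumed usage, not quoted from this document):

    /* FAULT_KMALLOC | FAULT_PAGE_ALLOC == 0x1 | 0x4 == 0x5, so
     * "mount -o fault_injection=1,fault_type=0x5" would enable both. */
    #define FAULT_KMALLOC    0x000000001
    #define FAULT_PAGE_ALLOC 0x000000004

    static const unsigned int combined_fault_type =
            FAULT_KMALLOC | FAULT_PAGE_ALLOC;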
diff --git a/Documentation/filesystems/fscrypt.rst b/Documentation/filesystems/fscrypt.rst
deleted file mode 100644
index 48b424d..0000000
--- a/Documentation/filesystems/fscrypt.rst
+++ /dev/null
@@ -1,626 +0,0 @@
-=====================================
-Filesystem-level encryption (fscrypt)
-=====================================
-
-Introduction
-============
-
-fscrypt is a library which filesystems can hook into to support
-transparent encryption of files and directories.
-
-Note: "fscrypt" in this document refers to the kernel-level portion,
-implemented in ``fs/crypto/``, as opposed to the userspace tool
-`fscrypt <https://github.com/google/fscrypt>`_. This document only
-covers the kernel-level portion. For command-line examples of how to
-use encryption, see the documentation for the userspace tool `fscrypt
-<https://github.com/google/fscrypt>`_. Also, it is recommended to use
-the fscrypt userspace tool, or other existing userspace tools such as
-`fscryptctl <https://github.com/google/fscryptctl>`_ or `Android's key
-management system
-<https://source.android.com/security/encryption/file-based>`_, over
-using the kernel's API directly. Using existing tools reduces the
-chance of introducing your own security bugs. (Nevertheless, for
-completeness this documentation covers the kernel's API anyway.)
-
-Unlike dm-crypt, fscrypt operates at the filesystem level rather than
-at the block device level. This allows it to encrypt different files
-with different keys and to have unencrypted files on the same
-filesystem. This is useful for multi-user systems where each user's
-data-at-rest needs to be cryptographically isolated from the others.
-However, except for filenames, fscrypt does not encrypt filesystem
-metadata.
-
-Unlike eCryptfs, which is a stacked filesystem, fscrypt is integrated
-directly into supported filesystems --- currently ext4, F2FS, and
-UBIFS. This allows encrypted files to be read and written without
-caching both the decrypted and encrypted pages in the pagecache,
-thereby nearly halving the memory used and bringing it in line with
-unencrypted files. Similarly, half as many dentries and inodes are
-needed. eCryptfs also limits encrypted filenames to 143 bytes,
-causing application compatibility issues; fscrypt allows the full 255
-bytes (NAME_MAX). Finally, unlike eCryptfs, the fscrypt API can be
-used by unprivileged users, with no need to mount anything.
-
-fscrypt does not support encrypting files in-place. Instead, it
-supports marking an empty directory as encrypted. Then, after
-userspace provides the key, all regular files, directories, and
-symbolic links created in that directory tree are transparently
-encrypted.
-
-Threat model
-============
-
-Offline attacks
----------------
-
-Provided that userspace chooses a strong encryption key, fscrypt
-protects the confidentiality of file contents and filenames in the
-event of a single point-in-time permanent offline compromise of the
-block device content. fscrypt does not protect the confidentiality of
-non-filename metadata, e.g. file sizes, file permissions, file
-timestamps, and extended attributes. Also, the existence and location
-of holes (unallocated blocks which logically contain all zeroes) in
-files is not protected.
-
-fscrypt is not guaranteed to protect confidentiality or authenticity
-if an attacker is able to manipulate the filesystem offline prior to
-an authorized user later accessing the filesystem.
-
-Online attacks
---------------
-
-fscrypt (and storage encryption in general) can only provide limited
-protection, if any at all, against online attacks. In detail:
-
-fscrypt is only resistant to side-channel attacks, such as timing or
-electromagnetic attacks, to the extent that the underlying Linux
-Cryptographic API algorithms are. If a vulnerable algorithm is used,
-such as a table-based implementation of AES, it may be possible for an
-attacker to mount a side channel attack against the online system.
-Side channel attacks may also be mounted against applications
-consuming decrypted data.
-
-After an encryption key has been provided, fscrypt is not designed to
-hide the plaintext file contents or filenames from other users on the
-same system, regardless of the visibility of the keyring key.
-Instead, existing access control mechanisms such as file mode bits,
-POSIX ACLs, LSMs, or mount namespaces should be used for this purpose.
-Also note that as long as the encryption keys are *anywhere* in
-memory, an online attacker can necessarily compromise them by mounting
-a physical attack or by exploiting any kernel security vulnerability
-which provides an arbitrary memory read primitive.
-
-While it is ostensibly possible to "evict" keys from the system,
-recently accessed encrypted files will remain accessible at least
-until the filesystem is unmounted or the VFS caches are dropped, e.g.
-using ``echo 2 > /proc/sys/vm/drop_caches``. Even after that, if the
-RAM is compromised before being powered off, it will likely still be
-possible to recover portions of the plaintext file contents, if not
-some of the encryption keys as well. (Since Linux v4.12, all
-in-kernel keys related to fscrypt are sanitized before being freed.
-However, userspace would need to do its part as well.)
-
-Currently, fscrypt does not prevent a user from maliciously providing
-an incorrect key for another user's existing encrypted files. A
-protection against this is planned.
-
-Key hierarchy
-=============
-
-Master Keys
------------
-
-Each encrypted directory tree is protected by a *master key*. Master
-keys can be up to 64 bytes long, and must be at least as long as the
-greater of the key length needed by the contents and filenames
-encryption modes being used. For example, if AES-256-XTS is used for
-contents encryption, the master key must be 64 bytes (512 bits). Note
-that the XTS mode is defined to require a key twice as long as that
-required by the underlying block cipher.
-
-To "unlock" an encrypted directory tree, userspace must provide the
-appropriate master key. There can be any number of master keys, each
-of which protects any number of directory trees on any number of
-filesystems.
-
-Userspace should generate master keys either using a cryptographically
-secure random number generator, or by using a KDF (Key Derivation
-Function). Note that whenever a KDF is used to "stretch" a
-lower-entropy secret such as a passphrase, it is critical that a KDF
-designed for this purpose be used, such as scrypt, PBKDF2, or Argon2.
-
-Per-file keys
--------------
-
-Master keys are not used to encrypt file contents or names directly.
-Instead, a unique key is derived for each encrypted file, including
-each regular file, directory, and symbolic link. This has several
-advantages:
-
-- In cryptosystems, the same key material should never be used for
- different purposes. Using the master key as both an XTS key for
- contents encryption and as a CTS-CBC key for filenames encryption
- would violate this rule.
-- Per-file keys simplify the choice of IVs (Initialization Vectors)
- for contents encryption. Without per-file keys, to ensure IV
- uniqueness both the inode and logical block number would need to be
- encoded in the IVs. This would make it impossible to renumber
- inodes, which e.g. ``resize2fs`` can do when resizing an ext4
- filesystem. With per-file keys, it is sufficient to encode just the
- logical block number in the IVs.
-- Per-file keys strengthen the encryption of filenames, where IVs are
- reused out of necessity. With a unique key per directory, IV reuse
- is limited to within a single directory.
-- Per-file keys allow individual files to be securely erased simply by
- securely erasing their keys. (Not yet implemented.)
-
-A KDF (Key Derivation Function) is used to derive per-file keys from
-the master key. This is done instead of wrapping a randomly-generated
-key for each file because it reduces the size of the encryption xattr,
-which for some filesystems makes the xattr more likely to fit in-line
-in the filesystem's inode table. With a KDF, only a 16-byte nonce is
-required --- long enough to make key reuse extremely unlikely. A
-wrapped key, on the other hand, would need to be up to 64 bytes ---
-the length of an AES-256-XTS key. Furthermore, currently there is no
-requirement to support unlocking a file with multiple alternative
-master keys or to support rotating master keys. Instead, the master
-keys may be wrapped in userspace, e.g. as done by the `fscrypt
-<https://github.com/google/fscrypt>`_ tool.
-
-The current KDF encrypts the master key using the 16-byte nonce as an
-AES-128-ECB key. The output is used as the derived key. If the
-output is longer than needed, then it is truncated to the needed
-length. Truncation is the norm for directories and symlinks, since
-those use the CTS-CBC encryption mode which requires a key half as
-long as that required by the XTS encryption mode.
-
-Note: this KDF meets the primary security requirement, which is to
-produce unique derived keys that preserve the entropy of the master
-key, assuming that the master key is already a good pseudorandom key.
-However, it is nonstandard and has some problems such as being
-reversible, so it is generally considered to be a mistake! It may be
-replaced with HKDF or another more standard KDF in the future.
-
-Encryption modes and usage
-==========================
-
-fscrypt allows one encryption mode to be specified for file contents
-and one encryption mode to be specified for filenames. Different
-directory trees are permitted to use different encryption modes.
-Currently, the following pairs of encryption modes are supported:
-
-- AES-256-XTS for contents and AES-256-CTS-CBC for filenames
-- AES-128-CBC for contents and AES-128-CTS-CBC for filenames
-- Speck128/256-XTS for contents and Speck128/256-CTS-CBC for filenames
-
-It is strongly recommended to use AES-256-XTS for contents encryption.
-AES-128-CBC was added only for low-powered embedded devices with
-crypto accelerators such as CAAM or CESA that do not support XTS.
-
-Similarly, Speck128/256 support was only added for older or low-end
-CPUs which cannot do AES fast enough -- especially ARM CPUs which have
-NEON instructions but not the Cryptography Extensions -- and for which
-it would not otherwise be feasible to use encryption at all. It is
-not recommended to use Speck on CPUs that have AES instructions.
-Speck support is only available if it has been enabled in the crypto
-API via CONFIG_CRYPTO_SPECK. Also, on ARM platforms, to get
-acceptable performance CONFIG_CRYPTO_SPECK_NEON must be enabled.
-
-New encryption modes can be added relatively easily, without changes
-to individual filesystems. However, authenticated encryption (AE)
-modes are not currently supported because of the difficulty of dealing
-with ciphertext expansion.
-
-For file contents, each filesystem block is encrypted independently.
-Currently, only the case where the filesystem block size is equal to
-the system's page size (usually 4096 bytes) is supported. With the
-XTS mode of operation (recommended), the logical block number within
-the file is used as the IV. With the CBC mode of operation (not
-recommended), ESSIV is used; specifically, the IV for CBC is the
-logical block number encrypted with AES-256, where the AES-256 key is
-the SHA-256 hash of the inode's data encryption key.
-
-For filenames, the full filename is encrypted at once. Because of the
-requirements to retain support for efficient directory lookups and
-filenames of up to 255 bytes, a constant initialization vector (IV) is
-used. However, each encrypted directory uses a unique key, which
-limits IV reuse to within a single directory. Note that IV reuse in
-the context of CTS-CBC encryption means that when the original
-filenames share a common prefix at least as long as the cipher block
-size (16 bytes for AES), the corresponding encrypted filenames will
-also share a common prefix. This is undesirable; it may be fixed in
-the future by switching to an encryption mode that is a strong
-pseudorandom permutation on arbitrary-length messages, e.g. the HEH
-(Hash-Encrypt-Hash) mode.
-
-Since filenames are encrypted with the CTS-CBC mode of operation, the
-plaintext and ciphertext filenames need not be multiples of the AES
-block size, i.e. 16 bytes. However, the minimum size that can be
-encrypted is 16 bytes, so shorter filenames are NUL-padded to 16 bytes
-before being encrypted. In addition, to reduce leakage of filename
-lengths via their ciphertexts, all filenames are NUL-padded to the
-next 4, 8, 16, or 32-byte boundary (configurable). 32 is recommended
-since this provides the best confidentiality, at the cost of making
-directory entries consume slightly more space. Note that since NUL
-(``\0``) is not otherwise a valid character in filenames, the padding
-will never produce duplicate plaintexts.
-
-Symbolic link targets are considered a type of filename and are
-encrypted in the same way as filenames in directory entries. Each
-symlink also uses a unique key; hence, the hardcoded IV is not a
-problem for symlinks.
-
-User API
-========
-
-Setting an encryption policy
-----------------------------
-
-The FS_IOC_SET_ENCRYPTION_POLICY ioctl sets an encryption policy on an
-empty directory or verifies that a directory or regular file already
-has the specified encryption policy. It takes in a pointer to a
-:c:type:`struct fscrypt_policy`, defined as follows::
-
- #define FS_KEY_DESCRIPTOR_SIZE 8
-
- struct fscrypt_policy {
- __u8 version;
- __u8 contents_encryption_mode;
- __u8 filenames_encryption_mode;
- __u8 flags;
- __u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
- };
-
-This structure must be initialized as follows:
-
-- ``version`` must be 0.
-
-- ``contents_encryption_mode`` and ``filenames_encryption_mode`` must
- be set to constants from ``<linux/fs.h>`` which identify the
- encryption modes to use. If unsure, use
- FS_ENCRYPTION_MODE_AES_256_XTS (1) for ``contents_encryption_mode``
- and FS_ENCRYPTION_MODE_AES_256_CTS (4) for
- ``filenames_encryption_mode``.
-
-- ``flags`` must be set to a value from ``<linux/fs.h>`` which
- identifies the amount of NUL-padding to use when encrypting
- filenames. If unsure, use FS_POLICY_FLAGS_PAD_32 (0x3).
-
-- ``master_key_descriptor`` specifies how to find the master key in
- the keyring; see `Adding keys`_. It is up to userspace to choose a
- unique ``master_key_descriptor`` for each master key. The e4crypt
- and fscrypt tools use the first 8 bytes of
- ``SHA-512(SHA-512(master_key))``, but this particular scheme is not
- required. Also, the master key need not be in the keyring yet when
- FS_IOC_SET_ENCRYPTION_POLICY is executed. However, it must be added
- before any files can be created in the encrypted directory.
-
-If the file is not yet encrypted, then FS_IOC_SET_ENCRYPTION_POLICY
-verifies that the file is an empty directory. If so, the specified
-encryption policy is assigned to the directory, turning it into an
-encrypted directory. After that, and after providing the
-corresponding master key as described in `Adding keys`_, all regular
-files, directories (recursively), and symlinks created in the
-directory will be encrypted, inheriting the same encryption policy.
-The filenames in the directory's entries will be encrypted as well.
-
-Alternatively, if the file is already encrypted, then
-FS_IOC_SET_ENCRYPTION_POLICY validates that the specified encryption
-policy exactly matches the actual one. If they match, then the ioctl
-returns 0. Otherwise, it fails with EEXIST. This works on both
-regular files and directories, including nonempty directories.
-
-Note that the ext4 filesystem does not allow the root directory to be
-encrypted, even if it is empty. Users who want to encrypt an entire
-filesystem with one key should consider using dm-crypt instead.
-
-FS_IOC_SET_ENCRYPTION_POLICY can fail with the following errors:
-
-- ``EACCES``: the file is not owned by the process's uid, nor does the
- process have the CAP_FOWNER capability in a namespace with the file
- owner's uid mapped
-- ``EEXIST``: the file is already encrypted with an encryption policy
- different from the one specified
-- ``EINVAL``: an invalid encryption policy was specified (invalid
- version, mode(s), or flags)
-- ``ENOTDIR``: the file is unencrypted and is a regular file, not a
- directory
-- ``ENOTEMPTY``: the file is unencrypted and is a nonempty directory
-- ``ENOTTY``: this type of filesystem does not implement encryption
-- ``EOPNOTSUPP``: the kernel was not configured with encryption
- support for this filesystem, or the filesystem superblock has not
- had encryption enabled on it. (For example, to use encryption on an
- ext4 filesystem, CONFIG_EXT4_ENCRYPTION must be enabled in the
- kernel config, and the superblock must have had the "encrypt"
- feature flag enabled using ``tune2fs -O encrypt`` or ``mkfs.ext4 -O
- encrypt``.)
-- ``EPERM``: this directory may not be encrypted, e.g. because it is
- the root directory of an ext4 filesystem
-- ``EROFS``: the filesystem is readonly
-
-Getting an encryption policy
-----------------------------
-
-The FS_IOC_GET_ENCRYPTION_POLICY ioctl retrieves the :c:type:`struct
-fscrypt_policy`, if any, for a directory or regular file. See above
-for the struct definition. No additional permissions are required
-beyond the ability to open the file.
-
-FS_IOC_GET_ENCRYPTION_POLICY can fail with the following errors:
-
-- ``EINVAL``: the file is encrypted, but it uses an unrecognized
- encryption context format
-- ``ENODATA``: the file is not encrypted
-- ``ENOTTY``: this type of filesystem does not implement encryption
-- ``EOPNOTSUPP``: the kernel was not configured with encryption
- support for this filesystem
-
-Note: if you only need to know whether a file is encrypted or not, on
-most filesystems it is also possible to use the FS_IOC_GETFLAGS ioctl
-and check for FS_ENCRYPT_FL, or to use the statx() system call and
-check for STATX_ATTR_ENCRYPTED in stx_attributes.
-
-Getting the per-filesystem salt
--------------------------------
-
-Some filesystems, such as ext4 and F2FS, also support the deprecated
-ioctl FS_IOC_GET_ENCRYPTION_PWSALT. This ioctl retrieves a randomly
-generated 16-byte value stored in the filesystem superblock. This
-value is intended to used as a salt when deriving an encryption key
-from a passphrase or other low-entropy user credential.
-
-FS_IOC_GET_ENCRYPTION_PWSALT is deprecated. Instead, prefer to
-generate and manage any needed salt(s) in userspace.
-
-Adding keys
------------
-
-To provide a master key, userspace must add it to an appropriate
-keyring using the add_key() system call (see:
-``Documentation/security/keys/core.rst``). The key type must be
-"logon"; keys of this type are kept in kernel memory and cannot be
-read back by userspace. The key description must be "fscrypt:"
-followed by the 16-character lower case hex representation of the
-``master_key_descriptor`` that was set in the encryption policy. The
-key payload must conform to the following structure::
-
- #define FS_MAX_KEY_SIZE 64
-
- struct fscrypt_key {
- u32 mode;
- u8 raw[FS_MAX_KEY_SIZE];
- u32 size;
- };
-
-``mode`` is ignored; just set it to 0. The actual key is provided in
-``raw`` with ``size`` indicating its size in bytes. That is, the
-bytes ``raw[0..size-1]`` (inclusive) are the actual key.
-
-The key description prefix "fscrypt:" may alternatively be replaced
-with a filesystem-specific prefix such as "ext4:". However, the
-filesystem-specific prefixes are deprecated and should not be used in
-new programs.
-
-There are several different types of keyrings in which encryption keys
-may be placed, such as a session keyring, a user session keyring, or a
-user keyring. Each key must be placed in a keyring that is "attached"
-to all processes that might need to access files encrypted with it, in
-the sense that request_key() will find the key. Generally, if only
-processes belonging to a specific user need to access a given
-encrypted directory and no session keyring has been installed, then
-that directory's key should be placed in that user's user session
-keyring or user keyring. Otherwise, a session keyring should be
-installed if needed, and the key should be linked into that session
-keyring, or in a keyring linked into that session keyring.
-
-Note: introducing the complex visibility semantics of keyrings here
-was arguably a mistake --- especially given that by design, after any
-process successfully opens an encrypted file (thereby setting up the
-per-file key), possessing the keyring key is not actually required for
-any process to read/write the file until its in-memory inode is
-evicted. In the future there probably should be a way to provide keys
-directly to the filesystem instead, which would make the intended
-semantics clearer.
-
-Access semantics
-================
-
-With the key
-------------
-
-With the encryption key, encrypted regular files, directories, and
-symlinks behave very similarly to their unencrypted counterparts ---
-after all, the encryption is intended to be transparent. However,
-astute users may notice some differences in behavior:
-
-- Unencrypted files, or files encrypted with a different encryption
- policy (i.e. different key, modes, or flags), cannot be renamed or
- linked into an encrypted directory; see `Encryption policy
- enforcement`_. Attempts to do so will fail with EPERM. However,
- encrypted files can be renamed within an encrypted directory, or
- into an unencrypted directory.
-
-- Direct I/O is not supported on encrypted files. Attempts to use
- direct I/O on such files will fall back to buffered I/O.
-
-- The fallocate operations FALLOC_FL_COLLAPSE_RANGE,
- FALLOC_FL_INSERT_RANGE, and FALLOC_FL_ZERO_RANGE are not supported
- on encrypted files and will fail with EOPNOTSUPP.
-
-- Online defragmentation of encrypted files is not supported. The
- EXT4_IOC_MOVE_EXT and F2FS_IOC_MOVE_RANGE ioctls will fail with
- EOPNOTSUPP.
-
-- The ext4 filesystem does not support data journaling with encrypted
- regular files. It will fall back to ordered data mode instead.
-
-- DAX (Direct Access) is not supported on encrypted files.
-
-- The st_size of an encrypted symlink will not necessarily give the
- length of the symlink target as required by POSIX. It will actually
- give the length of the ciphertext, which will be slightly longer
- than the plaintext due to NUL-padding and an extra 2-byte overhead.
-
-- The maximum length of an encrypted symlink is 2 bytes shorter than
- the maximum length of an unencrypted symlink. For example, on an
- EXT4 filesystem with a 4K block size, unencrypted symlinks can be up
- to 4095 bytes long, while encrypted symlinks can only be up to 4093
- bytes long (both lengths excluding the terminating null).
-
-Note that mmap *is* supported. This is possible because the pagecache
-for an encrypted file contains the plaintext, not the ciphertext.
-
-Without the key
----------------
-
-Some filesystem operations may be performed on encrypted regular
-files, directories, and symlinks even before their encryption key has
-been provided:
-
-- File metadata may be read, e.g. using stat().
-
-- Directories may be listed, in which case the filenames will be
- listed in an encoded form derived from their ciphertext. The
- current encoding algorithm is described in `Filename hashing and
- encoding`_. The algorithm is subject to change, but it is
- guaranteed that the presented filenames will be no longer than
- NAME_MAX bytes, will not contain the ``/`` or ``\0`` characters, and
- will uniquely identify directory entries.
-
- The ``.`` and ``..`` directory entries are special. They are always
- present and are not encrypted or encoded.
-
-- Files may be deleted. That is, nondirectory files may be deleted
- with unlink() as usual, and empty directories may be deleted with
- rmdir() as usual. Therefore, ``rm`` and ``rm -r`` will work as
- expected.
-
-- Symlink targets may be read and followed, but they will be presented
- in encrypted form, similar to filenames in directories. Hence, they
- are unlikely to point to anywhere useful.
-
-Without the key, regular files cannot be opened or truncated.
-Attempts to do so will fail with ENOKEY. This implies that any
-regular file operations that require a file descriptor, such as
-read(), write(), mmap(), fallocate(), and ioctl(), are also forbidden.
-
-Also without the key, files of any type (including directories) cannot
-be created or linked into an encrypted directory, nor can a name in an
-encrypted directory be the source or target of a rename, nor can an
-O_TMPFILE temporary file be created in an encrypted directory. All
-such operations will fail with ENOKEY.
-
-It is not currently possible to backup and restore encrypted files
-without the encryption key. This would require special APIs which
-have not yet been implemented.
-
-Encryption policy enforcement
-=============================
-
-After an encryption policy has been set on a directory, all regular
-files, directories, and symbolic links created in that directory
-(recursively) will inherit that encryption policy. Special files ---
-that is, named pipes, device nodes, and UNIX domain sockets --- will
-not be encrypted.
-
-Except for those special files, it is forbidden to have unencrypted
-files, or files encrypted with a different encryption policy, in an
-encrypted directory tree. Attempts to link or rename such a file into
-an encrypted directory will fail with EPERM. This is also enforced
-during ->lookup() to provide limited protection against offline
-attacks that try to disable or downgrade encryption in known locations
-where applications may later write sensitive data. It is recommended
-that systems implementing a form of "verified boot" take advantage of
-this by validating all top-level encryption policies prior to access.
-
-Implementation details
-======================
-
-Encryption context
-------------------
-
-An encryption policy is represented on-disk by a :c:type:`struct
-fscrypt_context`. It is up to individual filesystems to decide where
-to store it, but normally it would be stored in a hidden extended
-attribute. It should *not* be exposed by the xattr-related system
-calls such as getxattr() and setxattr() because of the special
-semantics of the encryption xattr. (In particular, there would be
-much confusion if an encryption policy were to be added to or removed
-from anything other than an empty directory.) The struct is defined
-as follows::
-
- #define FS_KEY_DESCRIPTOR_SIZE 8
- #define FS_KEY_DERIVATION_NONCE_SIZE 16
-
- struct fscrypt_context {
- u8 format;
- u8 contents_encryption_mode;
- u8 filenames_encryption_mode;
- u8 flags;
- u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
- u8 nonce[FS_KEY_DERIVATION_NONCE_SIZE];
- };
-
-Note that :c:type:`struct fscrypt_context` contains the same
-information as :c:type:`struct fscrypt_policy` (see `Setting an
-encryption policy`_), except that :c:type:`struct fscrypt_context`
-also contains a nonce. The nonce is randomly generated by the kernel
-and is used to derive the inode's encryption key as described in
-`Per-file keys`_.
-
-Data path changes
------------------
-
-For the read path (->readpage()) of regular files, filesystems can
-read the ciphertext into the page cache and decrypt it in-place. The
-page lock must be held until decryption has finished, to prevent the
-page from becoming visible to userspace prematurely.
-
-For the write path (->writepage()) of regular files, filesystems
-cannot encrypt data in-place in the page cache, since the cached
-plaintext must be preserved. Instead, filesystems must encrypt into a
-temporary buffer or "bounce page", then write out the temporary
-buffer. Some filesystems, such as UBIFS, already use temporary
-buffers regardless of encryption. Other filesystems, such as ext4 and
-F2FS, have to allocate bounce pages specially for encryption.
-
-Filename hashing and encoding
------------------------------
-
-Modern filesystems accelerate directory lookups by using indexed
-directories. An indexed directory is organized as a tree keyed by
-filename hashes. When a ->lookup() is requested, the filesystem
-normally hashes the filename being looked up so that it can quickly
-find the corresponding directory entry, if any.
-
-With encryption, lookups must be supported and efficient both with and
-without the encryption key. Clearly, it would not work to hash the
-plaintext filenames, since the plaintext filenames are unavailable
-without the key. (Hashing the plaintext filenames would also make it
-impossible for the filesystem's fsck tool to optimize encrypted
-directories.) Instead, filesystems hash the ciphertext filenames,
-i.e. the bytes actually stored on-disk in the directory entries. When
-asked to do a ->lookup() with the key, the filesystem just encrypts
-the user-supplied name to get the ciphertext.
-
-Lookups without the key are more complicated. The raw ciphertext may
-contain the ``\0`` and ``/`` characters, which are illegal in
-filenames. Therefore, readdir() must base64-encode the ciphertext for
-presentation. For most filenames, this works fine; on ->lookup(), the
-filesystem just base64-decodes the user-supplied name to get back to
-the raw ciphertext.
-
-However, for very long filenames, base64 encoding would cause the
-filename length to exceed NAME_MAX. To prevent this, readdir()
-actually presents long filenames in an abbreviated form which encodes
-a strong "hash" of the ciphertext filename, along with the optional
-filesystem-specific hash(es) needed for directory lookups. This
-allows the filesystem to still, with a high degree of confidence, map
-the filename given in ->lookup() back to a particular directory entry
-that was previously listed by readdir(). See :c:type:`struct
-fscrypt_digested_name` in the source for more details.
-
-Note that the precise way that filenames are presented to userspace
-without the key is subject to change in the future. It is only meant
-as a way to temporarily present valid filenames so that commands like
-``rm -r`` work as expected on encrypted directories.
diff --git a/Documentation/hwmon/ina2xx b/Documentation/hwmon/ina2xx
index cfd31d9..f8bf140 100644
--- a/Documentation/hwmon/ina2xx
+++ b/Documentation/hwmon/ina2xx
@@ -32,7 +32,7 @@
Datasheet: Publicly available at the Texas Instruments website
http://www.ti.com/
-Author: Lothar Felten <l-felten@ti.com>
+Author: Lothar Felten <lothar.felten@gmail.com>
Description
-----------
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index f2139f5..fdc9af2 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -1100,12 +1100,6 @@
nopku [X86] Disable Memory Protection Keys CPU feature found
in some Intel CPUs.
- eagerfpu= [X86]
- on enable eager fpu restore
- off disable eager fpu restore
- auto selects the default scheme, which automatically
- enables eagerfpu restore for xsaveopt.
-
module.async_probe [KNL]
Enable asynchronous probe on this module.
diff --git a/Documentation/networking/ip-sysctl.txt b/Documentation/networking/ip-sysctl.txt
index e52a472..827622e 100644
--- a/Documentation/networking/ip-sysctl.txt
+++ b/Documentation/networking/ip-sysctl.txt
@@ -122,14 +122,11 @@
IP Fragmentation:
-ipfrag_high_thresh - INTEGER
- Maximum memory used to reassemble IP fragments. When
- ipfrag_high_thresh bytes of memory is allocated for this purpose,
- the fragment handler will toss packets until ipfrag_low_thresh
- is reached. This also serves as a maximum limit to namespaces
- different from the initial one.
+ipfrag_high_thresh - LONG INTEGER
+ Maximum memory used to reassemble IP fragments.
-ipfrag_low_thresh - INTEGER
+ipfrag_low_thresh - LONG INTEGER
+ (Obsolete since linux-4.17)
Maximum memory used to reassemble IP fragments before the kernel
begins to remove incomplete fragment queues to free up resources.
The kernel still accepts new fragments for defragmentation.
diff --git a/Makefile b/Makefile
index cdc22f3..5e8c574 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 4
PATCHLEVEL = 9
-SUBLEVEL = 129
+SUBLEVEL = 135
EXTRAVERSION =
NAME = Roaring Lionus
diff --git a/arch/Kconfig b/arch/Kconfig
index ad306a9..0ecbd6d 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -530,6 +530,7 @@
bool "Use clang Link Time Optimization (LTO) (EXPERIMENTAL)"
depends on ARCH_SUPPORTS_LTO_CLANG
depends on !FTRACE_MCOUNT_RECORD || HAVE_C_RECORDMCOUNT
+ depends on !KASAN
select LTO
select THIN_ARCHIVES
select LD_DEAD_CODE_DATA_ELIMINATION
diff --git a/arch/arc/Makefile b/arch/arc/Makefile
index 8447eed..a3b4560 100644
--- a/arch/arc/Makefile
+++ b/arch/arc/Makefile
@@ -8,34 +8,12 @@
UTS_MACHINE := arc
-ifeq ($(CROSS_COMPILE),)
-ifndef CONFIG_CPU_BIG_ENDIAN
-CROSS_COMPILE := arc-linux-
-else
-CROSS_COMPILE := arceb-linux-
-endif
-endif
-
KBUILD_DEFCONFIG := nsim_700_defconfig
cflags-y += -fno-common -pipe -fno-builtin -mmedium-calls -D__linux__
cflags-$(CONFIG_ISA_ARCOMPACT) += -mA7
cflags-$(CONFIG_ISA_ARCV2) += -mcpu=archs
-is_700 = $(shell $(CC) -dM -E - < /dev/null | grep -q "ARC700" && echo 1 || echo 0)
-
-ifdef CONFIG_ISA_ARCOMPACT
-ifeq ($(is_700), 0)
- $(error Toolchain not configured for ARCompact builds)
-endif
-endif
-
-ifdef CONFIG_ISA_ARCV2
-ifeq ($(is_700), 1)
- $(error Toolchain not configured for ARCv2 builds)
-endif
-endif
-
ifdef CONFIG_ARC_CURR_IN_REG
# For a global register definition, make sure it gets passed to every file
# We had a customer-reported bug where some code built in kernel was NOT using
@@ -89,7 +67,7 @@
# --build-id w/o "-marclinux". Default arc-elf32-ld is OK
ldflags-$(upto_gcc44) += -marclinux
-LIBGCC := $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
+LIBGCC = $(shell $(CC) $(cflags-y) --print-libgcc-file-name)
# Modules with short calls might break for calls into builtin-kernel
KBUILD_CFLAGS_MODULE += -mlong-calls -mno-millicode
diff --git a/arch/arc/include/asm/atomic.h b/arch/arc/include/asm/atomic.h
index 54b54da..49112f7 100644
--- a/arch/arc/include/asm/atomic.h
+++ b/arch/arc/include/asm/atomic.h
@@ -84,7 +84,7 @@
"1: llock %[orig], [%[ctr]] \n" \
" " #asm_op " %[val], %[orig], %[i] \n" \
" scond %[val], [%[ctr]] \n" \
- " \n" \
+ " bnz 1b \n" \
: [val] "=&r" (val), \
[orig] "=&r" (orig) \
: [ctr] "r" (&v->counter), \
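
The restored "bnz 1b" is the backward branch that retries the sequence when scond fails; without it a failed store-conditional would fall through and the update could be lost. In C terms, the loop is equivalent to the following sketch (illustrative only, not the kernel's actual implementation):

    #include <linux/atomic.h>

    /* C-level sketch of the llock/scond retry loop above. */
    static inline void atomic_op_sketch(int i, atomic_t *v)
    {
            int old, new;

            do {
                    old = atomic_read(v);   /* llock  %[orig], [%[ctr]]      */
                    new = old + i;          /* #asm_op %[val], %[orig], %[i] */
            } while (atomic_cmpxchg(v, old, new) != old); /* scond + bnz 1b */
    }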
diff --git a/arch/arc/kernel/process.c b/arch/arc/kernel/process.c
index 0e8c015..3ce1213 100644
--- a/arch/arc/kernel/process.c
+++ b/arch/arc/kernel/process.c
@@ -213,6 +213,26 @@
task_thread_info(current)->thr_ptr;
}
+
+ /*
+ * setup usermode thread pointer #1:
+ * when child is picked by scheduler, __switch_to() uses @c_callee to
+ * populate usermode callee regs: this works (despite being in a kernel
+ * function) since special return path for child @ret_from_fork()
+ * ensures those regs are not clobbered all the way to RTIE to usermode
+ */
+ c_callee->r25 = task_thread_info(p)->thr_ptr;
+
+#ifdef CONFIG_ARC_CURR_IN_REG
+ /*
+ * setup usermode thread pointer #2:
+ * however for this special use of r25 in kernel, __switch_to() sets
+ * r25 for kernel needs and only in the final return path is usermode
+ * r25 setup, from pt_regs->user_r25. So set that up as well
+ */
+ c_regs->user_r25 = c_callee->r25;
+#endif
+
return 0;
}
diff --git a/arch/arm/boot/dts/dra7.dtsi b/arch/arm/boot/dts/dra7.dtsi
index ce54a70..a1a9280 100644
--- a/arch/arm/boot/dts/dra7.dtsi
+++ b/arch/arm/boot/dts/dra7.dtsi
@@ -1770,7 +1770,7 @@
};
};
- dcan1: can@481cc000 {
+ dcan1: can@4ae3c000 {
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan1";
reg = <0x4ae3c000 0x2000>;
@@ -1780,7 +1780,7 @@
status = "disabled";
};
- dcan2: can@481d0000 {
+ dcan2: can@48480000 {
compatible = "ti,dra7-d_can";
ti,hwmods = "dcan2";
reg = <0x48480000 0x2000>;
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
index 1428f37..ba73271 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp-256.dtsi
@@ -37,3 +37,7 @@
&mhi_device {
status = "okay";
};
+
+&restart_pshold {
+ qcom,force-warm-reboot;
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
index 81877b7..57bd822 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-cdp.dtsi
@@ -31,3 +31,7 @@
&mhi_device {
status = "okay";
};
+
+&restart_pshold {
+ qcom,force-warm-reboot;
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
index d7c0d13..1f99451 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp-256.dtsi
@@ -37,3 +37,7 @@
&mhi_device {
status = "okay";
};
+
+&restart_pshold {
+ qcom,force-warm-reboot;
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
index 6ceac6e..d8edfde 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills-pcie-ep-mtp.dtsi
@@ -31,3 +31,7 @@
&mhi_device {
status = "okay";
};
+
+&restart_pshold {
+ qcom,force-warm-reboot;
+};
diff --git a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
index f6768d8..782b1ff 100644
--- a/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
+++ b/arch/arm/boot/dts/qcom/sdxpoorwills.dtsi
@@ -257,9 +257,9 @@
qcom,cpulist = <&CPU0>;
qcom,target-dev = <&cpubw>;
qcom,core-dev-table =
- < 153600 MHZ_TO_MBPS(200, 2) >,
- < 576000 MHZ_TO_MBPS(691, 2) >,
- < 1497600 MHZ_TO_MBPS(1383, 2)>;
+ < 153600 MHZ_TO_MBPS( 200, 4) >,
+ < 576000 MHZ_TO_MBPS( 691, 4) >,
+ < 1497600 MHZ_TO_MBPS(1383, 4)>;
};
clock_gcc: qcom,gcc@100000 {
@@ -619,7 +619,7 @@
clock-frequency = <32768>;
};
- restart@c264000 {
+ restart_pshold: restart@c264000 {
compatible = "qcom,pshold";
reg = <0x0c264000 0x4>,
<0x01fd3000 0x4>;
diff --git a/arch/arm/boot/dts/sama5d3_emac.dtsi b/arch/arm/boot/dts/sama5d3_emac.dtsi
index 7cb235e..6e9e1c2 100644
--- a/arch/arm/boot/dts/sama5d3_emac.dtsi
+++ b/arch/arm/boot/dts/sama5d3_emac.dtsi
@@ -41,7 +41,7 @@
};
macb1: ethernet@f802c000 {
- compatible = "cdns,at91sam9260-macb", "cdns,macb";
+ compatible = "atmel,sama5d3-macb", "cdns,at91sam9260-macb", "cdns,macb";
reg = <0xf802c000 0x100>;
interrupts = <35 IRQ_TYPE_LEVEL_HIGH 3>;
pinctrl-names = "default";
diff --git a/arch/arm/configs/msm8909-perf_defconfig b/arch/arm/configs/msm8909-perf_defconfig
index b641f37..ade0f45 100644
--- a/arch/arm/configs/msm8909-perf_defconfig
+++ b/arch/arm/configs/msm8909-perf_defconfig
@@ -433,6 +433,7 @@
CONFIG_INV_ICM20602_IIO=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_QTI_MPM=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm/configs/msm8909_defconfig b/arch/arm/configs/msm8909_defconfig
index ccf4b59..7fba408 100644
--- a/arch/arm/configs/msm8909_defconfig
+++ b/arch/arm/configs/msm8909_defconfig
@@ -479,6 +479,7 @@
CONFIG_INV_ICM20602_IIO=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_QTI_MPM=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm/configs/msm8909w-perf_defconfig b/arch/arm/configs/msm8909w-perf_defconfig
index 59f882d..13d07d2 100644
--- a/arch/arm/configs/msm8909w-perf_defconfig
+++ b/arch/arm/configs/msm8909w-perf_defconfig
@@ -337,6 +337,7 @@
CONFIG_FB_MSM_MDSS_WRITEBACK=y
CONFIG_FB_MSM_MDSS_SPI_PANEL=y
CONFIG_FB_MSM_MDSS_MDP3=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_SOUND=y
@@ -449,6 +450,7 @@
CONFIG_QCOM_SPMI_VADC=y
CONFIG_QCOM_RRADC=y
CONFIG_PWM=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_QTI_MPM=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm/configs/msm8909w_defconfig b/arch/arm/configs/msm8909w_defconfig
index 04a60b0..56ae8fa 100644
--- a/arch/arm/configs/msm8909w_defconfig
+++ b/arch/arm/configs/msm8909w_defconfig
@@ -329,6 +329,7 @@
CONFIG_FB_MSM_MDSS_WRITEBACK=y
CONFIG_FB_MSM_MDSS_SPI_PANEL=y
CONFIG_FB_MSM_MDSS_MDP3=y
+CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
# CONFIG_LCD_CLASS_DEVICE is not set
CONFIG_BACKLIGHT_CLASS_DEVICE=y
@@ -396,6 +397,8 @@
CONFIG_MAILBOX=y
CONFIG_ARM_SMMU=y
CONFIG_QCOM_LAZY_MAPPING=y
+CONFIG_IOMMU_DEBUG=y
+CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
CONFIG_MSM_BOOT_STATS=y
@@ -444,6 +447,7 @@
CONFIG_QCOM_SPMI_VADC=y
CONFIG_QCOM_RRADC=y
CONFIG_PWM=y
+CONFIG_QCOM_SHOW_RESUME_IRQ=y
CONFIG_QTI_MPM=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
diff --git a/arch/arm/configs/msm8937-perf_defconfig b/arch/arm/configs/msm8937-perf_defconfig
index c47203f..73e3c87 100644
--- a/arch/arm/configs/msm8937-perf_defconfig
+++ b/arch/arm/configs/msm8937-perf_defconfig
@@ -487,6 +487,7 @@
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
CONFIG_USB_CONFIGFS_RNDIS=y
CONFIG_USB_CONFIGFS_RMNET_BAM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
diff --git a/arch/arm/configs/msm8937_defconfig b/arch/arm/configs/msm8937_defconfig
index e7017ec..a74358c 100644
--- a/arch/arm/configs/msm8937_defconfig
+++ b/arch/arm/configs/msm8937_defconfig
@@ -496,6 +496,7 @@
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
CONFIG_USB_CONFIGFS_RNDIS=y
CONFIG_USB_CONFIGFS_RMNET_BAM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
diff --git a/arch/arm/configs/sdxpoorwills-auto-perf_defconfig b/arch/arm/configs/sdxpoorwills-auto-perf_defconfig
index 1fe7178..f26242f 100644
--- a/arch/arm/configs/sdxpoorwills-auto-perf_defconfig
+++ b/arch/arm/configs/sdxpoorwills-auto-perf_defconfig
@@ -103,6 +103,7 @@
CONFIG_NETFILTER_XT_MATCH_DSCP=y
CONFIG_NETFILTER_XT_MATCH_ESP=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
@@ -152,6 +153,11 @@
CONFIG_BRIDGE_EBT_ARPREPLY=y
CONFIG_BRIDGE_EBT_DNAT=y
CONFIG_BRIDGE_EBT_SNAT=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
CONFIG_BRIDGE=y
CONFIG_VLAN_8021Q=y
CONFIG_NET_SCHED=y
@@ -209,6 +215,7 @@
# CONFIG_NET_VENDOR_STMICRO is not set
CONFIG_AT803X_PHY=y
CONFIG_PPP=y
+CONFIG_PPPOL2TP=y
CONFIG_PPP_ASYNC=y
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC75XX=y
@@ -228,6 +235,11 @@
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
CONFIG_INPUT_GPIO=m
+CONFIG_BOSCH_DRIVER_LOG_FUNC=y
+CONFIG_SENSORS_SMI_ACC2X2=y
+CONFIG_SENSORS_SMI_ACC2X2_ENABLE_INT2=y
+CONFIG_SENSORS_SMI_GYRO=y
+CONFIG_SENSORS_SMI_GYRO_FIFO=y
CONFIG_SERIO_LIBPS2=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_MSM=y
@@ -397,6 +409,11 @@
CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_EXTCON_QCOM_SPMI_MISC=y
CONFIG_IIO=y
+CONFIG_INV_MPU_IIO_IAM20680=y
+CONFIG_INV_MPU_IIO_I2C=y
+CONFIG_INV_MPU_IIO_SPI=y
+CONFIG_INV_TESTING=y
+CONFIG_IIO_ST_ASM330LHH=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_QCOM_SHOW_RESUME_IRQ=y
diff --git a/arch/arm/configs/sdxpoorwills-auto_defconfig b/arch/arm/configs/sdxpoorwills-auto_defconfig
index 9a62d53..705a560 100644
--- a/arch/arm/configs/sdxpoorwills-auto_defconfig
+++ b/arch/arm/configs/sdxpoorwills-auto_defconfig
@@ -105,6 +105,7 @@
CONFIG_NETFILTER_XT_MATCH_DSCP=y
CONFIG_NETFILTER_XT_MATCH_ESP=y
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
+# CONFIG_NETFILTER_XT_MATCH_L2TP is not set
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y
CONFIG_NETFILTER_XT_MATCH_PHYSDEV=m
@@ -154,6 +155,11 @@
CONFIG_BRIDGE_EBT_ARPREPLY=y
CONFIG_BRIDGE_EBT_DNAT=y
CONFIG_BRIDGE_EBT_SNAT=y
+CONFIG_L2TP=y
+CONFIG_L2TP_DEBUGFS=y
+CONFIG_L2TP_V3=y
+CONFIG_L2TP_IP=y
+CONFIG_L2TP_ETH=y
CONFIG_BRIDGE=y
CONFIG_VLAN_8021Q=y
CONFIG_NET_SCHED=y
@@ -203,6 +209,7 @@
# CONFIG_NET_VENDOR_STMICRO is not set
CONFIG_AT803X_PHY=y
CONFIG_PPP=y
+CONFIG_PPPOL2TP=y
CONFIG_PPP_ASYNC=y
CONFIG_USB_USBNET=y
CONFIG_USB_NET_SMSC75XX=y
@@ -222,6 +229,11 @@
CONFIG_INPUT_QPNP_POWER_ON=y
CONFIG_INPUT_UINPUT=y
CONFIG_INPUT_GPIO=m
+CONFIG_BOSCH_DRIVER_LOG_FUNC=y
+CONFIG_SENSORS_SMI_ACC2X2=y
+CONFIG_SENSORS_SMI_ACC2X2_ENABLE_INT2=y
+CONFIG_SENSORS_SMI_GYRO=y
+CONFIG_SENSORS_SMI_GYRO_FIFO=y
CONFIG_SERIO_LIBPS2=y
# CONFIG_LEGACY_PTYS is not set
CONFIG_SERIAL_MSM=y
@@ -402,6 +414,11 @@
CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_EXTCON_QCOM_SPMI_MISC=y
CONFIG_IIO=y
+CONFIG_INV_MPU_IIO_IAM20680=y
+CONFIG_INV_MPU_IIO_I2C=y
+CONFIG_INV_MPU_IIO_SPI=y
+CONFIG_INV_TESTING=y
+CONFIG_IIO_ST_ASM330LHH=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_QCOM_SHOW_RESUME_IRQ=y
diff --git a/arch/arm/crypto/Kconfig b/arch/arm/crypto/Kconfig
index 72aa5ec..27ed1b1 100644
--- a/arch/arm/crypto/Kconfig
+++ b/arch/arm/crypto/Kconfig
@@ -120,11 +120,4 @@
that uses the 64x64 to 128 bit polynomial multiplication (vmull.p64)
that is part of the ARMv8 Crypto Extensions
-config CRYPTO_SPECK_NEON
- tristate "NEON accelerated Speck cipher algorithms"
- depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
- select CRYPTO_GF128MUL
- select CRYPTO_SPECK
-
endif
diff --git a/arch/arm/crypto/Makefile b/arch/arm/crypto/Makefile
index 1d0448a..fc51507 100644
--- a/arch/arm/crypto/Makefile
+++ b/arch/arm/crypto/Makefile
@@ -8,7 +8,6 @@
obj-$(CONFIG_CRYPTO_SHA1_ARM_NEON) += sha1-arm-neon.o
obj-$(CONFIG_CRYPTO_SHA256_ARM) += sha256-arm.o
obj-$(CONFIG_CRYPTO_SHA512_ARM) += sha512-arm.o
-obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
ce-obj-$(CONFIG_CRYPTO_AES_ARM_CE) += aes-arm-ce.o
ce-obj-$(CONFIG_CRYPTO_SHA1_ARM_CE) += sha1-arm-ce.o
@@ -37,7 +36,6 @@
sha2-arm-ce-y := sha2-ce-core.o sha2-ce-glue.o
aes-arm-ce-y := aes-ce-core.o aes-ce-glue.o
ghash-arm-ce-y := ghash-ce-core.o ghash-ce-glue.o
-speck-neon-y := speck-neon-core.o speck-neon-glue.o
quiet_cmd_perl = PERL $@
cmd_perl = $(PERL) $(<) > $(@)
diff --git a/arch/arm/crypto/speck-neon-core.S b/arch/arm/crypto/speck-neon-core.S
deleted file mode 100644
index 3c1e203..0000000
--- a/arch/arm/crypto/speck-neon-core.S
+++ /dev/null
@@ -1,432 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Author: Eric Biggers <ebiggers@google.com>
- */
-
-#include <linux/linkage.h>
-
- .text
- .fpu neon
-
- // arguments
- ROUND_KEYS .req r0 // const {u64,u32} *round_keys
- NROUNDS .req r1 // int nrounds
- DST .req r2 // void *dst
- SRC .req r3 // const void *src
- NBYTES .req r4 // unsigned int nbytes
- TWEAK .req r5 // void *tweak
-
- // registers which hold the data being encrypted/decrypted
- X0 .req q0
- X0_L .req d0
- X0_H .req d1
- Y0 .req q1
- Y0_H .req d3
- X1 .req q2
- X1_L .req d4
- X1_H .req d5
- Y1 .req q3
- Y1_H .req d7
- X2 .req q4
- X2_L .req d8
- X2_H .req d9
- Y2 .req q5
- Y2_H .req d11
- X3 .req q6
- X3_L .req d12
- X3_H .req d13
- Y3 .req q7
- Y3_H .req d15
-
- // the round key, duplicated in all lanes
- ROUND_KEY .req q8
- ROUND_KEY_L .req d16
- ROUND_KEY_H .req d17
-
- // index vector for vtbl-based 8-bit rotates
- ROTATE_TABLE .req d18
-
- // multiplication table for updating XTS tweaks
- GF128MUL_TABLE .req d19
- GF64MUL_TABLE .req d19
-
- // current XTS tweak value(s)
- TWEAKV .req q10
- TWEAKV_L .req d20
- TWEAKV_H .req d21
-
- TMP0 .req q12
- TMP0_L .req d24
- TMP0_H .req d25
- TMP1 .req q13
- TMP2 .req q14
- TMP3 .req q15
-
- .align 4
-.Lror64_8_table:
- .byte 1, 2, 3, 4, 5, 6, 7, 0
-.Lror32_8_table:
- .byte 1, 2, 3, 0, 5, 6, 7, 4
-.Lrol64_8_table:
- .byte 7, 0, 1, 2, 3, 4, 5, 6
-.Lrol32_8_table:
- .byte 3, 0, 1, 2, 7, 4, 5, 6
-.Lgf128mul_table:
- .byte 0, 0x87
- .fill 14
-.Lgf64mul_table:
- .byte 0, 0x1b, (0x1b << 1), (0x1b << 1) ^ 0x1b
- .fill 12
-
-/*
- * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
- *
- * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
- * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
- * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
- *
- * The 8-bit rotates are implemented using vtbl instead of vshr + vsli because
- * the vtbl approach is faster on some processors and the same speed on others.
- */
-.macro _speck_round_128bytes n
-
- // x = ror(x, 8)
- vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
- vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
- vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
- vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
- vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
- vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
- vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
- vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
-
- // x += y
- vadd.u\n X0, Y0
- vadd.u\n X1, Y1
- vadd.u\n X2, Y2
- vadd.u\n X3, Y3
-
- // x ^= k
- veor X0, ROUND_KEY
- veor X1, ROUND_KEY
- veor X2, ROUND_KEY
- veor X3, ROUND_KEY
-
- // y = rol(y, 3)
- vshl.u\n TMP0, Y0, #3
- vshl.u\n TMP1, Y1, #3
- vshl.u\n TMP2, Y2, #3
- vshl.u\n TMP3, Y3, #3
- vsri.u\n TMP0, Y0, #(\n - 3)
- vsri.u\n TMP1, Y1, #(\n - 3)
- vsri.u\n TMP2, Y2, #(\n - 3)
- vsri.u\n TMP3, Y3, #(\n - 3)
-
- // y ^= x
- veor Y0, TMP0, X0
- veor Y1, TMP1, X1
- veor Y2, TMP2, X2
- veor Y3, TMP3, X3
-.endm
-
-/*
- * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
- *
- * This is the inverse of _speck_round_128bytes().
- */
-.macro _speck_unround_128bytes n
-
- // y ^= x
- veor TMP0, Y0, X0
- veor TMP1, Y1, X1
- veor TMP2, Y2, X2
- veor TMP3, Y3, X3
-
- // y = ror(y, 3)
- vshr.u\n Y0, TMP0, #3
- vshr.u\n Y1, TMP1, #3
- vshr.u\n Y2, TMP2, #3
- vshr.u\n Y3, TMP3, #3
- vsli.u\n Y0, TMP0, #(\n - 3)
- vsli.u\n Y1, TMP1, #(\n - 3)
- vsli.u\n Y2, TMP2, #(\n - 3)
- vsli.u\n Y3, TMP3, #(\n - 3)
-
- // x ^= k
- veor X0, ROUND_KEY
- veor X1, ROUND_KEY
- veor X2, ROUND_KEY
- veor X3, ROUND_KEY
-
- // x -= y
- vsub.u\n X0, Y0
- vsub.u\n X1, Y1
- vsub.u\n X2, Y2
- vsub.u\n X3, Y3
-
- // x = rol(x, 8);
- vtbl.8 X0_L, {X0_L}, ROTATE_TABLE
- vtbl.8 X0_H, {X0_H}, ROTATE_TABLE
- vtbl.8 X1_L, {X1_L}, ROTATE_TABLE
- vtbl.8 X1_H, {X1_H}, ROTATE_TABLE
- vtbl.8 X2_L, {X2_L}, ROTATE_TABLE
- vtbl.8 X2_H, {X2_H}, ROTATE_TABLE
- vtbl.8 X3_L, {X3_L}, ROTATE_TABLE
- vtbl.8 X3_H, {X3_H}, ROTATE_TABLE
-.endm
-
-.macro _xts128_precrypt_one dst_reg, tweak_buf, tmp
-
- // Load the next source block
- vld1.8 {\dst_reg}, [SRC]!
-
- // Save the current tweak in the tweak buffer
- vst1.8 {TWEAKV}, [\tweak_buf:128]!
-
- // XOR the next source block with the current tweak
- veor \dst_reg, TWEAKV
-
- /*
- * Calculate the next tweak by multiplying the current one by x,
- * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
- */
- vshr.u64 \tmp, TWEAKV, #63
- vshl.u64 TWEAKV, #1
- veor TWEAKV_H, \tmp\()_L
- vtbl.8 \tmp\()_H, {GF128MUL_TABLE}, \tmp\()_H
- veor TWEAKV_L, \tmp\()_H
-.endm
-
-.macro _xts64_precrypt_two dst_reg, tweak_buf, tmp
-
- // Load the next two source blocks
- vld1.8 {\dst_reg}, [SRC]!
-
- // Save the current two tweaks in the tweak buffer
- vst1.8 {TWEAKV}, [\tweak_buf:128]!
-
- // XOR the next two source blocks with the current two tweaks
- veor \dst_reg, TWEAKV
-
- /*
- * Calculate the next two tweaks by multiplying the current ones by x^2,
- * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
- */
- vshr.u64 \tmp, TWEAKV, #62
- vshl.u64 TWEAKV, #2
- vtbl.8 \tmp\()_L, {GF64MUL_TABLE}, \tmp\()_L
- vtbl.8 \tmp\()_H, {GF64MUL_TABLE}, \tmp\()_H
- veor TWEAKV, \tmp
-.endm
-
-/*
- * _speck_xts_crypt() - Speck-XTS encryption/decryption
- *
- * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
- * using Speck-XTS, specifically the variant with a block size of '2n' and round
- * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
- * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
- * nonzero multiple of 128.
- */
-.macro _speck_xts_crypt n, decrypting
- push {r4-r7}
- mov r7, sp
-
- /*
- * The first four parameters were passed in registers r0-r3. Load the
- * additional parameters, which were passed on the stack.
- */
- ldr NBYTES, [sp, #16]
- ldr TWEAK, [sp, #20]
-
- /*
- * If decrypting, modify the ROUND_KEYS parameter to point to the last
- * round key rather than the first, since for decryption the round keys
- * are used in reverse order.
- */
-.if \decrypting
-.if \n == 64
- add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #3
- sub ROUND_KEYS, #8
-.else
- add ROUND_KEYS, ROUND_KEYS, NROUNDS, lsl #2
- sub ROUND_KEYS, #4
-.endif
-.endif
-
- // Load the index vector for vtbl-based 8-bit rotates
-.if \decrypting
- ldr r12, =.Lrol\n\()_8_table
-.else
- ldr r12, =.Lror\n\()_8_table
-.endif
- vld1.8 {ROTATE_TABLE}, [r12:64]
-
- // One-time XTS preparation
-
- /*
- * Allocate stack space to store 128 bytes worth of tweaks. For
- * performance, this space is aligned to a 16-byte boundary so that we
- * can use the load/store instructions that declare 16-byte alignment.
- */
- sub sp, #128
- bic sp, #0xf
-
-.if \n == 64
- // Load first tweak
- vld1.8 {TWEAKV}, [TWEAK]
-
- // Load GF(2^128) multiplication table
- ldr r12, =.Lgf128mul_table
- vld1.8 {GF128MUL_TABLE}, [r12:64]
-.else
- // Load first tweak
- vld1.8 {TWEAKV_L}, [TWEAK]
-
- // Load GF(2^64) multiplication table
- ldr r12, =.Lgf64mul_table
- vld1.8 {GF64MUL_TABLE}, [r12:64]
-
- // Calculate second tweak, packing it together with the first
- vshr.u64 TMP0_L, TWEAKV_L, #63
- vtbl.u8 TMP0_L, {GF64MUL_TABLE}, TMP0_L
- vshl.u64 TWEAKV_H, TWEAKV_L, #1
- veor TWEAKV_H, TMP0_L
-.endif
-
-.Lnext_128bytes_\@:
-
- /*
- * Load the source blocks into {X,Y}[0-3], XOR them with their XTS tweak
- * values, and save the tweaks on the stack for later. Then
- * de-interleave the 'x' and 'y' elements of each block, i.e. make it so
- * that the X[0-3] registers contain only the second halves of blocks,
- * and the Y[0-3] registers contain only the first halves of blocks.
- * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
- */
- mov r12, sp
-.if \n == 64
- _xts128_precrypt_one X0, r12, TMP0
- _xts128_precrypt_one Y0, r12, TMP0
- _xts128_precrypt_one X1, r12, TMP0
- _xts128_precrypt_one Y1, r12, TMP0
- _xts128_precrypt_one X2, r12, TMP0
- _xts128_precrypt_one Y2, r12, TMP0
- _xts128_precrypt_one X3, r12, TMP0
- _xts128_precrypt_one Y3, r12, TMP0
- vswp X0_L, Y0_H
- vswp X1_L, Y1_H
- vswp X2_L, Y2_H
- vswp X3_L, Y3_H
-.else
- _xts64_precrypt_two X0, r12, TMP0
- _xts64_precrypt_two Y0, r12, TMP0
- _xts64_precrypt_two X1, r12, TMP0
- _xts64_precrypt_two Y1, r12, TMP0
- _xts64_precrypt_two X2, r12, TMP0
- _xts64_precrypt_two Y2, r12, TMP0
- _xts64_precrypt_two X3, r12, TMP0
- _xts64_precrypt_two Y3, r12, TMP0
- vuzp.32 Y0, X0
- vuzp.32 Y1, X1
- vuzp.32 Y2, X2
- vuzp.32 Y3, X3
-.endif
-
- // Do the cipher rounds
-
- mov r12, ROUND_KEYS
- mov r6, NROUNDS
-
-.Lnext_round_\@:
-.if \decrypting
-.if \n == 64
- vld1.64 ROUND_KEY_L, [r12]
- sub r12, #8
- vmov ROUND_KEY_H, ROUND_KEY_L
-.else
- vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]
- sub r12, #4
-.endif
- _speck_unround_128bytes \n
-.else
-.if \n == 64
- vld1.64 ROUND_KEY_L, [r12]!
- vmov ROUND_KEY_H, ROUND_KEY_L
-.else
- vld1.32 {ROUND_KEY_L[],ROUND_KEY_H[]}, [r12]!
-.endif
- _speck_round_128bytes \n
-.endif
- subs r6, r6, #1
- bne .Lnext_round_\@
-
- // Re-interleave the 'x' and 'y' elements of each block
-.if \n == 64
- vswp X0_L, Y0_H
- vswp X1_L, Y1_H
- vswp X2_L, Y2_H
- vswp X3_L, Y3_H
-.else
- vzip.32 Y0, X0
- vzip.32 Y1, X1
- vzip.32 Y2, X2
- vzip.32 Y3, X3
-.endif
-
- // XOR the encrypted/decrypted blocks with the tweaks we saved earlier
- mov r12, sp
- vld1.8 {TMP0, TMP1}, [r12:128]!
- vld1.8 {TMP2, TMP3}, [r12:128]!
- veor X0, TMP0
- veor Y0, TMP1
- veor X1, TMP2
- veor Y1, TMP3
- vld1.8 {TMP0, TMP1}, [r12:128]!
- vld1.8 {TMP2, TMP3}, [r12:128]!
- veor X2, TMP0
- veor Y2, TMP1
- veor X3, TMP2
- veor Y3, TMP3
-
- // Store the ciphertext in the destination buffer
- vst1.8 {X0, Y0}, [DST]!
- vst1.8 {X1, Y1}, [DST]!
- vst1.8 {X2, Y2}, [DST]!
- vst1.8 {X3, Y3}, [DST]!
-
- // Continue if there are more 128-byte chunks remaining, else return
- subs NBYTES, #128
- bne .Lnext_128bytes_\@
-
- // Store the next tweak
-.if \n == 64
- vst1.8 {TWEAKV}, [TWEAK]
-.else
- vst1.8 {TWEAKV_L}, [TWEAK]
-.endif
-
- mov sp, r7
- pop {r4-r7}
- bx lr
-.endm
-
-ENTRY(speck128_xts_encrypt_neon)
- _speck_xts_crypt n=64, decrypting=0
-ENDPROC(speck128_xts_encrypt_neon)
-
-ENTRY(speck128_xts_decrypt_neon)
- _speck_xts_crypt n=64, decrypting=1
-ENDPROC(speck128_xts_decrypt_neon)
-
-ENTRY(speck64_xts_encrypt_neon)
- _speck_xts_crypt n=32, decrypting=0
-ENDPROC(speck64_xts_encrypt_neon)
-
-ENTRY(speck64_xts_decrypt_neon)
- _speck_xts_crypt n=32, decrypting=1
-ENDPROC(speck64_xts_decrypt_neon)
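
Note on the deleted assembly: each NEON macro above applies the basic Speck round, documented in the _speck_round_128bytes() comment, to many lanes at once. As a reference for what one lane computes, here is a scalar C sketch of a single Speck128 round and its exact inverse (illustration only, not code from this tree; the helper names are ours):

    #include <stdint.h>

    static inline uint64_t ror64(uint64_t v, unsigned int n)
    {
    	return (v >> n) | (v << (64 - n));
    }

    static inline uint64_t rol64(uint64_t v, unsigned int n)
    {
    	return (v << n) | (v >> (64 - n));
    }

    /* One Speck128 round: the vtbl rotate, vadd, veor, vshl+vsri sequence */
    static void speck128_round(uint64_t *x, uint64_t *y, uint64_t k)
    {
    	*x = ror64(*x, 8);	/* x = ror(x, 8) -- the vtbl-based rotate */
    	*x += *y;		/* x += y                                 */
    	*x ^= k;		/* x ^= k                                 */
    	*y = rol64(*y, 3);	/* y = rol(y, 3)                          */
    	*y ^= *x;		/* y ^= x                                 */
    }

    /* Inverse round, mirroring _speck_unround_128bytes step for step */
    static void speck128_unround(uint64_t *x, uint64_t *y, uint64_t k)
    {
    	*y ^= *x;
    	*y = ror64(*y, 3);
    	*x ^= k;
    	*x -= *y;
    	*x = rol64(*x, 8);
    }

For Speck64 the same structure runs on 32-bit words (the \n parameter in the macros), which is why the macros take the lane size as an argument.
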
diff --git a/arch/arm/crypto/speck-neon-glue.c b/arch/arm/crypto/speck-neon-glue.c
deleted file mode 100644
index ea36c3a..0000000
--- a/arch/arm/crypto/speck-neon-glue.c
+++ /dev/null
@@ -1,314 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Note: the NIST recommendation for XTS only specifies a 128-bit block size,
- * but a 64-bit version (needed for Speck64) is fairly straightforward; the math
- * is just done in GF(2^64) instead of GF(2^128), with the reducing polynomial
- * x^64 + x^4 + x^3 + x + 1 from the original XEX paper (Rogaway, 2004:
- * "Efficient Instantiations of Tweakable Blockciphers and Refinements to Modes
- * OCB and PMAC"), represented as 0x1B.
- */
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <crypto/algapi.h>
-#include <crypto/gf128mul.h>
-#include <crypto/speck.h>
-#include <crypto/xts.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-/* The assembly functions only handle multiples of 128 bytes */
-#define SPECK_NEON_CHUNK_SIZE 128
-
-/* Speck128 */
-
-struct speck128_xts_tfm_ctx {
- struct speck128_tfm_ctx main_key;
- struct speck128_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck128_xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- speck128_crypt_one_t crypt_one,
- speck128_xts_crypt_many_t crypt_many)
-{
- struct crypto_blkcipher *tfm = desc->tfm;
- const struct speck128_xts_tfm_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct blkcipher_walk walk;
- le128 tweak;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, SPECK_NEON_CHUNK_SIZE);
-
- crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- le128_xor((le128 *)dst, (const le128 *)src, &tweak);
- (*crypt_one)(&ctx->main_key, dst, dst);
- le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
- gf128mul_x_ble((be128 *)&tweak, (const be128 *)&tweak);
-
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- return err;
-}
-
-static int speck128_xts_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return __speck128_xts_crypt(desc, dst, src, nbytes,
- crypto_speck128_encrypt,
- speck128_xts_encrypt_neon);
-}
-
-static int speck128_xts_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return __speck128_xts_crypt(desc, dst, src, nbytes,
- crypto_speck128_decrypt,
- speck128_xts_decrypt_neon);
-}
-
-static int speck128_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck128_xts_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- if (keylen % 2)
- return -EINVAL;
-
- keylen /= 2;
-
- err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-/* Speck64 */
-
-struct speck64_xts_tfm_ctx {
- struct speck64_tfm_ctx main_key;
- struct speck64_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck64_xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- speck64_crypt_one_t crypt_one,
- speck64_xts_crypt_many_t crypt_many)
-{
- struct crypto_blkcipher *tfm = desc->tfm;
- const struct speck64_xts_tfm_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct blkcipher_walk walk;
- __le64 tweak;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, SPECK_NEON_CHUNK_SIZE);
-
- crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- *(__le64 *)dst = *(__le64 *)src ^ tweak;
- (*crypt_one)(&ctx->main_key, dst, dst);
- *(__le64 *)dst ^= tweak;
- tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
- ((tweak & cpu_to_le64(1ULL << 63)) ?
- 0x1B : 0));
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- return err;
-}
-
-static int speck64_xts_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- return __speck64_xts_crypt(desc, dst, src, nbytes,
- crypto_speck64_encrypt,
- speck64_xts_encrypt_neon);
-}
-
-static int speck64_xts_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- return __speck64_xts_crypt(desc, dst, src, nbytes,
- crypto_speck64_decrypt,
- speck64_xts_decrypt_neon);
-}
-
-static int speck64_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck64_xts_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- if (keylen % 2)
- return -EINVAL;
-
- keylen /= 2;
-
- err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-static struct crypto_alg speck_algs[] = {
- {
- .cra_name = "xts(speck128)",
- .cra_driver_name = "xts-speck128-neon",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = SPECK128_BLOCK_SIZE,
- .cra_type = &crypto_blkcipher_type,
- .cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
- .cra_alignmask = 7,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = 2 * SPECK128_128_KEY_SIZE,
- .max_keysize = 2 * SPECK128_256_KEY_SIZE,
- .ivsize = SPECK128_BLOCK_SIZE,
- .setkey = speck128_xts_setkey,
- .encrypt = speck128_xts_encrypt,
- .decrypt = speck128_xts_decrypt,
- }
- }
- }, {
- .cra_name = "xts(speck64)",
- .cra_driver_name = "xts-speck64-neon",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = SPECK64_BLOCK_SIZE,
- .cra_type = &crypto_blkcipher_type,
- .cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
- .cra_alignmask = 7,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = 2 * SPECK64_96_KEY_SIZE,
- .max_keysize = 2 * SPECK64_128_KEY_SIZE,
- .ivsize = SPECK64_BLOCK_SIZE,
- .setkey = speck64_xts_setkey,
- .encrypt = speck64_xts_encrypt,
- .decrypt = speck64_xts_decrypt,
- }
- }
- }
-};
-
-static int __init speck_neon_module_init(void)
-{
- if (!(elf_hwcap & HWCAP_NEON))
- return -ENODEV;
- return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-static void __exit speck_neon_module_exit(void)
-{
- crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-module_init(speck_neon_module_init);
-module_exit(speck_neon_module_exit);
-
-MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
-MODULE_ALIAS_CRYPTO("xts(speck128)");
-MODULE_ALIAS_CRYPTO("xts-speck128-neon");
-MODULE_ALIAS_CRYPTO("xts(speck64)");
-MODULE_ALIAS_CRYPTO("xts-speck64-neon");
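
The deleted glue code's software fallback advances the XTS tweak exactly as the assembly does with its .Lgf128mul_table lookup: multiply by x in GF(2^128) modulo x^128 + x^7 + x^2 + x + 1 (reduction constant 0x87); the Speck64 path does the analogous thing in GF(2^64) with 0x1b, per the header comment above. A minimal C sketch of the 128-bit update, assuming the tweak is held as two little-endian 64-bit words (helper name is ours):

    #include <stdint.h>

    /*
     * Advance a 128-bit XTS tweak by one block: shift left one bit and
     * fold 0x87 back into the low byte when the top bit falls off --
     * the same computation as the vshr/vshl/vtbl sequence in
     * _xts128_precrypt_one.  t[0] is the low word, t[1] the high word.
     */
    static void xts128_tweak_next(uint64_t t[2])
    {
    	uint64_t carry = t[1] >> 63;

    	t[1] = (t[1] << 1) | (t[0] >> 63);
    	t[0] = (t[0] << 1) ^ (carry ? 0x87 : 0);
    }
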
diff --git a/arch/arm/mach-mvebu/pmsu.c b/arch/arm/mach-mvebu/pmsu.c
index f39bd51..faaf7c3 100644
--- a/arch/arm/mach-mvebu/pmsu.c
+++ b/arch/arm/mach-mvebu/pmsu.c
@@ -116,8 +116,8 @@
PMSU_BOOT_ADDR_REDIRECT_OFFSET(hw_cpu));
}
-extern unsigned char mvebu_boot_wa_start;
-extern unsigned char mvebu_boot_wa_end;
+extern unsigned char mvebu_boot_wa_start[];
+extern unsigned char mvebu_boot_wa_end[];
/*
* This function sets up the boot address workaround needed for SMP
@@ -130,7 +130,7 @@
phys_addr_t resume_addr_reg)
{
void __iomem *sram_virt_base;
- u32 code_len = &mvebu_boot_wa_end - &mvebu_boot_wa_start;
+ u32 code_len = mvebu_boot_wa_end - mvebu_boot_wa_start;
mvebu_mbus_del_window(BOOTROM_BASE, BOOTROM_SIZE);
mvebu_mbus_add_window_by_id(crypto_eng_target, crypto_eng_attribute,
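
The pmsu.c hunk switches the linker-provided boundary symbols to the kernel's usual incomplete-array form. With plain char declarations the compiler may assume each symbol is a distinct one-byte object and flag or mis-optimize arithmetic involving their addresses (e.g. under FORTIFY_SOURCE); with arrays, the names decay to pointers and the length computation needs no '&'. A sketch of the idiom, assuming only that the symbols are defined in assembly (the helper is hypothetical):

    /* Bounds of the boot workaround code, provided by the linker/asm */
    extern unsigned char mvebu_boot_wa_start[];
    extern unsigned char mvebu_boot_wa_end[];

    static inline unsigned int mvebu_boot_wa_len(void)
    {
    	/* array names decay to pointers; no '&', no one-byte-object UB */
    	return mvebu_boot_wa_end - mvebu_boot_wa_start;
    }
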
diff --git a/arch/arm/mach-omap2/omap_hwmod_reset.c b/arch/arm/mach-omap2/omap_hwmod_reset.c
index b68f9c0..d5ddba0 100644
--- a/arch/arm/mach-omap2/omap_hwmod_reset.c
+++ b/arch/arm/mach-omap2/omap_hwmod_reset.c
@@ -92,11 +92,13 @@
*/
void omap_hwmod_rtc_unlock(struct omap_hwmod *oh)
{
- local_irq_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
omap_rtc_wait_not_busy(oh);
omap_hwmod_write(OMAP_RTC_KICK0_VALUE, oh, OMAP_RTC_KICK0_REG);
omap_hwmod_write(OMAP_RTC_KICK1_VALUE, oh, OMAP_RTC_KICK1_REG);
- local_irq_enable();
+ local_irq_restore(flags);
}
/**
@@ -110,9 +112,11 @@
*/
void omap_hwmod_rtc_lock(struct omap_hwmod *oh)
{
- local_irq_disable();
+ unsigned long flags;
+
+ local_irq_save(flags);
omap_rtc_wait_not_busy(oh);
omap_hwmod_write(0x0, oh, OMAP_RTC_KICK0_REG);
omap_hwmod_write(0x0, oh, OMAP_RTC_KICK1_REG);
- local_irq_enable();
+ local_irq_restore(flags);
}
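
The omap_hwmod_reset.c change is the classic save/restore fix: local_irq_enable() re-enables interrupts unconditionally, which is wrong if the caller entered with them already disabled. The pattern, in a hypothetical caller (illustration only):

    #include <linux/irqflags.h>

    static void rtc_kick_access(void)
    {
    	unsigned long flags;

    	local_irq_save(flags);		/* records whether IRQs were on */
    	/* ... wait for the RTC, write the KICK0/KICK1 registers ... */
    	local_irq_restore(flags);	/* puts back exactly that state */
    }
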
diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile
index 2b265a7..826f47b 100644
--- a/arch/arm64/Makefile
+++ b/arch/arm64/Makefile
@@ -81,7 +81,7 @@
KBUILD_CFLAGS_MODULE += -mcmodel=large
ifeq ($(CONFIG_LTO_CLANG), y)
# Code model is not stored in LLVM IR, so we need to pass it also to LLVMgold
-LDFLAGS += -plugin-opt=-code-model=large
+KBUILD_LDFLAGS_MODULE += -plugin-opt=-code-model=large
endif
endif
diff --git a/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi b/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi
index ecf28c5..47a925e 100644
--- a/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8909w-pm660-regulator.dtsi
@@ -266,9 +266,9 @@
rpm-regulator-ldoa15 {
status = "okay";
pm660_l15: regulator-l15 {
- regulator-min-microvolt = <3300000>;
- regulator-max-microvolt = <3300000>;
- qcom,init-voltage = <3300000>;
+ regulator-min-microvolt = <3000000>;
+ regulator-max-microvolt = <3000000>;
+ qcom,init-voltage = <3000000>;
status = "okay";
};
};
diff --git a/arch/arm64/boot/dts/qcom/msm8917-camera-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/msm8917-camera-pinctrl.dtsi
index 33cc8ae..79fb41c 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-camera-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-camera-pinctrl.dtsi
@@ -160,6 +160,33 @@
};
};
+cam_sensor_rear_vdig_qm215: cam_sensor_rear_vdig_qm215 {
+ /* VDIG */
+ mux {
+ pins = "gpio25";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio25";
+ output-high;
+ drive-strength = <2>; /* 2 MA */
+ };
+};
+
+cam_sensor_rear_vdig_sleep_qm215: cam_sensor_rear_vdig_sleep_qm215 {
+ /* VDIG */
+ mux {
+ pins = "gpio25";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio25";
+ output-low;
+ drive-strength = <2>; /* 2 MA */
+ };
+};
cam_sensor_mclk1_default: cam_sensor_mclk1_default {
/* MCLK1 */
mux {
diff --git a/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi b/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi
index 7eb4d38..f86bfeb 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-cpu.dtsi
@@ -135,8 +135,11 @@
CPU_COST_0: core-cost0 {
busy-cost-data = <
960000 159
- 1094000 207
+ 998400 172
+ 1094400 207
+ 1209600 244
1248000 256
+ 1305600 283
1401000 327
1497600 343
>;
@@ -147,8 +150,11 @@
CLUSTER_COST_0: cluster-cost0 {
busy-cost-data = <
960000 53
- 1094000 61
+ 998400 55
+ 1094400 61
+ 1209600 68
1248000 71
+ 1305600 77
1401000 85
1497600 88
>;
diff --git a/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi b/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
index 6a4c10e..5c63ed3 100644
--- a/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8917-qrd.dtsi
@@ -13,7 +13,6 @@
#include <dt-bindings/clock/msm-clocks-8952.h>
#include "msm8917-camera-sensor-qrd.dtsi"
#include "msm8937-mdss-panels.dtsi"
-#include "msm8917-pmi8937.dtsi"
&blsp1_uart2 {
status = "ok";
diff --git a/arch/arm64/boot/dts/qcom/msm8937-camera.dtsi b/arch/arm64/boot/dts/qcom/msm8937-camera.dtsi
index 831ce61..b5f467e 100644
--- a/arch/arm64/boot/dts/qcom/msm8937-camera.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937-camera.dtsi
@@ -24,7 +24,7 @@
qcom,csiphy@1b34000 {
status = "ok";
cell-index = <0>;
- compatible = "qcom,csiphy-v10.00", "qcom,csiphy";
+ compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
reg = <0x1b34000 0x1000>,
<0x1b00030 0x4>;
reg-names = "csiphy", "csiphy_clk_mux";
@@ -45,7 +45,7 @@
qcom,csiphy@1b35000 {
status = "ok";
cell-index = <1>;
- compatible = "qcom,csiphy-v10.00", "qcom,csiphy";
+ compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
reg = <0x1b35000 0x1000>,
<0x1b00038 0x4>;
reg-names = "csiphy", "csiphy_clk_mux";
diff --git a/arch/arm64/boot/dts/qcom/msm8937.dtsi b/arch/arm64/boot/dts/qcom/msm8937.dtsi
index 33a4f67..5006f28 100644
--- a/arch/arm64/boot/dts/qcom/msm8937.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8937.dtsi
@@ -160,7 +160,6 @@
};
-#include "msm8937-camera.dtsi"
#include "msm8937-pinctrl.dtsi"
#include "msm8937-cpu.dtsi"
#include "msm8937-ion.dtsi"
@@ -1819,20 +1818,13 @@
status = "ok";
};
- qcom,csiphy@1b34000 {
- compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
- };
-
- qcom,csiphy@1b35000 {
- compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
- };
-
};
#include "pm8937-rpm-regulator.dtsi"
#include "msm8937-regulator.dtsi"
#include "pm8937.dtsi"
#include "msm8937-audio.dtsi"
+#include "msm8937-camera.dtsi"
#include "msm-gdsc-8916.dtsi"
#include "msm8937-coresight.dtsi"
#include "msm8937-thermal.dtsi"
diff --git a/arch/arm64/boot/dts/qcom/msm8940.dtsi b/arch/arm64/boot/dts/qcom/msm8940.dtsi
index 64fbaf9..f1a3abe 100644
--- a/arch/arm64/boot/dts/qcom/msm8940.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8940.dtsi
@@ -110,6 +110,101 @@
qcom,bw-dwnstep = <4000>;
qcom,max-vote = <4000>;
};
+
+ /delete-node/ funnel@6120000;
+ funnel_right: funnel@6120000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b908>;
+
+ reg = <0x6120000 0x1000>;
+ reg-names = "funnel-base";
+
+ coresight-name = "coresight-funnel-right";
+
+ clocks = <&clock_gcc clk_qdss_clk>,
+ <&clock_gcc clk_qdss_a_clk>;
+ clock-names = "apb_pclk";
+
+ ports {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ port@0 {
+ funnel_right_out_funnel_in0: endpoint {
+ remote-endpoint =
+ <&funnel_in0_in_funnel_right>;
+ };
+ };
+
+ port@1 {
+ reg = <1>;
+ funnel_right_in_modem_etm1: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&modem_etm1_out_funnel_right>;
+ };
+ };
+
+ port@2 {
+ reg = <2>;
+ funnel_right_in_modem_etm0: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&modem_etm0_out_funnel_right>;
+ };
+ };
+
+ port@3 {
+ reg = <3>;
+ funnel_right_in_funnel_apss: endpoint {
+ slave-mode;
+ remote-endpoint =
+ <&funnel_apss_out_funnel_right>;
+ };
+ };
+ };
+ };
+
+ /delete-node/ cti@6124000;
+ cti_modem_cpu0: cti@6128000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+
+ reg = <0x6128000 0x1000>;
+ reg-names = "cti-base";
+ coresight-name = "coresight-cti-modem-cpu0";
+
+ clocks = <&clock_gcc clk_qdss_clk>,
+ <&clock_gcc clk_qdss_a_clk>;
+ clock-names = "apb_pclk";
+ };
+
+
+ cti_modem_cpu1: cti@6124000 {
+ compatible = "arm,primecell";
+ arm,primecell-periphid = <0x0003b966>;
+
+ reg = <0x6124000 0x1000>;
+ reg-names = "cti-base";
+ coresight-name = "coresight-cti-modem-cpu1";
+
+ clocks = <&clock_gcc clk_qdss_clk>,
+ <&clock_gcc clk_qdss_a_clk>;
+ clock-names = "apb_pclk";
+ };
+
+ modem_etm1 {
+ compatible = "qcom,coresight-remote-etm";
+ coresight-name = "coresight-modem-etm1";
+ qcom,inst-id = <11>;
+
+ port {
+ modem_etm1_out_funnel_right: endpoint {
+ remote-endpoint = <&funnel_right_in_modem_etm1>;
+ };
+ };
+ };
+
};
&clock_gcc {
diff --git a/arch/arm64/boot/dts/qcom/msm8953.dtsi b/arch/arm64/boot/dts/qcom/msm8953.dtsi
index 24701bf..b1cfed4 100644
--- a/arch/arm64/boot/dts/qcom/msm8953.dtsi
+++ b/arch/arm64/boot/dts/qcom/msm8953.dtsi
@@ -154,6 +154,15 @@
reusable;
size = <0x400000>;
};
+
+ /* global autoconfigured region for contiguous allocations */
+ linux,cma {
+ compatible = "shared-dma-pool";
+ reusable;
+ alignment = <0 0x400000>;
+ size = <0 0x1400000>;
+ linux,cma-default;
+ };
};
aliases {
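
The new linux,cma node gives msm8953 a default contiguous pool of 0x1400000 bytes (20 MB) aligned to 4 MB; the two-cell values encode 64-bit size/alignment. With "linux,cma-default" set, coherent DMA allocations from drivers that lack a dedicated memory-region should be satisfied from this pool through the ordinary DMA API, roughly as below (a sketch; 'dev', the size, and the helper name are placeholders):

    #include <linux/dma-mapping.h>
    #include <linux/sizes.h>

    /* Allocation falls back to the linux,cma-default region when the
     * device has no per-device coherent pool of its own. */
    static void *alloc_from_default_cma(struct device *dev, dma_addr_t *handle)
    {
    	return dma_alloc_coherent(dev, SZ_4M, handle, GFP_KERNEL);
    }
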
diff --git a/arch/arm64/boot/dts/qcom/pmi632.dtsi b/arch/arm64/boot/dts/qcom/pmi632.dtsi
index a6f29bf..da15b33 100644
--- a/arch/arm64/boot/dts/qcom/pmi632.dtsi
+++ b/arch/arm64/boot/dts/qcom/pmi632.dtsi
@@ -44,6 +44,7 @@
interrupt-names = "eoc-int-en-set";
qcom,adc-vdd-reference = <1875>;
qcom,adc-full-scale-code = <0x70e4>;
+ qcom,pmic-revid = <&pmi632_revid>;
chan@0 {
label = "ref_gnd";
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc-ipcamera.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc-ipcamera.dtsi
index 0b36ffe..c211cec 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-lc-ipcamera.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc-ipcamera.dtsi
@@ -206,6 +206,13 @@
qcom,wsa-max-devs = <1>;
qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0213>;
qcom,wsa-aux-dev-prefix = "SpkrLeft", "SpkrLeft";
+ qcom,linein-det-swh = <1>;
+ qcom,lineout-det-swh = <1>;
+ qcom,linein-det-gpio = <&tlmm 124 0>;
+ qcom,lineout-det-gpio = <&tlmm 125 0>;
+ pinctrl-names = "default";
+ pinctrl-0 = <&jack_det_linein_default
+ &jack_det_lineout_default>;
};
&soc {
diff --git a/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi b/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi
index 7e15752..7fcfd53 100644
--- a/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605-lc.dtsi
@@ -384,3 +384,7 @@
&qupv3_se6_4uart {
status = "ok";
};
+
+&apps_smmu {
+ /delete-property/ qcom,actlr;
+};
diff --git a/arch/arm64/boot/dts/qcom/qcs605.dtsi b/arch/arm64/boot/dts/qcom/qcs605.dtsi
index d5cf3dd..3c6c7a8 100644
--- a/arch/arm64/boot/dts/qcom/qcs605.dtsi
+++ b/arch/arm64/boot/dts/qcom/qcs605.dtsi
@@ -76,6 +76,7 @@
&qcom_seecom {
reg = <0x86d00000 0x800000>;
+ /delete-property/ qcom,appsbl-qseecom-support;
};
&sp_mem {
diff --git a/arch/arm64/boot/dts/qcom/qm215-camera-sensor-qrd.dtsi b/arch/arm64/boot/dts/qcom/qm215-camera-sensor-qrd.dtsi
new file mode 100644
index 0000000..f7c6814
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qm215-camera-sensor-qrd.dtsi
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+&cci {
+ actuator0: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ cam_vaf-supply = <&pm8916_l10>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2850000>;
+ qcom,cam-vreg-max-voltage = <2850000>;
+ qcom,cam-vreg-op-mode = <80000>;
+ };
+
+ eeprom0: qcom,eeprom@0 {
+ cell-index = <0>;
+ compatible = "qcom,eeprom";
+ qcom,cci-master = <0>;
+ reg = <0x0>;
+ cam_vana-supply = <&pm8916_l16>;
+ cam_vio-supply = <&pm8916_l6>;
+ cam_vaf-supply = <&pm8916_l10>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vaf";
+ qcom,cam-vreg-min-voltage = <0 2800000 2850000>;
+ qcom,cam-vreg-max-voltage = <0 2800000 2850000>;
+ qcom,cam-vreg-op-mode = <0 80000 100000>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_default
+ &cam_sensor_rear_vdig_qm215
+ &cam_sensor_rear_default>;
+ pinctrl-1 = <&cam_sensor_mclk0_sleep
+ &cam_sensor_rear_vdig_sleep_qm215
+ &cam_sensor_rear_sleep>;
+ gpios = <&tlmm 25 0>,
+ <&tlmm 26 0>,
+ <&tlmm 36 0>;
+ qcom,gpio-reset = <2>;
+ qcom,gpio-vdig = <0>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <0 1 0>;
+ qcom,gpio-req-tbl-label = "CAM_VDIG",
+ "CAMIF_MCLK0",
+ "CAM_RESET0";
+ status = "ok";
+ clocks = <&clock_gcc clk_mclk0_clk_src>,
+ <&clock_gcc clk_gcc_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <19200000 0>;
+ };
+
+ eeprom2: qcom,eeprom@2 {
+ cell-index = <2>;
+ compatible = "qcom,eeprom";
+ reg = <0x02>;
+ cam_vana-supply = <&pm8916_l16>;
+ cam_vio-supply = <&pm8916_l6>;
+ cam_vaf-supply = <&pm8916_l10>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vaf";
+ qcom,cam-vreg-min-voltage = <0 2800000 2850000>;
+ qcom,cam-vreg-max-voltage = <0 2800000 2850000>;
+ qcom,cam-vreg-op-mode = <0 80000 100000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_default
+ &cam_sensor_rear_vdig_qm215
+ &cam_sensor_front1_default>;
+ pinctrl-1 = <&cam_sensor_mclk2_sleep
+ &cam_sensor_rear_vdig_sleep_qm215
+ &cam_sensor_front1_sleep>;
+ gpios = <&tlmm 25 0>,
+ <&tlmm 28 0>,
+ <&tlmm 40 0>;
+ qcom,gpio-reset = <2>;
+ qcom,gpio-standby = <0>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAM_VDIG",
+ "CAMIF_MCLK2",
+ "CAM_RESET2";
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_gcc clk_mclk2_clk_src>,
+ <&clock_gcc clk_gcc_camss_mclk2_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <19200000 0>;
+ };
+
+ qcom,camera@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera";
+ reg = <0x0>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <90>;
+ qcom,actuator-src = <&actuator0>;
+ qcom,eeprom-src = <&eeprom0>;
+ cam_vana-supply = <&pm8916_l16>;
+ cam_vio-supply = <&pm8916_l6>;
+ cam_vaf-supply = <&pm8916_l10>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vaf";
+ qcom,cam-vreg-min-voltage = <0 2800000 2850000>;
+ qcom,cam-vreg-max-voltage = <0 2800000 2850000>;
+ qcom,cam-vreg-op-mode = <0 80000 100000>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_default
+ &cam_sensor_rear_vdig_qm215
+ &cam_sensor_rear_default>;
+ pinctrl-1 = <&cam_sensor_mclk0_sleep
+ &cam_sensor_rear_vdig_sleep_qm215
+ &cam_sensor_rear_sleep>;
+ gpios = <&tlmm 25 0>,
+ <&tlmm 26 0>,
+ <&tlmm 36 0>;
+
+ qcom,gpio-reset = <2>;
+ qcom,gpio-vdig = <0>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAM_VDIG",
+ "CAMIF_MCLK0",
+ "CAM_RESET0";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ clocks = <&clock_gcc clk_mclk0_clk_src>,
+ <&clock_gcc clk_gcc_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@2 {
+ cell-index = <2>;
+ compatible = "qcom,camera";
+ reg = <0x01>;
+ qcom,csiphy-sd-index = <1>;
+ qcom,csid-sd-index = <1>;
+ qcom,eeprom-src = <&eeprom2>;
+ qcom,mount-angle = <270>;
+ cam_vana-supply = <&pm8916_l16>;
+ cam_vio-supply = <&pm8916_l6>;
+ cam_vaf-supply = <&pm8916_l10>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vaf";
+ qcom,cam-vreg-min-voltage = <0 2800000 2850000>;
+ qcom,cam-vreg-max-voltage = <0 2800000 2850000>;
+ qcom,cam-vreg-op-mode = <0 80000 100000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_default
+ &cam_sensor_rear_vdig_qm215
+ &cam_sensor_front1_default>;
+ pinctrl-1 = <&cam_sensor_mclk2_sleep
+ &cam_sensor_rear_vdig_sleep_qm215
+ &cam_sensor_front1_sleep>;
+ gpios = <&tlmm 25 0>,
+ <&tlmm 28 0>,
+ <&tlmm 40 0>;
+ qcom,gpio-reset = <2>;
+ qcom,gpio-vdig = <0>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAM_VDIG",
+ "CAMIF_MCLK2",
+ "CAM_RESET2";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ clocks = <&clock_gcc clk_mclk2_clk_src>,
+ <&clock_gcc clk_gcc_camss_mclk2_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+};
diff --git a/arch/arm64/boot/dts/qcom/qm215-camera.dtsi b/arch/arm64/boot/dts/qcom/qm215-camera.dtsi
new file mode 100644
index 0000000..58d67ae
--- /dev/null
+++ b/arch/arm64/boot/dts/qcom/qm215-camera.dtsi
@@ -0,0 +1,533 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,msm-cam@1b00000 {
+ compatible = "qcom,msm-cam";
+ reg = <0x1b00000 0x40000>;
+ reg-names = "msm-cam";
+ status = "ok";
+ bus-vectors = "suspend", "svs", "nominal", "turbo";
+ qcom,bus-votes = <0 160000000 320000000 320000000>;
+ };
+
+ qcom,csiphy@1b34000 {
+ status = "ok";
+ cell-index = <0>;
+ compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
+ reg = <0x1b34000 0x1000>,
+ <0x1b00030 0x4>;
+ reg-names = "csiphy", "csiphy_clk_mux";
+ interrupts = <0 78 0>;
+ interrupt-names = "csiphy";
+ clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+ <&clock_gcc clk_csi0phytimer_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi0phytimer_clk>,
+ <&clock_gcc clk_camss_top_ahb_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi0phy_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>;
+ clock-names = "camss_top_ahb_clk", "ispif_ahb_clk",
+ "csiphy_timer_src_clk", "csiphy_timer_clk",
+ "camss_ahb_src", "csi_phy_clk",
+ "camss_ahb_clk";
+ qcom,clock-rates = <0 61540000 200000000 0 0 0 0>;
+ };
+
+ qcom,csiphy@1b35000 {
+ status = "ok";
+ cell-index = <1>;
+ compatible = "qcom,csiphy-v3.4.2", "qcom,csiphy";
+ reg = <0x1b35000 0x1000>,
+ <0x1b00038 0x4>;
+ reg-names = "csiphy", "csiphy_clk_mux";
+ interrupts = <0 79 0>;
+ interrupt-names = "csiphy";
+ clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+ <&clock_gcc clk_csi1phytimer_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi1phytimer_clk>,
+ <&clock_gcc clk_camss_top_ahb_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi1phy_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>;
+ clock-names = "camss_top_ahb_clk", "ispif_ahb_clk",
+ "csiphy_timer_src_clk", "csiphy_timer_clk",
+ "camss_ahb_src", "csi_phy_clk",
+ "camss_ahb_clk";
+ qcom,clock-rates = <0 61540000 200000000 0 0 0 0>;
+ };
+
+ qcom,csid@1b30000 {
+ status = "ok";
+ cell-index = <0>;
+ compatible = "qcom,csid-v3.4.3", "qcom,csid";
+ reg = <0x1b30000 0x400>;
+ reg-names = "csid";
+ interrupts = <0 51 0>;
+ interrupt-names = "csid";
+ qcom,csi-vdd-voltage = <1088000>;
+ qcom,mipi-csi-vdd-supply = <&pm8916_l2>;
+ clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_csi0_ahb_clk>,
+ <&clock_gcc clk_csi0_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi0_clk>,
+ <&clock_gcc clk_gcc_camss_csi0pix_clk>,
+ <&clock_gcc clk_gcc_camss_csi0rdi_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>;
+ clock-names = "camss_top_ahb_clk",
+ "ispif_ahb_clk", "csi_ahb_clk", "csi_src_clk",
+ "csi_clk", "csi_pix_clk",
+ "csi_rdi_clk", "camss_ahb_clk";
+ qcom,clock-rates = <0 61540000 0 200000000 0 0 0 0>;
+ };
+
+ qcom,csid@1b30400 {
+ status = "ok";
+ cell-index = <1>;
+ compatible = "qcom,csid-v3.4.3", "qcom,csid";
+ reg = <0x1b30400 0x400>;
+ reg-names = "csid";
+ interrupts = <0 52 0>;
+ interrupt-names = "csid";
+ qcom,csi-vdd-voltage = <1088000>;
+ qcom,mipi-csi-vdd-supply = <&pm8916_l2>;
+ clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_csi1_ahb_clk>,
+ <&clock_gcc clk_csi1_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi1_clk>,
+ <&clock_gcc clk_gcc_camss_csi1pix_clk>,
+ <&clock_gcc clk_gcc_camss_csi1rdi_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>;
+ clock-names = "camss_top_ahb_clk",
+ "ispif_ahb_clk", "csi_ahb_clk", "csi_src_clk",
+ "csi_clk", "csi_pix_clk",
+ "csi_rdi_clk", "camss_ahb_clk";
+ qcom,clock-rates = <0 61540000 0 200000000 0 0 0 0>;
+ };
+
+ qcom,csid@1b30800 {
+ status = "ok";
+ cell-index = <2>;
+ compatible = "qcom,csid-v3.4.3", "qcom,csid";
+ reg = <0x1b30800 0x400>;
+ reg-names = "csid";
+ interrupts = <0 153 0>;
+ interrupt-names = "csid";
+ qcom,csi-vdd-voltage = <1088000>;
+ qcom,mipi-csi-vdd-supply = <&pm8916_l2>;
+ clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_csi2_ahb_clk>,
+ <&clock_gcc clk_csi2_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi2_clk>,
+ <&clock_gcc clk_gcc_camss_csi2pix_clk>,
+ <&clock_gcc clk_gcc_camss_csi2rdi_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>;
+ clock-names = "camss_top_ahb_clk",
+ "ispif_ahb_clk", "csi_ahb_clk", "csi_src_clk",
+ "csi_clk", "csi_pix_clk",
+ "csi_rdi_clk", "camss_ahb_clk";
+ qcom,clock-rates = <0 61540000 0 200000000 0 0 0 0>;
+ };
+
+ qcom,ispif@1b31000 {
+ cell-index = <0>;
+ compatible = "qcom,ispif-v3.0", "qcom,ispif";
+ reg = <0x1b31000 0x500>,
+ <0x1b00020 0x10>;
+ reg-names = "ispif", "csi_clk_mux";
+ interrupts = <0 55 0>;
+ interrupt-names = "ispif";
+ qcom,num-isps = <0x2>;
+ vfe0-vdd-supply = <&gdsc_vfe>;
+ vfe1-vdd-supply = <&gdsc_vfe1>;
+ qcom,vdd-names = "vfe0-vdd", "vfe1-vdd";
+ clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_camss_top_ahb_clk_src>,
+ <&clock_gcc clk_csi0_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi0_clk>,
+ <&clock_gcc clk_gcc_camss_csi0rdi_clk>,
+ <&clock_gcc clk_gcc_camss_csi0pix_clk>,
+ <&clock_gcc clk_csi1_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi1_clk>,
+ <&clock_gcc clk_gcc_camss_csi1rdi_clk>,
+ <&clock_gcc clk_gcc_camss_csi1pix_clk>,
+ <&clock_gcc clk_csi2_clk_src>,
+ <&clock_gcc clk_gcc_camss_csi2_clk>,
+ <&clock_gcc clk_gcc_camss_csi2rdi_clk>,
+ <&clock_gcc clk_gcc_camss_csi2pix_clk>,
+ <&clock_gcc clk_vfe0_clk_src>,
+ <&clock_gcc clk_gcc_camss_vfe0_clk>,
+ <&clock_gcc clk_gcc_camss_csi_vfe0_clk>,
+ <&clock_gcc clk_vfe1_clk_src>,
+ <&clock_gcc clk_gcc_camss_vfe1_clk>,
+ <&clock_gcc clk_gcc_camss_csi_vfe1_clk>;
+ clock-names = "ispif_ahb_clk",
+ "camss_ahb_clk", "camss_top_ahb_clk",
+ "camss_ahb_src",
+ "csi0_src_clk", "csi0_clk",
+ "csi0_rdi_clk", "csi0_pix_clk",
+ "csi1_src_clk", "csi1_clk",
+ "csi1_rdi_clk", "csi1_pix_clk",
+ "csi2_src_clk", "csi2_clk",
+ "csi2_rdi_clk", "csi2_pix_clk",
+ "vfe0_clk_src", "camss_vfe_vfe0_clk",
+ "camss_csi_vfe0_clk", "vfe1_clk_src",
+ "camss_vfe_vfe1_clk", "camss_csi_vfe1_clk";
+ qcom,clock-rates = <61540000 0 0 0
+ 200000000 0 0 0
+ 200000000 0 0 0
+ 200000000 0 0 0
+ 0 0 0
+ 0 0 0>;
+ qcom,clock-cntl-support;
+ qcom,clock-control = "SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE", "SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+ "SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "INIT_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "INIT_RATE", "NO_SET_RATE",
+ "NO_SET_RATE";
+ };
+
+ vfe0: qcom,vfe0@1b10000 {
+ cell-index = <0>;
+ compatible = "qcom,vfe40";
+ reg = <0x1b10000 0x1000>,
+ <0x1b40000 0x200>;
+ reg-names = "vfe", "vfe_vbif";
+ interrupts = <0 57 0>;
+ interrupt-names = "vfe";
+ vdd-supply = <&gdsc_vfe>;
+ clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>,
+ <&clock_gcc clk_vfe0_clk_src>,
+ <&clock_gcc clk_gcc_camss_vfe0_clk>,
+ <&clock_gcc clk_gcc_camss_csi_vfe0_clk>,
+ <&clock_gcc clk_gcc_camss_vfe_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_vfe_axi_clk>,
+ <&clock_gcc clk_gcc_camss_ispif_ahb_clk>;
+ clock-names = "camss_top_ahb_clk", "camss_ahb_clk",
+ "vfe_clk_src", "camss_vfe_vfe_clk",
+ "camss_csi_vfe_clk", "iface_clk",
+ "bus_clk", "iface_ahb_clk";
+ qcom,clock-rates = <0 0 266670000 0 0 0 0 0>;
+ qos-entries = <8>;
+ qos-regs = <0x2c4 0x2c8 0x2cc 0x2d0 0x2d4 0x2d8
+ 0x2dc 0x2e0>;
+ qos-settings = <0xaa55aa55
+ 0xaa55aa55 0xaa55aa55
+ 0xaa55aa55 0xaa55aa55
+ 0xaa55aa55 0xaa55aa55
+ 0xaa55aa55>;
+ vbif-entries = <1>;
+ vbif-regs = <0x124>;
+ vbif-settings = <0x3>;
+ ds-entries = <17>;
+ ds-regs = <0x988 0x98c 0x990 0x994 0x998
+ 0x99c 0x9a0 0x9a4 0x9a8 0x9ac 0x9b0
+ 0x9b4 0x9b8 0x9bc 0x9c0 0x9c4 0x9c8>;
+ ds-settings = <0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0x00000110>;
+ max-clk-nominal = <400000000>;
+ max-clk-turbo = <432000000>;
+ };
+
+ vfe1: qcom,vfe1@1b14000 {
+ cell-index = <1>;
+ compatible = "qcom,vfe40";
+ reg = <0x1b14000 0x1000>,
+ <0x1ba0000 0x200>;
+ reg-names = "vfe", "vfe_vbif";
+ interrupts = <0 29 0>;
+ interrupt-names = "vfe";
+ vdd-supply = <&gdsc_vfe1>;
+ clocks = <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>,
+ <&clock_gcc clk_vfe1_clk_src>,
+ <&clock_gcc clk_gcc_camss_vfe1_clk>,
+ <&clock_gcc clk_gcc_camss_csi_vfe1_clk>,
+ <&clock_gcc clk_gcc_camss_vfe1_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_vfe1_axi_clk>,
+ <&clock_gcc clk_gcc_camss_ispif_ahb_clk>;
+ clock-names = "camss_top_ahb_clk", "camss_ahb_clk",
+ "vfe_clk_src", "camss_vfe_vfe_clk",
+ "camss_csi_vfe_clk", "iface_clk",
+ "bus_clk", "iface_ahb_clk";
+ qcom,clock-rates = <0 0 266670000 0 0 0 0 0>;
+ qos-entries = <8>;
+ qos-regs = <0x2c4 0x2c8 0x2cc 0x2d0 0x2d4 0x2d8
+ 0x2dc 0x2e0>;
+ qos-settings = <0xaa55aa55
+ 0xaa55aa55 0xaa55aa55
+ 0xaa55aa55 0xaa55aa55
+ 0xaa55aa55 0xaa55aa55
+ 0xaa55aa55>;
+ vbif-entries = <1>;
+ vbif-regs = <0x124>;
+ vbif-settings = <0x3>;
+ ds-entries = <17>;
+ ds-regs = <0x988 0x98c 0x990 0x994 0x998
+ 0x99c 0x9a0 0x9a4 0x9a8 0x9ac 0x9b0
+ 0x9b4 0x9b8 0x9bc 0x9c0 0x9c4 0x9c8>;
+ ds-settings = <0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0xcccc1111
+ 0xcccc1111 0x00000110>;
+ max-clk-nominal = <400000000>;
+ max-clk-turbo = <432000000>;
+ };
+
+ qcom,vfe {
+ compatible = "qcom,vfe";
+ num_child = <2>;
+ };
+
+ qcom,cam_smmu {
+ status = "ok";
+ compatible = "qcom,msm-cam-smmu";
+ msm_cam_smmu_cb1: msm_cam_smmu_cb1 {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_iommu 0x400 0x00>,
+ <&apps_iommu 0x2400 0x00>;
+ label = "vfe";
+ qcom,scratch-buf-support;
+ };
+
+ msm_cam_smmu_cb2: msm_cam_smmu_cb2 {
+ compatible = "qcom,msm-cam-smmu-cb";
+ label = "vfe_secure";
+ qcom,secure-context;
+ };
+
+ msm_cam_smmu_cb3: msm_cam_smmu_cb3 {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_iommu 0x1c00 0x00>;
+ label = "cpp";
+ };
+
+ msm_cam_smmu_cb4: msm_cam_smmu_cb4 {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&apps_iommu 0x1800 0x00>;
+ label = "jpeg_enc0";
+ };
+ };
+
+ qcom,jpeg@1b1c000 {
+ status = "ok";
+ cell-index = <0>;
+ compatible = "qcom,jpeg";
+ reg = <0x1b1c000 0x400>,
+ <0x1b60000 0xc30>;
+ reg-names = "jpeg_hw", "jpeg_vbif";
+ interrupts = <0 59 0>;
+ interrupt-names = "jpeg";
+ vdd-supply = <&gdsc_jpeg>;
+ qcom,vdd-names = "vdd";
+ clock-names = "core_clk", "iface_clk", "bus_clk0",
+ "camss_top_ahb_clk", "camss_ahb_clk";
+ clocks = <&clock_gcc clk_gcc_camss_jpeg0_clk>,
+ <&clock_gcc clk_gcc_camss_jpeg_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_jpeg_axi_clk>,
+ <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>;
+ qcom,clock-rates = <266670000 0 0 0 0>;
+ qcom,qos-reg-settings = <0x28 0x0000555e>,
+ <0xc8 0x00005555>;
+ qcom,msm-bus,name = "msm_camera_jpeg0";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <62 512 0 0>,
+ <62 512 800000 800000>;
+ qcom,vbif-reg-settings = <0xc0 0x10101000>,
+ <0xb0 0x10100010>;
+ };
+
+ qcom,irqrouter@1b00000 {
+ status = "ok";
+ cell-index = <0>;
+ compatible = "qcom,irqrouter";
+ reg = <0x1b00000 0x100>;
+ reg-names = "irqrouter";
+ };
+
+ qcom,cpp@1b04000 {
+ status = "ok";
+ cell-index = <0>;
+ compatible = "qcom,cpp";
+ reg = <0x1b04000 0x100>,
+ <0x1b80000 0x200>,
+ <0x1b18000 0x018>,
+ <0x1858078 0x4>;
+ reg-names = "cpp", "cpp_vbif", "cpp_hw", "camss_cpp";
+ interrupts = <0 49 0>;
+ interrupt-names = "cpp";
+ vdd-supply = <&gdsc_cpp>;
+ qcom,vdd-names = "vdd";
+ clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+ <&clock_gcc clk_cpp_clk_src>,
+ <&clock_gcc clk_gcc_camss_top_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_cpp_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_cpp_axi_clk>,
+ <&clock_gcc clk_gcc_camss_cpp_clk>,
+ <&clock_gcc clk_gcc_camss_micro_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>;
+ clock-names = "ispif_ahb_clk", "cpp_core_clk",
+ "camss_top_ahb_clk", "camss_vfe_cpp_ahb_clk",
+ "camss_vfe_cpp_axi_clk", "camss_vfe_cpp_clk",
+ "micro_iface_clk", "camss_ahb_clk";
+ qcom,clock-rates = <61540000 180000000 0 0 0 180000000 0 0>;
+ qcom,min-clock-rate = <133000000>;
+ resets = <&clock_gcc GCC_CAMSS_MICRO_BCR>;
+ reset-names = "micro_iface_reset";
+ qcom,bus-master = <1>;
+ qcom,msm-bus,name = "msm_camera_cpp";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <106 512 0 0>,
+ <106 512 0 0>;
+ qcom,msm-bus-vector-dyn-vote;
+ qcom,micro-reset;
+ qcom,cpp-fw-payload-info {
+ qcom,stripe-base = <156>;
+ qcom,plane-base = <141>;
+ qcom,stripe-size = <27>;
+ qcom,plane-size = <5>;
+ qcom,fe-ptr-off = <5>;
+ qcom,we-ptr-off = <11>;
+ };
+ };
+
+ cci: qcom,cci@1b0c000 {
+ status = "ok";
+ cell-index = <0>;
+ compatible = "qcom,cci";
+ reg = <0x1b0c000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "cci";
+ interrupts = <0 50 0>;
+ interrupt-names = "cci";
+ clocks = <&clock_gcc clk_gcc_camss_ispif_ahb_clk>,
+ <&clock_gcc clk_cci_clk_src>,
+ <&clock_gcc clk_gcc_camss_cci_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_cci_clk>,
+ <&clock_gcc clk_gcc_camss_ahb_clk>,
+ <&clock_gcc clk_gcc_camss_top_ahb_clk>;
+ clock-names = "ispif_ahb_clk", "cci_src_clk",
+ "cci_ahb_clk", "camss_cci_clk",
+ "camss_ahb_clk", "camss_top_ahb_clk";
+ qcom,clock-rates = <61540000 19200000 0 0 0 0>,
+ <61540000 37500000 0 0 0 0>;
+ pinctrl-names = "cci_default", "cci_suspend";
+ pinctrl-0 = <&cci0_active &cci1_active>;
+ pinctrl-1 = <&cci0_suspend &cci1_suspend>;
+ gpios = <&tlmm 29 0>,
+ <&tlmm 30 0>,
+ <&tlmm 31 0>,
+ <&tlmm 32 0>;
+ qcom,gpio-tbl-num = <0 1 2 3>;
+ qcom,gpio-tbl-flags = <1 1 1 1>;
+ qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+ "CCI_I2C_CLK0",
+ "CCI_I2C_DATA1",
+ "CCI_I2C_CLK1";
+ i2c_freq_100Khz: qcom,i2c_standard_mode {
+ status = "disabled";
+ };
+ i2c_freq_400Khz: qcom,i2c_fast_mode {
+ status = "disabled";
+ };
+ i2c_freq_custom: qcom,i2c_custom_mode {
+ status = "disabled";
+ };
+
+ i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+ status = "disabled";
+ };
+ };
+};
+
+&i2c_freq_100Khz {
+ qcom,hw-thigh = <78>;
+ qcom,hw-tlow = <114>;
+ qcom,hw-tsu-sto = <28>;
+ qcom,hw-tsu-sta = <28>;
+ qcom,hw-thd-dat = <10>;
+ qcom,hw-thd-sta = <77>;
+ qcom,hw-tbuf = <118>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <1>;
+};
+
+&i2c_freq_400Khz {
+ qcom,hw-thigh = <20>;
+ qcom,hw-tlow = <28>;
+ qcom,hw-tsu-sto = <21>;
+ qcom,hw-tsu-sta = <21>;
+ qcom,hw-thd-dat = <13>;
+ qcom,hw-thd-sta = <18>;
+ qcom,hw-tbuf = <32>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <3>;
+ status = "ok";
+};
+
+&i2c_freq_custom {
+ qcom,hw-thigh = <15>;
+ qcom,hw-tlow = <28>;
+ qcom,hw-tsu-sto = <21>;
+ qcom,hw-tsu-sta = <21>;
+ qcom,hw-thd-dat = <13>;
+ qcom,hw-thd-sta = <18>;
+ qcom,hw-tbuf = <25>;
+ qcom,hw-scl-stretch-en = <1>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <3>;
+ status = "ok";
+};
+
+&i2c_freq_1Mhz {
+ qcom,hw-thigh = <16>;
+ qcom,hw-tlow = <22>;
+ qcom,hw-tsu-sto = <17>;
+ qcom,hw-tsu-sta = <18>;
+ qcom,hw-thd-dat = <16>;
+ qcom,hw-thd-sta = <15>;
+ qcom,hw-tbuf = <19>;
+ qcom,hw-scl-stretch-en = <1>;
+ qcom,hw-trdhld = <3>;
+ qcom,hw-tsp = <3>;
+ qcom,cci-clk-src = <37500000>;
+ status = "ok";
+};
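
The i2c_freq_* overrides above supply per-mode CCI bus timing knobs (thigh, tlow, setup/hold times, presumably in cycles of the selected CCI source clock). A sketch of how a driver would pick two of them up with the standard OF helpers, assuming it already has the mode node; the property names come straight from the DT above, the helper is ours:

    #include <linux/of.h>

    static int cci_read_mode_timings(struct device_node *np,
    				 u32 *thigh, u32 *tlow)
    {
    	int ret;

    	ret = of_property_read_u32(np, "qcom,hw-thigh", thigh);
    	if (ret)
    		return ret;
    	return of_property_read_u32(np, "qcom,hw-tlow", tlow);
    }
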
diff --git a/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi b/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi
index 46e4bf7..6b08005 100644
--- a/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215-pm8916.dtsi
@@ -88,7 +88,8 @@
};
qcom,wcnss-wlan@a000000 {
- /delete-property/ qcom,wcnss-adc_tm;
+ /delete-property/ qcom,has-vsys-adc-channel;
+ qcom,wcnss-adc_tm = <&pm8916_adc_tm>;
qcom,pronto-vddmx-supply = <&pm8916_l2_level_ao>;
qcom,pronto-vddcx-supply = <&pm8916_s1_level>;
qcom,pronto-vddpx-supply = <&pm8916_l7>;
@@ -120,10 +121,6 @@
qcom,pronto-vddpx-current = <0>;
};
- /delete-node/ qcom,csid@1b30000;
- /delete-node/ qcom,csid@1b30400;
- /delete-node/ qcom,csid@1b30800;
-
/* mem_acc */
/delete-node/ regulator@01946004;
/* apc vreg */
@@ -305,7 +302,7 @@
qcom,decimation = <0>;
qcom,pre-div-channel-scaling = <0>;
qcom,calibration-type = "ratiometric";
- qcom,scale-function = <22>;
+ qcom,scale-function = <26>;
qcom,hw-settle-time = <0xb>;
qcom,fast-avg-setup = <0>;
};
@@ -389,7 +386,7 @@
thermal-zones {
xo-therm-buf-adc {
polling-delay-passive = <0>;
- polling-delay = <0>;
+ polling-delay = <5000>;
thermal-sensors = <&pm8916_vadc 0x3c>;
thermal-governor = "user_space";
@@ -404,7 +401,7 @@
xo-therm-adc {
polling-delay-passive = <0>;
- polling-delay = <0>;
+ polling-delay = <5000>;
thermal-sensors = <&pm8916_vadc 0x32>;
thermal-governor = "user_space";
@@ -419,7 +416,7 @@
pa-therm0-adc {
polling-delay-passive = <0>;
- polling-delay = <0>;
+ polling-delay = <5000>;
thermal-sensors = <&pm8916_vadc 0x36>;
thermal-governor = "user_space";
@@ -434,7 +431,7 @@
skin-therm-adc {
polling-delay-passive = <0>;
- polling-delay = <0>;
+ polling-delay = <5000>;
thermal-sensors = <&pm8916_vadc 0x11>;
thermal-governor = "user_space";
diff --git a/arch/arm64/boot/dts/qcom/qm215-qrd.dtsi b/arch/arm64/boot/dts/qcom/qm215-qrd.dtsi
index 76540a1..9d1459f 100644
--- a/arch/arm64/boot/dts/qcom/qm215-qrd.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215-qrd.dtsi
@@ -15,11 +15,15 @@
status = "ok";
};
+#include <dt-bindings/clock/msm-clocks-8952.h>
+#include "qm215-camera-sensor-qrd.dtsi"
+
&pm8916_chg{
status = "ok";
qcom,chgr-led-support;
qcom,vddmax-mv = <4400>;
qcom,vddsafe-mv = <4400>;
+ qcom,batt-hot-percentage = <35>;
};
&pm8916_bms{
@@ -120,6 +124,39 @@
/delete-node/ qcom,panel-supply-entry@3;
};
+&pm8916_gpios {
+ nfc_clk {
+ nfc_clk_default: nfc_clk_default {
+ pins = "gpio2";
+ function = "normal";
+ input-enable;
+ power-source = <1>;
+ };
+ };
+};
+
+&i2c_5 { /* BLSP2 QUP1 (NFC) */
+ status = "ok";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 17 0x00>;
+ qcom,nq-ven = <&tlmm 16 0x00>;
+ qcom,nq-firm = <&tlmm 130 0x00>;
+ qcom,nq-clkreq = <&pm8916_gpios 2 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK2";
+ interrupts = <17 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active &nfc_disable_active
+ &nfc_clk_default>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_disable_suspend>;
+ clocks = <&clock_gcc clk_bb_clk2_pin>;
+ clock-names = "ref_clk";
+ };
+};
+
&mdss_dsi {
hw-config = "single_dsi";
};
diff --git a/arch/arm64/boot/dts/qcom/qm215-regulator.dtsi b/arch/arm64/boot/dts/qcom/qm215-regulator.dtsi
index 6f6d301..a6dc66a 100644
--- a/arch/arm64/boot/dts/qcom/qm215-regulator.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215-regulator.dtsi
@@ -64,7 +64,6 @@
pm8916_s3: regulator-s3 {
regulator-min-microvolt = <1000000>;
regulator-max-microvolt = <1448000>;
- qcom,init-voltage = <1000000>;
status = "okay";
};
};
@@ -93,9 +92,8 @@
rpm-regulator-ldoa2 {
status = "okay";
pm8916_l2: regulator-l2 {
- regulator-min-microvolt = <944000>;
+ regulator-min-microvolt = <500000>;
regulator-max-microvolt = <1088000>;
- qcom,init-voltage = <944000>;
status = "okay";
};
diff --git a/arch/arm64/boot/dts/qcom/qm215.dtsi b/arch/arm64/boot/dts/qcom/qm215.dtsi
index c16774b..0589f45 100644
--- a/arch/arm64/boot/dts/qcom/qm215.dtsi
+++ b/arch/arm64/boot/dts/qcom/qm215.dtsi
@@ -12,6 +12,7 @@
*/
#include "msm8917.dtsi"
+#include "qm215-camera.dtsi"
/ {
model = "Qualcomm Technologies, Inc. QM215";
compatible = "qcom,qm215";
@@ -34,3 +35,212 @@
308570000 270000000 200000000>;
};
};
+
+/* GPU overrides */
+&msm_gpu {
+
+ qcom,gpu-speed-bin = <0x0164 0x00000600 9>;
+ /delete-property/ qcom,gpu-speed-bin-vectors;
+ /delete-node/ qcom,gpu-pwrlevel-bins;
+
+ qcom,gpu-pwrlevel-bins {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ compatible = "qcom,gpu-pwrlevel-bins";
+
+ qcom,gpu-pwrlevels-0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <0>;
+ qcom,initial-pwrlevel = <3>;
+
+ /* TURBO */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <598000000>;
+ qcom,bus-freq = <7>;
+ qcom,bus-min = <7>;
+ qcom,bus-max = <7>;
+ };
+
+ /* NOM+ */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <523200000>;
+ qcom,bus-freq = <6>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <7>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <484800000>;
+ qcom,bus-freq = <5>;
+ qcom,bus-min = <4>;
+ qcom,bus-max = <6>;
+ };
+
+ /* SVS+ */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <400000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <270000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <1>;
+ qcom,bus-max = <3>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <19200000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
+
+ qcom,gpu-pwrlevels-1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <1>;
+ qcom,initial-pwrlevel = <2>;
+
+ /* NOM+ */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <500000000>;
+ qcom,bus-freq = <7>;
+ qcom,bus-min = <6>;
+ qcom,bus-max = <7>;
+ };
+
+ /* NOM */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <465000000>;
+ qcom,bus-freq = <7>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <7>;
+ };
+
+ /* SVS+ */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <400000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <270000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <1>;
+ qcom,bus-max = <3>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <19200000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
+
+ qcom,gpu-pwrlevels-2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <2>;
+ qcom,initial-pwrlevel = <1>;
+
+ /* NOM */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <465000000>;
+ qcom,bus-freq = <7>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <7>;
+ };
+
+ /* SVS+ */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <400000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <270000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <1>;
+ qcom,bus-max = <3>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <19200000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
+
+ qcom,gpu-pwrlevels-3 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <3>;
+ qcom,initial-pwrlevel = <0>;
+
+ /* SVS+ */
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <400000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <6>;
+ };
+
+ /* SVS */
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <270000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <1>;
+ qcom,bus-max = <3>;
+ };
+
+ /* XO */
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <19200000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+ };
+ };
+ };
+};
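
On the GPU override: the three cells of qcom,gpu-speed-bin are conventionally <fuse-offset mask shift>, so <0x0164 0x00000600 9> selects a 2-bit field giving bins 0..3, one per qcom,gpu-pwrlevels-N table above. Under that assumption the decode is just (sketch; names and the mapped base are hypothetical):

    #include <linux/io.h>

    /* Read the speed-bin fuse: (word at offset & mask) >> shift */
    static u32 gpu_speed_bin(void __iomem *qfprom_base, u32 offset,
    			 u32 mask, u32 shift)
    {
    	return (readl_relaxed(qfprom_base + offset) & mask) >> shift;
    }
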
diff --git a/arch/arm64/boot/dts/qcom/sdm439.dtsi b/arch/arm64/boot/dts/qcom/sdm439.dtsi
index a59b457..ddf9ee6 100644
--- a/arch/arm64/boot/dts/qcom/sdm439.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm439.dtsi
@@ -35,6 +35,12 @@
qcom,csi-vdd-voltage = <800000>;
qcom,mipi-csi-vdd-supply = <&pm8953_l23>;
};
+ qcom,csiphy@1b34000 {
+ compatible = "qcom,csiphy-v10.00", "qcom,csiphy";
+ };
+ qcom,csiphy@1b35000 {
+ compatible = "qcom,csiphy-v10.00", "qcom,csiphy";
+ };
/delete-node/ qcom,msm-cpufreq;
msm_cpufreq: qcom,msm-cpufreq {
@@ -675,6 +681,10 @@
};
};
+&sdhc_1 {
+ qcom,ddr-config = <0x00040868>;
+};
+
&mdss_mdp {
qcom,vbif-settings = <0xd0 0x20>;
};
diff --git a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
index 59fb78e..aa6b890 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-pinctrl.dtsi
@@ -1732,6 +1732,36 @@
};
};
+ gpio_jack_det_line_in {
+ jack_det_linein_default: jack_det_linein_default {
+ mux {
+ pins = "gpio124";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio124";
+ bias-pull-up; /* pull up */
+ input-enable;
+ };
+ };
+ };
+
+ gpio_jack_det_line_out {
+ jack_det_lineout_default: jack_det_lineout_default {
+ mux {
+ pins = "gpio125";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio125";
+ bias-pull-up; /* pull up */
+ input-enable;
+ };
+ };
+ };
+
flash_led3_front {
flash_led3_front_en: flash_led3_front_en {
mux {
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
index aaa516b..2887f38 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-display.dtsi
@@ -137,9 +137,16 @@
qcom,dsi-ctrl = <&mdss_dsi0 &mdss_dsi1>;
qcom,dsi-phy = <&mdss_dsi_phy0 &mdss_dsi_phy1>;
+
clocks = <&mdss_dsi0_pll BYTECLK_MUX_0_CLK>,
- <&mdss_dsi0_pll PCLK_MUX_0_CLK>;
- clock-names = "mux_byte_clk", "mux_pixel_clk";
+ <&mdss_dsi0_pll PCLK_MUX_0_CLK>,
+ <&mdss_dsi0_pll BYTECLK_SRC_0_CLK>,
+ <&mdss_dsi0_pll PCLK_SRC_0_CLK>,
+ <&mdss_dsi0_pll SHADOW_BYTECLK_SRC_0_CLK>,
+ <&mdss_dsi0_pll SHADOW_PCLK_SRC_0_CLK>;
+ clock-names = "mux_byte_clk", "mux_pixel_clk",
+ "src_byte_clk", "src_pixel_clk",
+ "shadow_byte_clk", "shadow_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -209,8 +216,14 @@
qcom,dsi-ctrl = <&mdss_dsi1>;
qcom,dsi-phy = <&mdss_dsi_phy1>;
clocks = <&mdss_dsi1_pll BYTECLK_MUX_1_CLK>,
- <&mdss_dsi1_pll PCLK_MUX_1_CLK>;
- clock-names = "mux_byte_clk", "mux_pixel_clk";
+ <&mdss_dsi1_pll PCLK_MUX_1_CLK>,
+ <&mdss_dsi1_pll BYTECLK_SRC_1_CLK>,
+ <&mdss_dsi1_pll PCLK_SRC_1_CLK>,
+ <&mdss_dsi1_pll SHADOW_BYTECLK_SRC_1_CLK>,
+ <&mdss_dsi1_pll SHADOW_PCLK_SRC_1_CLK>;
+ clock-names = "mux_byte_clk", "mux_pixel_clk",
+ "src_byte_clk", "src_pixel_clk",
+ "shadow_byte_clk", "shadow_pixel_clk";
pinctrl-names = "panel_active", "panel_suspend";
pinctrl-0 = <&sde_dsi_active &sde_te_active>;
@@ -632,6 +645,9 @@
qcom,mdss-dsi-panel-status-value = <0x9c>;
qcom,mdss-dsi-panel-on-check-value = <0x9c>;
qcom,mdss-dsi-panel-status-read-length = <1>;
+ qcom,dsi-dyn-clk-enable;
+ qcom,dsi-dyn-clk-list =
+ <804948480 798240576 801594528 808302432 811656384>;
qcom,mdss-dsi-display-timings {
timing@0{
qcom,mdss-dsi-panel-phy-timings = [00 1c 07 07 23 21 07
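
qcom,dsi-dyn-clk-enable plus qcom,dsi-dyn-clk-list give the panel a set of alternate byte-clock rates (here clustered around ~805 MHz) that the display driver can hop between at runtime. A sketch of parsing the list with the standard OF helpers; the function name and calling convention are assumptions:

        #include <linux/of.h>
        #include <linux/slab.h>

        /* Hedged sketch: read qcom,dsi-dyn-clk-list into a heap array. */
        static int parse_dyn_clk_list(struct device_node *np,
                                      u32 **rates, int *count)
        {
                int n = of_property_count_u32_elems(np, "qcom,dsi-dyn-clk-list");

                if (n <= 0)
                        return n ? n : -EINVAL;

                *rates = kcalloc(n, sizeof(**rates), GFP_KERNEL);
                if (!*rates)
                        return -ENOMEM;

                *count = n;
                return of_property_read_u32_array(np, "qcom,dsi-dyn-clk-list",
                                                  *rates, n);
        }
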
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
index 72e3f5f..326f4c0 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde-pll.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -18,11 +18,14 @@
#clock-cells = <1>;
reg = <0xae94a00 0x1e0>,
<0xae94400 0x800>,
- <0xaf03000 0x8>;
- reg-names = "pll_base", "phy_base", "gdsc_base";
+ <0xaf03000 0x8>,
+ <0xae94200 0x100>;
+ reg-names = "pll_base", "phy_base", "gdsc_base",
+ "dynamic_pll_base";
clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
clock-names = "iface_clk";
clock-rate = <0>;
+ memory-region = <&dfps_data_memory>;
gdsc-supply = <&mdss_core_gdsc>;
qcom,platform-supply-entries {
#address-cells = <1>;
@@ -45,8 +48,10 @@
#clock-cells = <1>;
reg = <0xae96a00 0x1e0>,
<0xae96400 0x800>,
- <0xaf03000 0x8>;
- reg-names = "pll_base", "phy_base", "gdsc_base";
+ <0xaf03000 0x8>,
+ <0xae96200 0x100>;
+ reg-names = "pll_base", "phy_base", "gdsc_base",
+ "dynamic_pll_base";
clocks = <&clock_dispcc DISP_CC_MDSS_AHB_CLK>;
clock-names = "iface_clk";
clock-rate = <0>;
diff --git a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
index fb717f3..9a567a3 100644
--- a/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670-sde.dtsi
@@ -485,8 +485,9 @@
compatible = "qcom,dsi-phy-v3.0";
label = "dsi-phy-0";
cell-index = <0>;
- reg = <0xae94400 0x7c0>;
- reg-names = "dsi_phy";
+ reg = <0xae94400 0x7c0>,
+ <0xae94200 0x100>;
+ reg-names = "dsi_phy", "dyn_refresh_base";
gdsc-supply = <&mdss_core_gdsc>;
vdda-0p9-supply = <&pm660l_l1>;
qcom,platform-strength-ctrl = [55 03
@@ -518,8 +519,9 @@
compatible = "qcom,dsi-phy-v3.0";
label = "dsi-phy-1";
cell-index = <1>;
- reg = <0xae96400 0x7c0>;
- reg-names = "dsi_phy";
+ reg = <0xae96400 0x7c0>,
+ <0xae96200 0x100>;
+ reg-names = "dsi_phy", "dyn_refresh_base";
gdsc-supply = <&mdss_core_gdsc>;
vdda-0p9-supply = <&pm660l_l1>;
qcom,platform-strength-ctrl = [55 03
diff --git a/arch/arm64/boot/dts/qcom/sdm670.dtsi b/arch/arm64/boot/dts/qcom/sdm670.dtsi
index f1109e2..af44079 100644
--- a/arch/arm64/boot/dts/qcom/sdm670.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm670.dtsi
@@ -596,10 +596,15 @@
};
cont_splash_memory: cont_splash_region@9c000000 {
- reg = <0x0 0x9c000000 0x0 0x02400000>;
+ reg = <0x0 0x9c000000 0x0 0x2300000>;
label = "cont_splash_region";
};
+ dfps_data_memory: dfps_data_region@9e300000 {
+ reg = <0x0 0x9e300000 0x0 0x0100000>;
+ label = "dfps_data_region";
+ };
+
dump_mem: mem_dump_region {
compatible = "shared-dma-pool";
reusable;
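
Note the arithmetic behind the two reserved-memory changes: cont_splash_region shrinks from 0x2400000 to 0x2300000 bytes, freeing exactly the 0x100000 bytes claimed by the new region, and 0x9c000000 + 0x2300000 = 0x9e300000 is precisely where dfps_data_region starts, so the total reserved footprint is unchanged.
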
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr-dvt.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr-dvt.dtsi
index b671d0e..9409a4c 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr-dvt.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr-dvt.dtsi
@@ -69,7 +69,7 @@
sensor-position-yaw = <0>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
- cam_vdig-supply = <&pm8998_s3>;
+ cam_vdig-supply = <&camera_eyetracking_force>;
cam_clk-supply = <&titan_top_gdsc>;
regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
diff --git a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
index 6052074..f19007f 100644
--- a/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
+++ b/arch/arm64/boot/dts/qcom/sdm845-camera-sensor-qvr.dtsi
@@ -591,7 +591,7 @@
sensor-position-yaw = <0>;
cam_vio-supply = <&pm8998_lvs1>;
cam_vana-supply = <&pmi8998_bob>;
- cam_vdig-supply = <&pm8998_s3>;
+ cam_vdig-supply = <&camera_eyetracking_force>;
cam_clk-supply = <&titan_top_gdsc>;
regulator-names = "cam_vio", "cam_vana", "cam_vdig",
"cam_clk";
diff --git a/arch/arm64/configs/msm8937-perf_defconfig b/arch/arm64/configs/msm8937-perf_defconfig
index 65b5dd6..c71f860 100644
--- a/arch/arm64/configs/msm8937-perf_defconfig
+++ b/arch/arm64/configs/msm8937-perf_defconfig
@@ -481,6 +481,7 @@
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
CONFIG_USB_CONFIGFS_RNDIS=y
CONFIG_USB_CONFIGFS_RMNET_BAM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
diff --git a/arch/arm64/configs/msm8937_defconfig b/arch/arm64/configs/msm8937_defconfig
index 4b416db7..5b6f4a5 100644
--- a/arch/arm64/configs/msm8937_defconfig
+++ b/arch/arm64/configs/msm8937_defconfig
@@ -491,6 +491,7 @@
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_NCM=y
+CONFIG_USB_CONFIGFS_QCRNDIS=y
CONFIG_USB_CONFIGFS_RNDIS=y
CONFIG_USB_CONFIGFS_RMNET_BAM=y
CONFIG_USB_CONFIGFS_MASS_STORAGE=y
diff --git a/arch/arm64/crypto/Kconfig b/arch/arm64/crypto/Kconfig
index 2b10984..2cf32e9 100644
--- a/arch/arm64/crypto/Kconfig
+++ b/arch/arm64/crypto/Kconfig
@@ -53,11 +53,4 @@
tristate "CRC32 and CRC32C using optional ARMv8 instructions"
depends on ARM64
select CRYPTO_HASH
-
-config CRYPTO_SPECK_NEON
- tristate "NEON accelerated Speck cipher algorithms"
- depends on KERNEL_MODE_NEON
- select CRYPTO_BLKCIPHER
- select CRYPTO_GF128MUL
- select CRYPTO_SPECK
endif
diff --git a/arch/arm64/crypto/Makefile b/arch/arm64/crypto/Makefile
index 9908765..550e02a 100644
--- a/arch/arm64/crypto/Makefile
+++ b/arch/arm64/crypto/Makefile
@@ -30,9 +30,6 @@
obj-$(CONFIG_CRYPTO_AES_ARM64_NEON_BLK) += aes-neon-blk.o
aes-neon-blk-y := aes-glue-neon.o aes-neon.o
-obj-$(CONFIG_CRYPTO_SPECK_NEON) += speck-neon.o
-speck-neon-y := speck-neon-core.o speck-neon-glue.o
-
AFLAGS_aes-ce.o := -DINTERLEAVE=4
AFLAGS_aes-neon.o := -DINTERLEAVE=4
diff --git a/arch/arm64/crypto/speck-neon-core.S b/arch/arm64/crypto/speck-neon-core.S
deleted file mode 100644
index b144634..0000000
--- a/arch/arm64/crypto/speck-neon-core.S
+++ /dev/null
@@ -1,352 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * ARM64 NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Author: Eric Biggers <ebiggers@google.com>
- */
-
-#include <linux/linkage.h>
-
- .text
-
- // arguments
- ROUND_KEYS .req x0 // const {u64,u32} *round_keys
- NROUNDS .req w1 // int nrounds
- NROUNDS_X .req x1
- DST .req x2 // void *dst
- SRC .req x3 // const void *src
- NBYTES .req w4 // unsigned int nbytes
- TWEAK .req x5 // void *tweak
-
- // registers which hold the data being encrypted/decrypted
- // (underscores avoid a naming collision with ARM64 registers x0-x3)
- X_0 .req v0
- Y_0 .req v1
- X_1 .req v2
- Y_1 .req v3
- X_2 .req v4
- Y_2 .req v5
- X_3 .req v6
- Y_3 .req v7
-
- // the round key, duplicated in all lanes
- ROUND_KEY .req v8
-
- // index vector for tbl-based 8-bit rotates
- ROTATE_TABLE .req v9
- ROTATE_TABLE_Q .req q9
-
- // temporary registers
- TMP0 .req v10
- TMP1 .req v11
- TMP2 .req v12
- TMP3 .req v13
-
- // multiplication table for updating XTS tweaks
- GFMUL_TABLE .req v14
- GFMUL_TABLE_Q .req q14
-
- // next XTS tweak value(s)
- TWEAKV_NEXT .req v15
-
- // XTS tweaks for the blocks currently being encrypted/decrypted
- TWEAKV0 .req v16
- TWEAKV1 .req v17
- TWEAKV2 .req v18
- TWEAKV3 .req v19
- TWEAKV4 .req v20
- TWEAKV5 .req v21
- TWEAKV6 .req v22
- TWEAKV7 .req v23
-
- .align 4
-.Lror64_8_table:
- .octa 0x080f0e0d0c0b0a090007060504030201
-.Lror32_8_table:
- .octa 0x0c0f0e0d080b0a090407060500030201
-.Lrol64_8_table:
- .octa 0x0e0d0c0b0a09080f0605040302010007
-.Lrol32_8_table:
- .octa 0x0e0d0c0f0a09080b0605040702010003
-.Lgf128mul_table:
- .octa 0x00000000000000870000000000000001
-.Lgf64mul_table:
- .octa 0x0000000000000000000000002d361b00
-
-/*
- * _speck_round_128bytes() - Speck encryption round on 128 bytes at a time
- *
- * Do one Speck encryption round on the 128 bytes (8 blocks for Speck128, 16 for
- * Speck64) stored in X0-X3 and Y0-Y3, using the round key stored in all lanes
- * of ROUND_KEY. 'n' is the lane size: 64 for Speck128, or 32 for Speck64.
- * 'lanes' is the lane specifier: "2d" for Speck128 or "4s" for Speck64.
- */
-.macro _speck_round_128bytes n, lanes
-
- // x = ror(x, 8)
- tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
- tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
- tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
- tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
-
- // x += y
- add X_0.\lanes, X_0.\lanes, Y_0.\lanes
- add X_1.\lanes, X_1.\lanes, Y_1.\lanes
- add X_2.\lanes, X_2.\lanes, Y_2.\lanes
- add X_3.\lanes, X_3.\lanes, Y_3.\lanes
-
- // x ^= k
- eor X_0.16b, X_0.16b, ROUND_KEY.16b
- eor X_1.16b, X_1.16b, ROUND_KEY.16b
- eor X_2.16b, X_2.16b, ROUND_KEY.16b
- eor X_3.16b, X_3.16b, ROUND_KEY.16b
-
- // y = rol(y, 3)
- shl TMP0.\lanes, Y_0.\lanes, #3
- shl TMP1.\lanes, Y_1.\lanes, #3
- shl TMP2.\lanes, Y_2.\lanes, #3
- shl TMP3.\lanes, Y_3.\lanes, #3
- sri TMP0.\lanes, Y_0.\lanes, #(\n - 3)
- sri TMP1.\lanes, Y_1.\lanes, #(\n - 3)
- sri TMP2.\lanes, Y_2.\lanes, #(\n - 3)
- sri TMP3.\lanes, Y_3.\lanes, #(\n - 3)
-
- // y ^= x
- eor Y_0.16b, TMP0.16b, X_0.16b
- eor Y_1.16b, TMP1.16b, X_1.16b
- eor Y_2.16b, TMP2.16b, X_2.16b
- eor Y_3.16b, TMP3.16b, X_3.16b
-.endm
-
-/*
- * _speck_unround_128bytes() - Speck decryption round on 128 bytes at a time
- *
- * This is the inverse of _speck_round_128bytes().
- */
-.macro _speck_unround_128bytes n, lanes
-
- // y ^= x
- eor TMP0.16b, Y_0.16b, X_0.16b
- eor TMP1.16b, Y_1.16b, X_1.16b
- eor TMP2.16b, Y_2.16b, X_2.16b
- eor TMP3.16b, Y_3.16b, X_3.16b
-
- // y = ror(y, 3)
- ushr Y_0.\lanes, TMP0.\lanes, #3
- ushr Y_1.\lanes, TMP1.\lanes, #3
- ushr Y_2.\lanes, TMP2.\lanes, #3
- ushr Y_3.\lanes, TMP3.\lanes, #3
- sli Y_0.\lanes, TMP0.\lanes, #(\n - 3)
- sli Y_1.\lanes, TMP1.\lanes, #(\n - 3)
- sli Y_2.\lanes, TMP2.\lanes, #(\n - 3)
- sli Y_3.\lanes, TMP3.\lanes, #(\n - 3)
-
- // x ^= k
- eor X_0.16b, X_0.16b, ROUND_KEY.16b
- eor X_1.16b, X_1.16b, ROUND_KEY.16b
- eor X_2.16b, X_2.16b, ROUND_KEY.16b
- eor X_3.16b, X_3.16b, ROUND_KEY.16b
-
- // x -= y
- sub X_0.\lanes, X_0.\lanes, Y_0.\lanes
- sub X_1.\lanes, X_1.\lanes, Y_1.\lanes
- sub X_2.\lanes, X_2.\lanes, Y_2.\lanes
- sub X_3.\lanes, X_3.\lanes, Y_3.\lanes
-
- // x = rol(x, 8)
- tbl X_0.16b, {X_0.16b}, ROTATE_TABLE.16b
- tbl X_1.16b, {X_1.16b}, ROTATE_TABLE.16b
- tbl X_2.16b, {X_2.16b}, ROTATE_TABLE.16b
- tbl X_3.16b, {X_3.16b}, ROTATE_TABLE.16b
-.endm
-
-.macro _next_xts_tweak next, cur, tmp, n
-.if \n == 64
- /*
- * Calculate the next tweak by multiplying the current one by x,
- * modulo p(x) = x^128 + x^7 + x^2 + x + 1.
- */
- sshr \tmp\().2d, \cur\().2d, #63
- and \tmp\().16b, \tmp\().16b, GFMUL_TABLE.16b
- shl \next\().2d, \cur\().2d, #1
- ext \tmp\().16b, \tmp\().16b, \tmp\().16b, #8
- eor \next\().16b, \next\().16b, \tmp\().16b
-.else
- /*
- * Calculate the next two tweaks by multiplying the current ones by x^2,
- * modulo p(x) = x^64 + x^4 + x^3 + x + 1.
- */
- ushr \tmp\().2d, \cur\().2d, #62
- shl \next\().2d, \cur\().2d, #2
- tbl \tmp\().16b, {GFMUL_TABLE.16b}, \tmp\().16b
- eor \next\().16b, \next\().16b, \tmp\().16b
-.endif
-.endm
-
-/*
- * _speck_xts_crypt() - Speck-XTS encryption/decryption
- *
- * Encrypt or decrypt NBYTES bytes of data from the SRC buffer to the DST buffer
- * using Speck-XTS, specifically the variant with a block size of '2n' and round
- * count given by NROUNDS. The expanded round keys are given in ROUND_KEYS, and
- * the current XTS tweak value is given in TWEAK. It's assumed that NBYTES is a
- * nonzero multiple of 128.
- */
-.macro _speck_xts_crypt n, lanes, decrypting
-
- /*
- * If decrypting, modify the ROUND_KEYS parameter to point to the last
- * round key rather than the first, since for decryption the round keys
- * are used in reverse order.
- */
-.if \decrypting
- mov NROUNDS, NROUNDS /* zero the high 32 bits */
-.if \n == 64
- add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #3
- sub ROUND_KEYS, ROUND_KEYS, #8
-.else
- add ROUND_KEYS, ROUND_KEYS, NROUNDS_X, lsl #2
- sub ROUND_KEYS, ROUND_KEYS, #4
-.endif
-.endif
-
- // Load the index vector for tbl-based 8-bit rotates
-.if \decrypting
- ldr ROTATE_TABLE_Q, .Lrol\n\()_8_table
-.else
- ldr ROTATE_TABLE_Q, .Lror\n\()_8_table
-.endif
-
- // One-time XTS preparation
-.if \n == 64
- // Load first tweak
- ld1 {TWEAKV0.16b}, [TWEAK]
-
- // Load GF(2^128) multiplication table
- ldr GFMUL_TABLE_Q, .Lgf128mul_table
-.else
- // Load first tweak
- ld1 {TWEAKV0.8b}, [TWEAK]
-
- // Load GF(2^64) multiplication table
- ldr GFMUL_TABLE_Q, .Lgf64mul_table
-
- // Calculate second tweak, packing it together with the first
- ushr TMP0.2d, TWEAKV0.2d, #63
- shl TMP1.2d, TWEAKV0.2d, #1
- tbl TMP0.8b, {GFMUL_TABLE.16b}, TMP0.8b
- eor TMP0.8b, TMP0.8b, TMP1.8b
- mov TWEAKV0.d[1], TMP0.d[0]
-.endif
-
-.Lnext_128bytes_\@:
-
- // Calculate XTS tweaks for next 128 bytes
- _next_xts_tweak TWEAKV1, TWEAKV0, TMP0, \n
- _next_xts_tweak TWEAKV2, TWEAKV1, TMP0, \n
- _next_xts_tweak TWEAKV3, TWEAKV2, TMP0, \n
- _next_xts_tweak TWEAKV4, TWEAKV3, TMP0, \n
- _next_xts_tweak TWEAKV5, TWEAKV4, TMP0, \n
- _next_xts_tweak TWEAKV6, TWEAKV5, TMP0, \n
- _next_xts_tweak TWEAKV7, TWEAKV6, TMP0, \n
- _next_xts_tweak TWEAKV_NEXT, TWEAKV7, TMP0, \n
-
- // Load the next source blocks into {X,Y}[0-3]
- ld1 {X_0.16b-Y_1.16b}, [SRC], #64
- ld1 {X_2.16b-Y_3.16b}, [SRC], #64
-
- // XOR the source blocks with their XTS tweaks
- eor TMP0.16b, X_0.16b, TWEAKV0.16b
- eor Y_0.16b, Y_0.16b, TWEAKV1.16b
- eor TMP1.16b, X_1.16b, TWEAKV2.16b
- eor Y_1.16b, Y_1.16b, TWEAKV3.16b
- eor TMP2.16b, X_2.16b, TWEAKV4.16b
- eor Y_2.16b, Y_2.16b, TWEAKV5.16b
- eor TMP3.16b, X_3.16b, TWEAKV6.16b
- eor Y_3.16b, Y_3.16b, TWEAKV7.16b
-
- /*
- * De-interleave the 'x' and 'y' elements of each block, i.e. make it so
- * that the X[0-3] registers contain only the second halves of blocks,
- * and the Y[0-3] registers contain only the first halves of blocks.
- * (Speck uses the order (y, x) rather than the more intuitive (x, y).)
- */
- uzp2 X_0.\lanes, TMP0.\lanes, Y_0.\lanes
- uzp1 Y_0.\lanes, TMP0.\lanes, Y_0.\lanes
- uzp2 X_1.\lanes, TMP1.\lanes, Y_1.\lanes
- uzp1 Y_1.\lanes, TMP1.\lanes, Y_1.\lanes
- uzp2 X_2.\lanes, TMP2.\lanes, Y_2.\lanes
- uzp1 Y_2.\lanes, TMP2.\lanes, Y_2.\lanes
- uzp2 X_3.\lanes, TMP3.\lanes, Y_3.\lanes
- uzp1 Y_3.\lanes, TMP3.\lanes, Y_3.\lanes
-
- // Do the cipher rounds
- mov x6, ROUND_KEYS
- mov w7, NROUNDS
-.Lnext_round_\@:
-.if \decrypting
- ld1r {ROUND_KEY.\lanes}, [x6]
- sub x6, x6, #( \n / 8 )
- _speck_unround_128bytes \n, \lanes
-.else
- ld1r {ROUND_KEY.\lanes}, [x6], #( \n / 8 )
- _speck_round_128bytes \n, \lanes
-.endif
- subs w7, w7, #1
- bne .Lnext_round_\@
-
- // Re-interleave the 'x' and 'y' elements of each block
- zip1 TMP0.\lanes, Y_0.\lanes, X_0.\lanes
- zip2 Y_0.\lanes, Y_0.\lanes, X_0.\lanes
- zip1 TMP1.\lanes, Y_1.\lanes, X_1.\lanes
- zip2 Y_1.\lanes, Y_1.\lanes, X_1.\lanes
- zip1 TMP2.\lanes, Y_2.\lanes, X_2.\lanes
- zip2 Y_2.\lanes, Y_2.\lanes, X_2.\lanes
- zip1 TMP3.\lanes, Y_3.\lanes, X_3.\lanes
- zip2 Y_3.\lanes, Y_3.\lanes, X_3.\lanes
-
- // XOR the encrypted/decrypted blocks with the tweaks calculated earlier
- eor X_0.16b, TMP0.16b, TWEAKV0.16b
- eor Y_0.16b, Y_0.16b, TWEAKV1.16b
- eor X_1.16b, TMP1.16b, TWEAKV2.16b
- eor Y_1.16b, Y_1.16b, TWEAKV3.16b
- eor X_2.16b, TMP2.16b, TWEAKV4.16b
- eor Y_2.16b, Y_2.16b, TWEAKV5.16b
- eor X_3.16b, TMP3.16b, TWEAKV6.16b
- eor Y_3.16b, Y_3.16b, TWEAKV7.16b
- mov TWEAKV0.16b, TWEAKV_NEXT.16b
-
- // Store the ciphertext in the destination buffer
- st1 {X_0.16b-Y_1.16b}, [DST], #64
- st1 {X_2.16b-Y_3.16b}, [DST], #64
-
- // Continue if there are more 128-byte chunks remaining
- subs NBYTES, NBYTES, #128
- bne .Lnext_128bytes_\@
-
- // Store the next tweak and return
-.if \n == 64
- st1 {TWEAKV_NEXT.16b}, [TWEAK]
-.else
- st1 {TWEAKV_NEXT.8b}, [TWEAK]
-.endif
- ret
-.endm
-
-ENTRY(speck128_xts_encrypt_neon)
- _speck_xts_crypt n=64, lanes=2d, decrypting=0
-ENDPROC(speck128_xts_encrypt_neon)
-
-ENTRY(speck128_xts_decrypt_neon)
- _speck_xts_crypt n=64, lanes=2d, decrypting=1
-ENDPROC(speck128_xts_decrypt_neon)
-
-ENTRY(speck64_xts_encrypt_neon)
- _speck_xts_crypt n=32, lanes=4s, decrypting=0
-ENDPROC(speck64_xts_encrypt_neon)
-
-ENTRY(speck64_xts_decrypt_neon)
- _speck_xts_crypt n=32, lanes=4s, decrypting=1
-ENDPROC(speck64_xts_decrypt_neon)
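
For reference, the _next_xts_tweak macro being deleted implements Galois-field doubling of the XTS tweak. In the 128-bit case, the scalar equivalent under the little-endian convention the assembly used is a sketch like:

        #include <stdint.h>

        struct le128 { uint64_t lo, hi; };

        /*
         * Multiply the tweak by x modulo x^128 + x^7 + x^2 + x + 1.
         * 0x87 encodes the reduction terms x^7 + x^2 + x + 1.
         */
        static void gf128mul_x(struct le128 *t)
        {
                uint64_t carry = t->hi >> 63;   /* bit 127 shifted out */

                t->hi = (t->hi << 1) | (t->lo >> 63);
                t->lo = (t->lo << 1) ^ (carry ? 0x87 : 0);
        }
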
diff --git a/arch/arm64/crypto/speck-neon-glue.c b/arch/arm64/crypto/speck-neon-glue.c
deleted file mode 100644
index c7d1856..0000000
--- a/arch/arm64/crypto/speck-neon-glue.c
+++ /dev/null
@@ -1,308 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * NEON-accelerated implementation of Speck128-XTS and Speck64-XTS
- * (64-bit version; based on the 32-bit version)
- *
- * Copyright (c) 2018 Google, Inc
- */
-
-#include <asm/hwcap.h>
-#include <asm/neon.h>
-#include <asm/simd.h>
-#include <crypto/algapi.h>
-#include <crypto/gf128mul.h>
-#include <crypto/speck.h>
-#include <crypto/xts.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-
-/* The assembly functions only handle multiples of 128 bytes */
-#define SPECK_NEON_CHUNK_SIZE 128
-
-/* Speck128 */
-
-struct speck128_xts_tfm_ctx {
- struct speck128_tfm_ctx main_key;
- struct speck128_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck128_xts_encrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck128_xts_decrypt_neon(const u64 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck128_crypt_one_t)(const struct speck128_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck128_xts_crypt_many_t)(const u64 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck128_xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- speck128_crypt_one_t crypt_one,
- speck128_xts_crypt_many_t crypt_many)
-{
- struct crypto_blkcipher *tfm = desc->tfm;
- const struct speck128_xts_tfm_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct blkcipher_walk walk;
- le128 tweak;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, SPECK_NEON_CHUNK_SIZE);
-
- crypto_speck128_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- le128_xor((le128 *)dst, (const le128 *)src, &tweak);
- (*crypt_one)(&ctx->main_key, dst, dst);
- le128_xor((le128 *)dst, (const le128 *)dst, &tweak);
- gf128mul_x_ble((be128 *)&tweak, (const be128 *)&tweak);
-
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- return err;
-}
-
-static int speck128_xts_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return __speck128_xts_crypt(desc, dst, src, nbytes,
- crypto_speck128_encrypt,
- speck128_xts_encrypt_neon);
-}
-
-static int speck128_xts_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst,
- struct scatterlist *src,
- unsigned int nbytes)
-{
- return __speck128_xts_crypt(desc, dst, src, nbytes,
- crypto_speck128_decrypt,
- speck128_xts_decrypt_neon);
-}
-
-static int speck128_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck128_xts_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- if (keylen % 2)
- return -EINVAL;
-
- keylen /= 2;
-
- err = crypto_speck128_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck128_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-/* Speck64 */
-
-struct speck64_xts_tfm_ctx {
- struct speck64_tfm_ctx main_key;
- struct speck64_tfm_ctx tweak_key;
-};
-
-asmlinkage void speck64_xts_encrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-asmlinkage void speck64_xts_decrypt_neon(const u32 *round_keys, int nrounds,
- void *dst, const void *src,
- unsigned int nbytes, void *tweak);
-
-typedef void (*speck64_crypt_one_t)(const struct speck64_tfm_ctx *,
- u8 *, const u8 *);
-typedef void (*speck64_xts_crypt_many_t)(const u32 *, int, void *,
- const void *, unsigned int, void *);
-
-static __always_inline int
-__speck64_xts_crypt(struct blkcipher_desc *desc, struct scatterlist *dst,
- struct scatterlist *src, unsigned int nbytes,
- speck64_crypt_one_t crypt_one,
- speck64_xts_crypt_many_t crypt_many)
-{
- struct crypto_blkcipher *tfm = desc->tfm;
- const struct speck64_xts_tfm_ctx *ctx = crypto_blkcipher_ctx(tfm);
- struct blkcipher_walk walk;
- __le64 tweak;
- int err;
-
- blkcipher_walk_init(&walk, dst, src, nbytes);
- err = blkcipher_walk_virt_block(desc, &walk, SPECK_NEON_CHUNK_SIZE);
-
- crypto_speck64_encrypt(&ctx->tweak_key, (u8 *)&tweak, walk.iv);
-
- while (walk.nbytes > 0) {
- unsigned int nbytes = walk.nbytes;
- u8 *dst = walk.dst.virt.addr;
- const u8 *src = walk.src.virt.addr;
-
- if (nbytes >= SPECK_NEON_CHUNK_SIZE && may_use_simd()) {
- unsigned int count;
-
- count = round_down(nbytes, SPECK_NEON_CHUNK_SIZE);
- kernel_neon_begin();
- (*crypt_many)(ctx->main_key.round_keys,
- ctx->main_key.nrounds,
- dst, src, count, &tweak);
- kernel_neon_end();
- dst += count;
- src += count;
- nbytes -= count;
- }
-
- /* Handle any remainder with generic code */
- while (nbytes >= sizeof(tweak)) {
- *(__le64 *)dst = *(__le64 *)src ^ tweak;
- (*crypt_one)(&ctx->main_key, dst, dst);
- *(__le64 *)dst ^= tweak;
- tweak = cpu_to_le64((le64_to_cpu(tweak) << 1) ^
- ((tweak & cpu_to_le64(1ULL << 63)) ?
- 0x1B : 0));
- dst += sizeof(tweak);
- src += sizeof(tweak);
- nbytes -= sizeof(tweak);
- }
- err = blkcipher_walk_done(desc, &walk, nbytes);
- }
-
- return err;
-}
-
-static int speck64_xts_encrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- return __speck64_xts_crypt(desc, dst, src, nbytes,
- crypto_speck64_encrypt,
- speck64_xts_encrypt_neon);
-}
-
-static int speck64_xts_decrypt(struct blkcipher_desc *desc,
- struct scatterlist *dst, struct scatterlist *src,
- unsigned int nbytes)
-{
- return __speck64_xts_crypt(desc, dst, src, nbytes,
- crypto_speck64_decrypt,
- speck64_xts_decrypt_neon);
-}
-
-static int speck64_xts_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- struct speck64_xts_tfm_ctx *ctx = crypto_tfm_ctx(tfm);
- int err;
-
- if (keylen % 2)
- return -EINVAL;
-
- keylen /= 2;
-
- err = crypto_speck64_setkey(&ctx->main_key, key, keylen);
- if (err)
- return err;
-
- return crypto_speck64_setkey(&ctx->tweak_key, key + keylen, keylen);
-}
-
-static struct crypto_alg speck_algs[] = {
- {
- .cra_name = "xts(speck128)",
- .cra_driver_name = "xts-speck128-neon",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = SPECK128_BLOCK_SIZE,
- .cra_type = &crypto_blkcipher_type,
- .cra_ctxsize = sizeof(struct speck128_xts_tfm_ctx),
- .cra_alignmask = 7,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = 2 * SPECK128_128_KEY_SIZE,
- .max_keysize = 2 * SPECK128_256_KEY_SIZE,
- .ivsize = SPECK128_BLOCK_SIZE,
- .setkey = speck128_xts_setkey,
- .encrypt = speck128_xts_encrypt,
- .decrypt = speck128_xts_decrypt,
- }
- }
- }, {
- .cra_name = "xts(speck64)",
- .cra_driver_name = "xts-speck64-neon",
- .cra_priority = 300,
- .cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER,
- .cra_blocksize = SPECK64_BLOCK_SIZE,
- .cra_type = &crypto_blkcipher_type,
- .cra_ctxsize = sizeof(struct speck64_xts_tfm_ctx),
- .cra_alignmask = 7,
- .cra_module = THIS_MODULE,
- .cra_u = {
- .blkcipher = {
- .min_keysize = 2 * SPECK64_96_KEY_SIZE,
- .max_keysize = 2 * SPECK64_128_KEY_SIZE,
- .ivsize = SPECK64_BLOCK_SIZE,
- .setkey = speck64_xts_setkey,
- .encrypt = speck64_xts_encrypt,
- .decrypt = speck64_xts_decrypt,
- }
- }
- }
-};
-
-static int __init speck_neon_module_init(void)
-{
- if (!(elf_hwcap & HWCAP_ASIMD))
- return -ENODEV;
- return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-static void __exit speck_neon_module_exit(void)
-{
- crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-module_init(speck_neon_module_init);
-module_exit(speck_neon_module_exit);
-
-MODULE_DESCRIPTION("Speck block cipher (NEON-accelerated)");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
-MODULE_ALIAS_CRYPTO("xts(speck128)");
-MODULE_ALIAS_CRYPTO("xts-speck128-neon");
-MODULE_ALIAS_CRYPTO("xts(speck64)");
-MODULE_ALIAS_CRYPTO("xts-speck64-neon");
diff --git a/arch/arm64/include/asm/jump_label.h b/arch/arm64/include/asm/jump_label.h
index 1b5e0e8..7e2b3e3 100644
--- a/arch/arm64/include/asm/jump_label.h
+++ b/arch/arm64/include/asm/jump_label.h
@@ -28,7 +28,7 @@
static __always_inline bool arch_static_branch(struct static_key *key, bool branch)
{
- asm goto("1: nop\n\t"
+ asm_volatile_goto("1: nop\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
@@ -42,7 +42,7 @@
static __always_inline bool arch_static_branch_jump(struct static_key *key, bool branch)
{
- asm goto("1: b %l[l_yes]\n\t"
+ asm_volatile_goto("1: b %l[l_yes]\n\t"
".pushsection __jump_table, \"aw\"\n\t"
".align 3\n\t"
".quad 1b, %l[l_yes], %c0\n\t"
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index fe39e68..ba0d52c 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -42,6 +42,11 @@
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+ return !(vcpu->arch.hcr_el2 & HCR_RW);
+}
+
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
diff --git a/arch/arm64/kvm/guest.c b/arch/arm64/kvm/guest.c
index d3e0a2f..e41a7b4 100644
--- a/arch/arm64/kvm/guest.c
+++ b/arch/arm64/kvm/guest.c
@@ -57,6 +57,45 @@
return id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK | KVM_REG_ARM_CORE);
}
+static int validate_core_offset(const struct kvm_one_reg *reg)
+{
+ u64 off = core_reg_offset_from_id(reg->id);
+ int size;
+
+ switch (off) {
+ case KVM_REG_ARM_CORE_REG(regs.regs[0]) ...
+ KVM_REG_ARM_CORE_REG(regs.regs[30]):
+ case KVM_REG_ARM_CORE_REG(regs.sp):
+ case KVM_REG_ARM_CORE_REG(regs.pc):
+ case KVM_REG_ARM_CORE_REG(regs.pstate):
+ case KVM_REG_ARM_CORE_REG(sp_el1):
+ case KVM_REG_ARM_CORE_REG(elr_el1):
+ case KVM_REG_ARM_CORE_REG(spsr[0]) ...
+ KVM_REG_ARM_CORE_REG(spsr[KVM_NR_SPSR - 1]):
+ size = sizeof(__u64);
+ break;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.vregs[0]) ...
+ KVM_REG_ARM_CORE_REG(fp_regs.vregs[31]):
+ size = sizeof(__uint128_t);
+ break;
+
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpsr):
+ case KVM_REG_ARM_CORE_REG(fp_regs.fpcr):
+ size = sizeof(__u32);
+ break;
+
+ default:
+ return -EINVAL;
+ }
+
+ if (KVM_REG_SIZE(reg->id) == size &&
+ IS_ALIGNED(off, size / sizeof(__u32)))
+ return 0;
+
+ return -EINVAL;
+}
+
static int get_core_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
/*
@@ -76,6 +115,9 @@
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
+ if (validate_core_offset(reg))
+ return -EINVAL;
+
if (copy_to_user(uaddr, ((u32 *)regs) + off, KVM_REG_SIZE(reg->id)))
return -EFAULT;
@@ -98,6 +140,9 @@
(off + (KVM_REG_SIZE(reg->id) / sizeof(__u32))) >= nr_regs)
return -ENOENT;
+ if (validate_core_offset(reg))
+ return -EINVAL;
+
if (KVM_REG_SIZE(reg->id) > sizeof(tmp))
return -EINVAL;
@@ -107,17 +152,25 @@
}
if (off == KVM_REG_ARM_CORE_REG(regs.pstate)) {
- u32 mode = (*(u32 *)valp) & COMPAT_PSR_MODE_MASK;
+ u64 mode = (*(u64 *)valp) & COMPAT_PSR_MODE_MASK;
switch (mode) {
case COMPAT_PSR_MODE_USR:
+ if (!system_supports_32bit_el0())
+ return -EINVAL;
+ break;
case COMPAT_PSR_MODE_FIQ:
case COMPAT_PSR_MODE_IRQ:
case COMPAT_PSR_MODE_SVC:
case COMPAT_PSR_MODE_ABT:
case COMPAT_PSR_MODE_UND:
+ if (!vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
+ break;
case PSR_MODE_EL0t:
case PSR_MODE_EL1t:
case PSR_MODE_EL1h:
+ if (vcpu_el1_is_32bit(vcpu))
+ return -EINVAL;
break;
default:
err = -EINVAL;
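
The new pstate checks pair each requested mode with the vCPU's configured register width. Restated as a standalone predicate, reusing the kernel's mode macros (a sketch; the real code folds this into set_core_reg):

        static bool pstate_mode_allowed(bool el1_is_32bit,
                                        bool host_has_32bit_el0, u64 mode)
        {
                switch (mode) {
                case COMPAT_PSR_MODE_USR:       /* AArch32 EL0 */
                        return host_has_32bit_el0;
                case COMPAT_PSR_MODE_FIQ:       /* AArch32 EL1 modes */
                case COMPAT_PSR_MODE_IRQ:
                case COMPAT_PSR_MODE_SVC:
                case COMPAT_PSR_MODE_ABT:
                case COMPAT_PSR_MODE_UND:
                        return el1_is_32bit;
                case PSR_MODE_EL0t:             /* AArch64 modes */
                case PSR_MODE_EL1t:
                case PSR_MODE_EL1h:
                        return !el1_is_32bit;
                default:
                        return false;
                }
        }
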
diff --git a/arch/hexagon/include/asm/bitops.h b/arch/hexagon/include/asm/bitops.h
index 5e4a59b..2691a18 100644
--- a/arch/hexagon/include/asm/bitops.h
+++ b/arch/hexagon/include/asm/bitops.h
@@ -211,7 +211,7 @@
* This is defined the same way as ffs.
* Note fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
*/
-static inline long fls(int x)
+static inline int fls(int x)
{
int r;
@@ -232,7 +232,7 @@
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
-static inline long ffs(int x)
+static inline int ffs(int x)
{
int r;
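
The prototype change aligns Hexagon with the generic int fls(int) / int ffs(int) signatures. For reference, the fls() contract the kernel assumes is fls(0) == 0, fls(1) == 1, fls(0x80000000) == 32, i.e. a reference sketch like:

        /* Position of the most significant set bit, 1-based; 0 for 0. */
        static int fls_ref(unsigned int x)
        {
                int r = 0;

                while (x) {
                        x >>= 1;
                        r++;
                }
                return r;
        }
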
diff --git a/arch/hexagon/kernel/dma.c b/arch/hexagon/kernel/dma.c
index b901778..0e2be48 100644
--- a/arch/hexagon/kernel/dma.c
+++ b/arch/hexagon/kernel/dma.c
@@ -68,7 +68,7 @@
panic("Can't create %s() memory pool!", __func__);
else
gen_pool_add(coherent_pool,
- pfn_to_virt(max_low_pfn),
+ (unsigned long)pfn_to_virt(max_low_pfn),
hexagon_coherent_pool_size, -1);
}
diff --git a/arch/powerpc/kernel/fadump.c b/arch/powerpc/kernel/fadump.c
index e3acf5c..0292504 100644
--- a/arch/powerpc/kernel/fadump.c
+++ b/arch/powerpc/kernel/fadump.c
@@ -365,9 +365,9 @@
}
early_param("fadump_reserve_mem", early_fadump_reserve_mem);
-static void register_fw_dump(struct fadump_mem_struct *fdm)
+static int register_fw_dump(struct fadump_mem_struct *fdm)
{
- int rc;
+ int rc, err;
unsigned int wait_time;
pr_debug("Registering for firmware-assisted kernel dump...\n");
@@ -384,7 +384,11 @@
} while (wait_time);
+ err = -EIO;
switch (rc) {
+ default:
+ pr_err("Failed to register. Unknown Error(%d).\n", rc);
+ break;
case -1:
printk(KERN_ERR "Failed to register firmware-assisted kernel"
" dump. Hardware Error(%d).\n", rc);
@@ -392,18 +396,22 @@
case -3:
printk(KERN_ERR "Failed to register firmware-assisted kernel"
" dump. Parameter Error(%d).\n", rc);
+ err = -EINVAL;
break;
case -9:
printk(KERN_ERR "firmware-assisted kernel dump is already "
" registered.");
fw_dump.dump_registered = 1;
+ err = -EEXIST;
break;
case 0:
printk(KERN_INFO "firmware-assisted kernel dump registration"
" is successful\n");
fw_dump.dump_registered = 1;
+ err = 0;
break;
}
+ return err;
}
void crash_fadump(struct pt_regs *regs, const char *str)
@@ -1006,7 +1014,7 @@
return addr;
}
-static void register_fadump(void)
+static int register_fadump(void)
{
unsigned long addr;
void *vaddr;
@@ -1017,7 +1025,7 @@
* assisted dump.
*/
if (!fw_dump.reserve_dump_area_size)
- return;
+ return -ENODEV;
ret = fadump_setup_crash_memory_ranges();
if (ret)
@@ -1032,7 +1040,7 @@
fadump_create_elfcore_headers(vaddr);
/* register the future kernel dump with firmware. */
- register_fw_dump(&fdm);
+ return register_fw_dump(&fdm);
}
static int fadump_unregister_dump(struct fadump_mem_struct *fdm)
@@ -1218,7 +1226,6 @@
switch (buf[0]) {
case '0':
if (fw_dump.dump_registered == 0) {
- ret = -EINVAL;
goto unlock_out;
}
/* Un-register Firmware-assisted dump */
@@ -1226,11 +1233,11 @@
break;
case '1':
if (fw_dump.dump_registered == 1) {
- ret = -EINVAL;
+ ret = -EEXIST;
goto unlock_out;
}
/* Register Firmware-assisted dump */
- register_fadump();
+ ret = register_fadump();
break;
default:
ret = -EINVAL;
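
With register_fadump() and register_fw_dump() now returning an errno, the sysfs store propagates real failure codes: writing '1' while a dump is already registered yields -EEXIST instead of the old blanket -EINVAL, a firmware parameter error maps to -EINVAL, unknown firmware errors map to -EIO, and writing '0' when nothing is registered becomes a silent no-op.
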
diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c
index 2694d07..9dafd7a 100644
--- a/arch/powerpc/kernel/machine_kexec.c
+++ b/arch/powerpc/kernel/machine_kexec.c
@@ -186,7 +186,12 @@
(unsigned long)(crashk_res.start >> 20),
(unsigned long)(memblock_phys_mem_size() >> 20));
- memblock_reserve(crashk_res.start, crash_size);
+ if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
+ memblock_reserve(crashk_res.start, crash_size)) {
+ pr_err("Failed to reserve memory for crashkernel!\n");
+ crashk_res.start = crashk_res.end = 0;
+ return;
+ }
}
int overlaps_crashkernel(unsigned long start, unsigned long size)
diff --git a/arch/powerpc/kernel/tm.S b/arch/powerpc/kernel/tm.S
index 3a2d041..f59b738 100644
--- a/arch/powerpc/kernel/tm.S
+++ b/arch/powerpc/kernel/tm.S
@@ -166,13 +166,27 @@
std r1, PACATMSCRATCH(r13)
ld r1, PACAR1(r13)
- /* Store the PPR in r11 and reset to decent value */
std r11, GPR11(r1) /* Temporary stash */
+ /*
+ * Move the saved user r1 to the kernel stack in case PACATMSCRATCH is
+ * clobbered by an exception once we turn on MSR_RI below.
+ */
+ ld r11, PACATMSCRATCH(r13)
+ std r11, GPR1(r1)
+
+ /*
+ * Store r13 away so we can free up the scratch SPR for the SLB fault
+ * handler (needed once we start accessing the thread_struct).
+ */
+ GET_SCRATCH0(r11)
+ std r11, GPR13(r1)
+
/* Reset MSR RI so we can take SLB faults again */
li r11, MSR_RI
mtmsrd r11, 1
+ /* Store the PPR in r11 and reset to decent value */
mfspr r11, SPRN_PPR
HMT_MEDIUM
@@ -197,11 +211,11 @@
SAVE_GPR(8, r7) /* user r8 */
SAVE_GPR(9, r7) /* user r9 */
SAVE_GPR(10, r7) /* user r10 */
- ld r3, PACATMSCRATCH(r13) /* user r1 */
+ ld r3, GPR1(r1) /* user r1 */
ld r4, GPR7(r1) /* user r7 */
ld r5, GPR11(r1) /* user r11 */
ld r6, GPR12(r1) /* user r12 */
- GET_SCRATCH0(8) /* user r13 */
+ ld r8, GPR13(r1) /* user r13 */
std r3, GPR1(r7)
std r4, GPR7(r7)
std r5, GPR11(r7)
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index 05f09ae..915e89f 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -314,7 +314,7 @@
unsigned long pp, key;
unsigned long v, gr;
__be64 *hptep;
- int index;
+ long int index;
int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);
/* Get SLB entry */
diff --git a/arch/powerpc/platforms/powernv/pci-ioda.c b/arch/powerpc/platforms/powernv/pci-ioda.c
index f52cc6f..8015e40 100644
--- a/arch/powerpc/platforms/powernv/pci-ioda.c
+++ b/arch/powerpc/platforms/powernv/pci-ioda.c
@@ -2623,7 +2623,7 @@
level_shift = entries_shift + 3;
level_shift = max_t(unsigned, level_shift, PAGE_SHIFT);
- if ((level_shift - 3) * levels + page_shift >= 60)
+ if ((level_shift - 3) * levels + page_shift >= 55)
return -EINVAL;
/* Allocate TCE table */
diff --git a/arch/s390/appldata/appldata_os.c b/arch/s390/appldata/appldata_os.c
index 69b23b2..08b9e94 100644
--- a/arch/s390/appldata/appldata_os.c
+++ b/arch/s390/appldata/appldata_os.c
@@ -113,21 +113,21 @@
j = 0;
for_each_online_cpu(i) {
os_data->os_cpu[j].per_cpu_user =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_USER]);
os_data->os_cpu[j].per_cpu_nice =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_NICE]);
os_data->os_cpu[j].per_cpu_system =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]);
os_data->os_cpu[j].per_cpu_idle =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IDLE]);
os_data->os_cpu[j].per_cpu_irq =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IRQ]);
os_data->os_cpu[j].per_cpu_softirq =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]);
os_data->os_cpu[j].per_cpu_iowait =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_IOWAIT]);
os_data->os_cpu[j].per_cpu_steal =
- cputime_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
+ nsecs_to_jiffies(kcpustat_cpu(i).cpustat[CPUTIME_STEAL]);
os_data->os_cpu[j].cpu_id = i;
j++;
}
diff --git a/arch/s390/mm/extmem.c b/arch/s390/mm/extmem.c
index 02042b6..e6665a6 100644
--- a/arch/s390/mm/extmem.c
+++ b/arch/s390/mm/extmem.c
@@ -79,7 +79,7 @@
struct dcss_segment {
struct list_head list;
char dcss_name[8];
- char res_name[15];
+ char res_name[16];
unsigned long start_addr;
unsigned long end;
atomic_t ref_count;
@@ -432,7 +432,7 @@
memcpy(&seg->res_name, seg->dcss_name, 8);
EBCASC(seg->res_name, 8);
seg->res_name[8] = '\0';
- strncat(seg->res_name, " (DCSS)", 7);
+ strlcat(seg->res_name, " (DCSS)", sizeof(seg->res_name));
seg->res->name = seg->res_name;
rc = seg->vm_segtype;
if (rc == SEG_TYPE_SC ||
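
The res_name change fixes an off-by-one: the name is 8 EBCDIC-converted bytes plus a NUL, and appending the 7-character " (DCSS)" suffix needs 8 + 7 + 1 = 16 bytes, one more than the old char[15]. strlcat also has the safer contract here, since its third argument bounds the total buffer size, whereas strncat's count bounds only the appended characters and still writes a terminating NUL beyond them.
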
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 995f785..781a044 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -26,7 +26,7 @@
.data = &page_table_allocate_pgste,
.maxlen = sizeof(int),
.mode = S_IRUGO | S_IWUSR,
- .proc_handler = proc_dointvec,
+ .proc_handler = proc_dointvec_minmax,
.extra1 = &page_table_allocate_pgste_min,
.extra2 = &page_table_allocate_pgste_max,
},
diff --git a/arch/x86/configs/x86_64_cuttlefish_defconfig b/arch/x86/configs/x86_64_cuttlefish_defconfig
index 98543be..f2a764f 100644
--- a/arch/x86/configs/x86_64_cuttlefish_defconfig
+++ b/arch/x86/configs/x86_64_cuttlefish_defconfig
@@ -210,7 +210,6 @@
CONFIG_BLK_DEV_RAM_SIZE=8192
CONFIG_VIRTIO_BLK=y
CONFIG_UID_SYS_STATS=y
-CONFIG_MEMORY_STATE_TIME=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_BLK_DEV_SR=y
diff --git a/arch/x86/crypto/crc32c-intel_glue.c b/arch/x86/crypto/crc32c-intel_glue.c
index dd19584..5773e11 100644
--- a/arch/x86/crypto/crc32c-intel_glue.c
+++ b/arch/x86/crypto/crc32c-intel_glue.c
@@ -48,21 +48,13 @@
#ifdef CONFIG_X86_64
/*
* use carryless multiply version of crc32c when buffer
- * size is >= 512 (when eager fpu is enabled) or
- * >= 1024 (when eager fpu is disabled) to account
+ * size is >= 512 to account
* for fpu state save/restore overhead.
*/
-#define CRC32C_PCL_BREAKEVEN_EAGERFPU 512
-#define CRC32C_PCL_BREAKEVEN_NOEAGERFPU 1024
+#define CRC32C_PCL_BREAKEVEN 512
asmlinkage unsigned int crc_pcl(const u8 *buffer, int len,
unsigned int crc_init);
-static int crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_EAGERFPU;
-#define set_pcl_breakeven_point() \
-do { \
- if (!use_eager_fpu()) \
- crc32c_pcl_breakeven = CRC32C_PCL_BREAKEVEN_NOEAGERFPU; \
-} while (0)
#endif /* CONFIG_X86_64 */
static u32 crc32c_intel_le_hw_byte(u32 crc, unsigned char const *data, size_t length)
@@ -185,7 +177,7 @@
* use faster PCL version if datasize is large enough to
* overcome kernel fpu state save/restore overhead
*/
- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
kernel_fpu_begin();
*crcp = crc_pcl(data, len, *crcp);
kernel_fpu_end();
@@ -197,7 +189,7 @@
static int __crc32c_pcl_intel_finup(u32 *crcp, const u8 *data, unsigned int len,
u8 *out)
{
- if (len >= crc32c_pcl_breakeven && irq_fpu_usable()) {
+ if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
kernel_fpu_begin();
*(__le32 *)out = ~cpu_to_le32(crc_pcl(data, len, *crcp));
kernel_fpu_end();
@@ -257,7 +249,6 @@
alg.update = crc32c_pcl_intel_update;
alg.finup = crc32c_pcl_intel_finup;
alg.digest = crc32c_pcl_intel_digest;
- set_pcl_breakeven_point();
}
#endif
return crypto_register_shash(&alg);
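
With eager FPU switching now unconditional, the lazy-restore breakeven (1024) is dead code and the runtime variable collapses to a compile-time constant. The resulting dispatch shape, sketched using this file's own helpers (the wrapper function itself is hypothetical):

        static u32 crc32c_dispatch(u32 crc, const u8 *data, unsigned int len)
        {
                /*
                 * Large buffers amortize the kernel_fpu_begin/end cost, so
                 * take the PCLMULQDQ path; otherwise fall back to the
                 * SSE4.2 CRC32 instruction path.
                 */
                if (len >= CRC32C_PCL_BREAKEVEN && irq_fpu_usable()) {
                        kernel_fpu_begin();
                        crc = crc_pcl(data, len, crc);
                        kernel_fpu_end();
                        return crc;
                }
                return crc32c_intel_le_hw(crc, data, len);
        }
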
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 28f3691..d764992 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -91,7 +91,7 @@
.endm
.macro TRACE_IRQS_IRETQ_DEBUG
- bt $9, EFLAGS(%rsp) /* interrupts off? */
+ btl $9, EFLAGS(%rsp) /* interrupts off? */
jnc 1f
TRACE_IRQS_ON_DEBUG
1:
@@ -485,7 +485,7 @@
#ifdef CONFIG_PREEMPT
/* Interrupts are off */
/* Check if we need preemption */
- bt $9, EFLAGS(%rsp) /* were interrupts off? */
+ btl $9, EFLAGS(%rsp) /* were interrupts off? */
jnc 1f
0: cmpl $0, PER_CPU_VAR(__preempt_count)
jnz 1f
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
index 02223cb..1e96709 100644
--- a/arch/x86/entry/vdso/vclock_gettime.c
+++ b/arch/x86/entry/vdso/vclock_gettime.c
@@ -37,8 +37,9 @@
notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
{
long ret;
- asm("syscall" : "=a" (ret) :
- "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+ asm ("syscall" : "=a" (ret), "=m" (*ts) :
+ "0" (__NR_clock_gettime), "D" (clock), "S" (ts) :
+ "memory", "rcx", "r11");
return ret;
}
@@ -46,8 +47,9 @@
{
long ret;
- asm("syscall" : "=a" (ret) :
- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+ asm ("syscall" : "=a" (ret), "=m" (*tv), "=m" (*tz) :
+ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) :
+ "memory", "rcx", "r11");
return ret;
}
@@ -58,13 +60,13 @@
{
long ret;
- asm(
+ asm (
"mov %%ebx, %%edx \n"
- "mov %2, %%ebx \n"
+ "mov %[clock], %%ebx \n"
"call __kernel_vsyscall \n"
"mov %%edx, %%ebx \n"
- : "=a" (ret)
- : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+ : "=a" (ret), "=m" (*ts)
+ : "0" (__NR_clock_gettime), [clock] "g" (clock), "c" (ts)
: "memory", "edx");
return ret;
}
@@ -73,13 +75,13 @@
{
long ret;
- asm(
+ asm (
"mov %%ebx, %%edx \n"
- "mov %2, %%ebx \n"
+ "mov %[tv], %%ebx \n"
"call __kernel_vsyscall \n"
"mov %%edx, %%ebx \n"
- : "=a" (ret)
- : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+ : "=a" (ret), "=m" (*tv), "=m" (*tz)
+ : "0" (__NR_gettimeofday), [tv] "g" (tv), "c" (tz)
: "memory", "edx");
return ret;
}
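
The constraint fixes encode two facts about the x86-64 SYSCALL instruction: the kernel returns with rcx and r11 clobbered (they hold the return RIP and RFLAGS), and memory the kernel writes through pointer arguments must appear as outputs so the compiler neither caches stale values nor reorders around the call. A standalone illustration; the literal syscall number is an assumption for this sketch:

        #include <time.h>

        static long raw_clock_gettime(long clock, struct timespec *ts)
        {
                long ret;

                asm volatile ("syscall"
                              : "=a" (ret), "=m" (*ts)
                              : "0" (228 /* __NR_clock_gettime, x86-64 */),
                                "D" (clock), "S" (ts)
                              : "memory", "rcx", "r11");
                return ret;
        }
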
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 5d103a8..10c1a5c 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -342,7 +342,7 @@
mask = x86_pmu.lbr_nr - 1;
tos = task_ctx->tos;
- for (i = 0; i < tos; i++) {
+ for (i = 0; i < task_ctx->valid_lbrs; i++) {
lbr_idx = (tos - i) & mask;
wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
wrlbr_to (lbr_idx, task_ctx->lbr_to[i]);
@@ -350,6 +350,15 @@
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
+
+ for (; i < x86_pmu.lbr_nr; i++) {
+ lbr_idx = (tos - i) & mask;
+ wrlbr_from(lbr_idx, 0);
+ wrlbr_to(lbr_idx, 0);
+ if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
+ wrmsrl(MSR_LBR_INFO_0 + lbr_idx, 0);
+ }
+
wrmsrl(x86_pmu.lbr_tos, tos);
task_ctx->lbr_stack_state = LBR_NONE;
}
@@ -357,7 +366,7 @@
static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
{
unsigned lbr_idx, mask;
- u64 tos;
+ u64 tos, from;
int i;
if (task_ctx->lbr_callstack_users == 0) {
@@ -367,13 +376,17 @@
mask = x86_pmu.lbr_nr - 1;
tos = intel_pmu_lbr_tos();
- for (i = 0; i < tos; i++) {
+ for (i = 0; i < x86_pmu.lbr_nr; i++) {
lbr_idx = (tos - i) & mask;
- task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+ from = rdlbr_from(lbr_idx);
+ if (!from)
+ break;
+ task_ctx->lbr_from[i] = from;
task_ctx->lbr_to[i] = rdlbr_to(lbr_idx);
if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
}
+ task_ctx->valid_lbrs = i;
task_ctx->tos = tos;
task_ctx->lbr_stack_state = LBR_VALID;
}
@@ -522,7 +535,7 @@
*/
static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
{
- bool need_info = false;
+ bool need_info = false, call_stack = false;
unsigned long mask = x86_pmu.lbr_nr - 1;
int lbr_format = x86_pmu.intel_cap.lbr_format;
u64 tos = intel_pmu_lbr_tos();
@@ -533,7 +546,7 @@
if (cpuc->lbr_sel) {
need_info = !(cpuc->lbr_sel->config & LBR_NO_INFO);
if (cpuc->lbr_sel->config & LBR_CALL_STACK)
- num = tos;
+ call_stack = true;
}
for (i = 0; i < num; i++) {
@@ -546,6 +559,13 @@
from = rdlbr_from(lbr_idx);
to = rdlbr_to(lbr_idx);
+ /*
+ * Read LBR call stack entries
+ * until invalid entry (0s) is detected.
+ */
+ if (call_stack && !from)
+ break;
+
if (lbr_format == LBR_FORMAT_INFO && need_info) {
u64 info;
@@ -1175,4 +1195,8 @@
x86_pmu.lbr_sel_mask = LBR_SEL_MASK;
x86_pmu.lbr_sel_map = snb_lbr_sel_map;
+
+	/* Knights Landing reports the LIP format but does have the MISPREDICT bit */
+ if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_LIP)
+ x86_pmu.intel_cap.lbr_format = LBR_FORMAT_EIP_FLAGS;
}
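
The common thread in these LBR changes: in call-stack mode a zero LBR_FROM value marks the end of valid entries, so __intel_pmu_lbr_save() stops at the first zero and records how many slots were valid, __intel_pmu_lbr_restore() zeroes every slot beyond that count so stale entries cannot leak back in, and the read path stops at the marker rather than assuming TOS bounds the stack.
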
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index f356317..1bfebbc 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -633,6 +633,7 @@
u64 lbr_to[MAX_LBR_ENTRIES];
u64 lbr_info[MAX_LBR_ENTRIES];
int tos;
+ int valid_lbrs;
int lbr_callstack_users;
int lbr_stack_state;
};
diff --git a/arch/x86/include/asm/cpufeatures.h b/arch/x86/include/asm/cpufeatures.h
index fbc1474..f6d1bc9 100644
--- a/arch/x86/include/asm/cpufeatures.h
+++ b/arch/x86/include/asm/cpufeatures.h
@@ -104,7 +104,6 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
diff --git a/arch/x86/include/asm/fixmap.h b/arch/x86/include/asm/fixmap.h
index 8554f96..2515284 100644
--- a/arch/x86/include/asm/fixmap.h
+++ b/arch/x86/include/asm/fixmap.h
@@ -14,6 +14,16 @@
#ifndef _ASM_X86_FIXMAP_H
#define _ASM_X86_FIXMAP_H
+/*
+ * Exposed to assembly code for setting up initial page tables. Cannot be
+ * calculated in assembly code (fixmap entries are an enum), but is sanity
+ * checked in the actual fixmap C code to make sure that the fixmap is
+ * covered fully.
+ */
+#define FIXMAP_PMD_NUM 2
+/* fixmap starts downwards from the 507th entry in level2_fixmap_pgt */
+#define FIXMAP_PMD_TOP 507
+
#ifndef __ASSEMBLY__
#include <linux/kernel.h>
#include <asm/acpi.h>
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
index 8852e3a..499d6ed 100644
--- a/arch/x86/include/asm/fpu/internal.h
+++ b/arch/x86/include/asm/fpu/internal.h
@@ -60,11 +60,6 @@
/*
* FPU related CPU feature flag helper routines:
*/
-static __always_inline __pure bool use_eager_fpu(void)
-{
- return true;
-}
-
static __always_inline __pure bool use_xsaveopt(void)
{
return static_cpu_has(X86_FEATURE_XSAVEOPT);
@@ -501,24 +496,6 @@
}
-/*
- * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
- * idiom, which is then paired with the sw-flag (fpregs_active) later on:
- */
-
-static inline void __fpregs_activate_hw(void)
-{
- if (!use_eager_fpu())
- clts();
-}
-
-static inline void __fpregs_deactivate_hw(void)
-{
- if (!use_eager_fpu())
- stts();
-}
-
-/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
static inline void __fpregs_deactivate(struct fpu *fpu)
{
WARN_ON_FPU(!fpu->fpregs_active);
@@ -528,7 +505,6 @@
trace_x86_fpu_regs_deactivated(fpu);
}
-/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
static inline void __fpregs_activate(struct fpu *fpu)
{
WARN_ON_FPU(fpu->fpregs_active);
@@ -554,22 +530,17 @@
}
/*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
* These generally need preemption protection to work,
* do try to avoid using these on their own.
*/
static inline void fpregs_activate(struct fpu *fpu)
{
- __fpregs_activate_hw();
__fpregs_activate(fpu);
}
static inline void fpregs_deactivate(struct fpu *fpu)
{
__fpregs_deactivate(fpu);
- __fpregs_deactivate_hw();
}
/*
@@ -596,8 +567,7 @@
* or if the past 5 consecutive context-switches used math.
*/
fpu.preload = static_cpu_has(X86_FEATURE_FPU) &&
- new_fpu->fpstate_active &&
- (use_eager_fpu() || new_fpu->counter > 5);
+ new_fpu->fpstate_active;
if (old_fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(old_fpu))
@@ -611,18 +581,13 @@
/* Don't change CR0.TS if we just switch! */
if (fpu.preload) {
- new_fpu->counter++;
__fpregs_activate(new_fpu);
trace_x86_fpu_regs_activated(new_fpu);
prefetch(&new_fpu->state);
- } else {
- __fpregs_deactivate_hw();
}
} else {
- old_fpu->counter = 0;
old_fpu->last_cpu = -1;
if (fpu.preload) {
- new_fpu->counter++;
if (fpu_want_lazy_restore(new_fpu, cpu))
fpu.preload = 0;
else
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
index 48df486..3c80f5b 100644
--- a/arch/x86/include/asm/fpu/types.h
+++ b/arch/x86/include/asm/fpu/types.h
@@ -322,17 +322,6 @@
unsigned char fpregs_active;
/*
- * @counter:
- *
- * This counter contains the number of consecutive context switches
- * during which the FPU stays used. If this is over a threshold, the
- * lazy FPU restore logic becomes eager, to save the trap overhead.
- * This is an unsigned char so that after 256 iterations the counter
- * wraps and the context switch behavior turns lazy again; this is to
- * deal with bursty apps that only use the FPU for a short time:
- */
- unsigned char counter;
- /*
* @state:
*
* In-memory copy of all FPU registers that we save/restore
@@ -340,29 +329,6 @@
* the registers in the FPU are more recent than this state
* copy. If the task context-switches away then they get
* saved here and represent the FPU state.
- *
- * After context switches there may be a (short) time period
- * during which the in-FPU hardware registers are unchanged
- * and still perfectly match this state, if the tasks
- * scheduled afterwards are not using the FPU.
- *
- * This is the 'lazy restore' window of optimization, which
- * we track though 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
- *
- * We detect whether a subsequent task uses the FPU via setting
- * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
- *
- * During this window, if the task gets scheduled again, we
- * might be able to skip having to do a restore from this
- * memory buffer to the hardware registers - at the cost of
- * incurring the overhead of #NM fault traps.
- *
- * Note that on modern CPUs that support the XSAVEOPT (or other
- * optimized XSAVE instructions), we don't use #NM traps anymore,
- * as the hardware can track whether FPU registers need saving
- * or not. On such CPUs we activate the non-lazy ('eagerfpu')
- * logic, which unconditionally saves/restores all FPU state
- * across context switches. (if FPU state exists.)
*/
union fpregs_state state;
/*
diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
index 221a32e..d5c4df9 100644
--- a/arch/x86/include/asm/pgtable_64.h
+++ b/arch/x86/include/asm/pgtable_64.h
@@ -13,13 +13,14 @@
#include <asm/processor.h>
#include <linux/bitops.h>
#include <linux/threads.h>
+#include <asm/fixmap.h>
extern pud_t level3_kernel_pgt[512];
extern pud_t level3_ident_pgt[512];
extern pmd_t level2_kernel_pgt[512];
extern pmd_t level2_fixmap_pgt[512];
extern pmd_t level2_ident_pgt[512];
-extern pte_t level1_fixmap_pgt[512];
+extern pte_t level1_fixmap_pgt[512 * FIXMAP_PMD_NUM];
extern pgd_t init_level4_pgt[];
#define swapper_pg_dir init_level4_pgt
diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
index dfdb7e2..93d089c 100644
--- a/arch/x86/include/asm/pgtable_types.h
+++ b/arch/x86/include/asm/pgtable_types.h
@@ -134,7 +134,7 @@
*/
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
- _PAGE_SOFT_DIRTY)
+ _PAGE_SOFT_DIRTY | _PAGE_DEVMAP)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
/* The ASID is the lower 12 bits of CR3 */
diff --git a/arch/x86/include/asm/trace/fpu.h b/arch/x86/include/asm/trace/fpu.h
index 9217ab1..342e597 100644
--- a/arch/x86/include/asm/trace/fpu.h
+++ b/arch/x86/include/asm/trace/fpu.h
@@ -14,7 +14,6 @@
__field(struct fpu *, fpu)
__field(bool, fpregs_active)
__field(bool, fpstate_active)
- __field(int, counter)
__field(u64, xfeatures)
__field(u64, xcomp_bv)
),
@@ -23,17 +22,15 @@
__entry->fpu = fpu;
__entry->fpregs_active = fpu->fpregs_active;
__entry->fpstate_active = fpu->fpstate_active;
- __entry->counter = fpu->counter;
if (boot_cpu_has(X86_FEATURE_OSXSAVE)) {
__entry->xfeatures = fpu->state.xsave.header.xfeatures;
__entry->xcomp_bv = fpu->state.xsave.header.xcomp_bv;
}
),
- TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d counter: %d xfeatures: %llx xcomp_bv: %llx",
+ TP_printk("x86/fpu: %p fpregs_active: %d fpstate_active: %d xfeatures: %llx xcomp_bv: %llx",
__entry->fpu,
__entry->fpregs_active,
__entry->fpstate_active,
- __entry->counter,
__entry->xfeatures,
__entry->xcomp_bv
)
diff --git a/arch/x86/include/uapi/asm/kvm.h b/arch/x86/include/uapi/asm/kvm.h
index 739c0c5..1bb90fa 100644
--- a/arch/x86/include/uapi/asm/kvm.h
+++ b/arch/x86/include/uapi/asm/kvm.h
@@ -356,5 +356,6 @@
#define KVM_X86_QUIRK_LINT0_REENABLED (1 << 0)
#define KVM_X86_QUIRK_CD_NW_CLEARED (1 << 1)
+#define KVM_X86_QUIRK_LAPIC_MMIO_HOLE (1 << 2)
#endif /* _ASM_X86_KVM_H */
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
index 430c095..fc96511 100644
--- a/arch/x86/kernel/fpu/core.c
+++ b/arch/x86/kernel/fpu/core.c
@@ -59,27 +59,9 @@
return this_cpu_read(in_kernel_fpu);
}
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
static bool interrupted_kernel_fpu_idle(void)
{
- if (kernel_fpu_disabled())
- return false;
-
- if (use_eager_fpu())
- return true;
-
- return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
+ return !kernel_fpu_disabled();
}
/*
@@ -127,7 +109,6 @@
copy_fpregs_to_fpstate(fpu);
} else {
this_cpu_write(fpu_fpregs_owner_ctx, NULL);
- __fpregs_activate_hw();
}
}
EXPORT_SYMBOL(__kernel_fpu_begin);
@@ -138,8 +119,6 @@
if (fpu->fpregs_active)
copy_kernel_to_fpregs(&fpu->state);
- else
- __fpregs_deactivate_hw();
kernel_fpu_enable();
}
@@ -201,10 +180,7 @@
trace_x86_fpu_before_save(fpu);
if (fpu->fpregs_active) {
if (!copy_fpregs_to_fpstate(fpu)) {
- if (use_eager_fpu())
- copy_kernel_to_fpregs(&fpu->state);
- else
- fpregs_deactivate(fpu);
+ copy_kernel_to_fpregs(&fpu->state);
}
}
trace_x86_fpu_after_save(fpu);
@@ -249,7 +225,6 @@
int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
{
- dst_fpu->counter = 0;
dst_fpu->fpregs_active = 0;
dst_fpu->last_cpu = -1;
@@ -262,8 +237,7 @@
* Don't let 'init optimized' areas of the XSAVE area
* leak into the child task:
*/
- if (use_eager_fpu())
- memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
+ memset(&dst_fpu->state.xsave, 0, fpu_kernel_xstate_size);
/*
* Save current FPU registers directly into the child
@@ -285,10 +259,7 @@
memcpy(&src_fpu->state, &dst_fpu->state,
fpu_kernel_xstate_size);
- if (use_eager_fpu())
- copy_kernel_to_fpregs(&src_fpu->state);
- else
- fpregs_deactivate(src_fpu);
+ copy_kernel_to_fpregs(&src_fpu->state);
}
preempt_enable();
@@ -461,7 +432,6 @@
trace_x86_fpu_before_restore(fpu);
fpregs_activate(fpu);
copy_kernel_to_fpregs(&fpu->state);
- fpu->counter++;
trace_x86_fpu_after_restore(fpu);
kernel_fpu_enable();
}
@@ -479,7 +449,6 @@
void fpu__drop(struct fpu *fpu)
{
preempt_disable();
- fpu->counter = 0;
if (fpu->fpregs_active) {
/* Ignore delayed exceptions from user space */
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
index 3ec0d2d..3a93186 100644
--- a/arch/x86/kernel/fpu/signal.c
+++ b/arch/x86/kernel/fpu/signal.c
@@ -344,11 +344,9 @@
}
fpu->fpstate_active = 1;
- if (use_eager_fpu()) {
- preempt_disable();
- fpu__restore(fpu);
- preempt_enable();
- }
+ preempt_disable();
+ fpu__restore(fpu);
+ preempt_enable();
return err;
} else {
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
index abfbb61b..e9d7f46 100644
--- a/arch/x86/kernel/fpu/xstate.c
+++ b/arch/x86/kernel/fpu/xstate.c
@@ -890,15 +890,6 @@
*/
if (!boot_cpu_has(X86_FEATURE_OSPKE))
return -EINVAL;
- /*
- * For most XSAVE components, this would be an arduous task:
- * bringing fpstate up to date with fpregs, updating fpstate,
- * then re-populating fpregs. But, for components that are
- * never lazily managed, we can just access the fpregs
- * directly. PKRU is never managed lazily, so we can just
- * manipulate it directly. Make sure it stays that way.
- */
- WARN_ON_ONCE(!use_eager_fpu());
/* Set the bits we need in PKRU: */
if (init_val & PKEY_DISABLE_ACCESS)
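The WARN_ON_ONCE() can go because PKRU is now always eagerly managed. For reference, PKRU packs two bits per protection key: an access-disable bit at position 2*pkey and a write-disable bit at 2*pkey + 1. A standalone sketch of the bit manipulation this function performs (names are illustrative):

    #include <stdint.h>

    #define PKEY_DISABLE_ACCESS	0x1
    #define PKEY_DISABLE_WRITE	0x2

    /* Compose new PKRU rights for one key: clear its two bits, then set
     * the requested disable bits. */
    static uint32_t pkru_update(uint32_t pkru, int pkey, unsigned int init_val)
    {
    	uint32_t bits = 0;

    	if (init_val & PKEY_DISABLE_ACCESS)
    		bits |= 1u << (2 * pkey);
    	if (init_val & PKEY_DISABLE_WRITE)
    		bits |= 1u << (2 * pkey + 1);

    	return (pkru & ~(3u << (2 * pkey))) | bits;
    }
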
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 9d72cf5..b0d6697 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -23,6 +23,7 @@
#include "../entry/calling.h"
#include <asm/export.h>
#include <asm/nospec-branch.h>
+#include <asm/fixmap.h>
#ifdef CONFIG_PARAVIRT
#include <asm/asm-offsets.h>
@@ -493,13 +494,20 @@
KERNEL_IMAGE_SIZE/PMD_SIZE)
NEXT_PAGE(level2_fixmap_pgt)
- .fill 506,8,0
- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
- .fill 5,8,0
+ .fill (512 - 4 - FIXMAP_PMD_NUM),8,0
+ pgtno = 0
+ .rept (FIXMAP_PMD_NUM)
+ .quad level1_fixmap_pgt + (pgtno << PAGE_SHIFT) - __START_KERNEL_map \
+ + _PAGE_TABLE;
+ pgtno = pgtno + 1
+ .endr
+ /* 6 MB reserved space + a 2MB hole */
+ .fill 4,8,0
NEXT_PAGE(level1_fixmap_pgt)
+ .rept (FIXMAP_PMD_NUM)
.fill 512,8,0
+ .endr
#undef PMDS
diff --git a/arch/x86/kernel/tsc_msr.c b/arch/x86/kernel/tsc_msr.c
index 0fe720d..3f818ce 100644
--- a/arch/x86/kernel/tsc_msr.c
+++ b/arch/x86/kernel/tsc_msr.c
@@ -12,6 +12,7 @@
#include <asm/setup.h>
#include <asm/apic.h>
#include <asm/param.h>
+#include <asm/tsc.h>
#define MAX_NUM_FREQS 9
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 7e5119c..c17d389 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -16,7 +16,6 @@
#include <linux/export.h>
#include <linux/vmalloc.h>
#include <linux/uaccess.h>
-#include <asm/fpu/internal.h> /* For use_eager_fpu. Ugh! */
#include <asm/user.h>
#include <asm/fpu/xstate.h>
#include "cpuid.h"
@@ -114,8 +113,7 @@
if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
- if (use_eager_fpu())
- kvm_x86_ops->fpu_activate(vcpu);
+ kvm_x86_ops->fpu_activate(vcpu);
/*
* The existing code assumes virtual address is 48-bit in the canonical
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index a8a86be..69a81a7 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -1220,9 +1220,8 @@
static int apic_mmio_in_range(struct kvm_lapic *apic, gpa_t addr)
{
- return kvm_apic_hw_enabled(apic) &&
- addr >= apic->base_address &&
- addr < apic->base_address + LAPIC_MMIO_LENGTH;
+ return addr >= apic->base_address &&
+ addr < apic->base_address + LAPIC_MMIO_LENGTH;
}
static int apic_mmio_read(struct kvm_vcpu *vcpu, struct kvm_io_device *this,
@@ -1234,6 +1233,15 @@
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
+ if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
+ if (!kvm_check_has_quirk(vcpu->kvm,
+ KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
+ return -EOPNOTSUPP;
+
+ memset(data, 0xff, len);
+ return 0;
+ }
+
kvm_lapic_reg_read(apic, offset, len, data);
return 0;
@@ -1646,6 +1654,14 @@
if (!apic_mmio_in_range(apic, address))
return -EOPNOTSUPP;
+ if (!kvm_apic_hw_enabled(apic) || apic_x2apic_mode(apic)) {
+ if (!kvm_check_has_quirk(vcpu->kvm,
+ KVM_X86_QUIRK_LAPIC_MMIO_HOLE))
+ return -EOPNOTSUPP;
+
+ return 0;
+ }
+
/*
* APIC register must be aligned on 128-bits boundary.
* 32/64/128 bits registers must be accessed thru 32 bits.
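Taken together, the two lapic.c hunks make a hardware-disabled (or x2APIC-mode) local APIC behave like a hole in the guest physical address space: reads return all-ones and writes are silently dropped. A VMM that wants the old unclaimed-access behavior back can turn the quirk off; a rough sketch, assuming an open KVM VM file descriptor in vm_fd:

    #include <linux/kvm.h>
    #include <sys/ioctl.h>

    /* Opt out of KVM_X86_QUIRK_LAPIC_MMIO_HOLE so the accesses above
     * fall through with -EOPNOTSUPP again instead of reading as 0xff. */
    static int disable_lapic_mmio_hole(int vm_fd)
    {
    	struct kvm_enable_cap cap = {
    		.cap = KVM_CAP_DISABLE_QUIRKS,
    		.args = { KVM_X86_QUIRK_LAPIC_MMIO_HOLE },
    	};

    	return ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
    }
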
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 203d423..5013ef1 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7631,16 +7631,6 @@
copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
__kernel_fpu_end();
++vcpu->stat.fpu_reload;
- /*
- * If using eager FPU mode, or if the guest is a frequent user
- * of the FPU, just leave the FPU active for next time.
- * Every 255 times fpu_counter rolls over to 0; a guest that uses
- * the FPU in bursts will revert to loading it on demand.
- */
- if (!use_eager_fpu()) {
- if (++vcpu->fpu_counter < 5)
- kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
- }
trace_kvm_fpu(0);
}
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index a8f90ce..dc6d990 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -60,7 +60,7 @@
eb->nid = nid;
if (emu_nid_to_phys[nid] == NUMA_NO_NODE)
- emu_nid_to_phys[nid] = nid;
+ emu_nid_to_phys[nid] = pb->nid;
pb->start += size;
if (pb->start >= pb->end) {
diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
index e30baa8..8cbed30 100644
--- a/arch/x86/mm/pgtable.c
+++ b/arch/x86/mm/pgtable.c
@@ -536,6 +536,15 @@
{
unsigned long address = __fix_to_virt(idx);
+#ifdef CONFIG_X86_64
+ /*
+ * Ensure that the static initial page tables are covering the
+ * fixmap completely.
+ */
+ BUILD_BUG_ON(__end_of_permanent_fixed_addresses >
+ (FIXMAP_PMD_NUM * PTRS_PER_PTE));
+#endif
+
if (idx >= __end_of_fixed_addresses) {
BUG();
return;
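The new BUILD_BUG_ON() pins down the invariant behind the head_64.S change above: the statically reserved level1_fixmap_pgt pages must have room for every permanent fixmap slot. Spelled out (in the upstream version of this change FIXMAP_PMD_NUM is 2):

    #define FIXMAP_PMD_NUM	2	/* value used by the upstream change */
    #define PTRS_PER_PTE	512	/* entries per last-level page table */

    /* The reserved level-1 pages provide this many fixmap slots; the
     * BUILD_BUG_ON above guarantees __end_of_permanent_fixed_addresses
     * never exceeds it. */
    enum { FIXMAP_STATIC_SLOTS = FIXMAP_PMD_NUM * PTRS_PER_PTE };	/* 1024 */
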
diff --git a/arch/x86/mm/pkeys.c b/arch/x86/mm/pkeys.c
index 0bbec04..e2d2b3c 100644
--- a/arch/x86/mm/pkeys.c
+++ b/arch/x86/mm/pkeys.c
@@ -142,8 +142,7 @@
* Called from the FPU code when creating a fresh set of FPU
* registers. This is called from a very specific context where
* we know the FPU registers are safe for use and we can use PKRU
- * directly. The fact that PKRU is only available when we are
- * using eagerfpu mode makes this possible.
+ * directly.
*/
void copy_init_pkru_to_fpregs(void)
{
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index c92f75f..ebceaba 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1936,7 +1936,7 @@
* L3_k[511] -> level2_fixmap_pgt */
convert_pfn_mfn(level3_kernel_pgt);
- /* L3_k[511][506] -> level1_fixmap_pgt */
+ /* L3_k[511][508-FIXMAP_PMD_NUM ... 507] -> level1_fixmap_pgt */
convert_pfn_mfn(level2_fixmap_pgt);
}
/* We get [511][511] and have Xen's version of level2_kernel_pgt */
@@ -1970,7 +1970,11 @@
set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
- set_page_prot(level1_fixmap_pgt, PAGE_KERNEL_RO);
+
+ for (i = 0; i < FIXMAP_PMD_NUM; i++) {
+ set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE,
+ PAGE_KERNEL_RO);
+ }
/* Pin down new L4 */
pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE,
diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c
index b9fc525..0b29a43 100644
--- a/arch/x86/xen/pmu.c
+++ b/arch/x86/xen/pmu.c
@@ -477,7 +477,7 @@
irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id)
{
int err, ret = IRQ_NONE;
- struct pt_regs regs;
+ struct pt_regs regs = {0};
const struct xen_pmu_data *xenpmu_data = get_xenpmu_data();
uint8_t xenpmu_flags = get_xenpmu_flags();
diff --git a/crypto/Kconfig b/crypto/Kconfig
index 64f50b7..4a6d18f 100644
--- a/crypto/Kconfig
+++ b/crypto/Kconfig
@@ -1444,20 +1444,6 @@
See also:
<http://www.cl.cam.ac.uk/~rja14/serpent.html>
-config CRYPTO_SPECK
- tristate "Speck cipher algorithm"
- select CRYPTO_ALGAPI
- help
- Speck is a lightweight block cipher that is tuned for optimal
- performance in software (rather than hardware).
-
- Speck may not be as secure as AES, and should only be used on systems
- where AES is not fast enough.
-
- See also: <https://eprint.iacr.org/2013/404.pdf>
-
- If unsure, say N.
-
config CRYPTO_TEA
tristate "TEA, XTEA and XETA cipher algorithms"
select CRYPTO_ALGAPI
diff --git a/crypto/Makefile b/crypto/Makefile
index 8a455d0..04f779d 100644
--- a/crypto/Makefile
+++ b/crypto/Makefile
@@ -106,7 +106,6 @@
obj-$(CONFIG_CRYPTO_KHAZAD) += khazad.o
obj-$(CONFIG_CRYPTO_ANUBIS) += anubis.o
obj-$(CONFIG_CRYPTO_SEED) += seed.o
-obj-$(CONFIG_CRYPTO_SPECK) += speck.o
obj-$(CONFIG_CRYPTO_SALSA20) += salsa20_generic.o
obj-$(CONFIG_CRYPTO_CHACHA20) += chacha20_generic.o
obj-$(CONFIG_CRYPTO_POLY1305) += poly1305_generic.o
diff --git a/crypto/ablkcipher.c b/crypto/ablkcipher.c
index 860c9e5..3bc0e76 100644
--- a/crypto/ablkcipher.c
+++ b/crypto/ablkcipher.c
@@ -367,6 +367,7 @@
strncpy(rblkcipher.type, "ablkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
@@ -441,6 +442,7 @@
strncpy(rblkcipher.type, "givcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_ablkcipher.geniv ?: "<built-in>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_ablkcipher.min_keysize;
diff --git a/crypto/blkcipher.c b/crypto/blkcipher.c
index 27f9866..59a0936 100644
--- a/crypto/blkcipher.c
+++ b/crypto/blkcipher.c
@@ -510,6 +510,7 @@
strncpy(rblkcipher.type, "blkcipher", sizeof(rblkcipher.type));
strncpy(rblkcipher.geniv, alg->cra_blkcipher.geniv ?: "<default>",
sizeof(rblkcipher.geniv));
+ rblkcipher.geniv[sizeof(rblkcipher.geniv) - 1] = '\0';
rblkcipher.blocksize = alg->cra_blocksize;
rblkcipher.min_keysize = alg->cra_blkcipher.min_keysize;
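All three report-path hunks above apply the same fix: strncpy() does not NUL-terminate the destination when the source string fills the buffer, so the copied geniv name must be terminated by hand before the struct is copied to user space. A standalone illustration:

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
    	char geniv[8];

    	/* The source fills the buffer exactly: strncpy() writes 8 bytes
    	 * and no terminating NUL. */
    	strncpy(geniv, "eightchr", sizeof(geniv));
    	geniv[sizeof(geniv) - 1] = '\0';	/* the added line above */

    	printf("%s\n", geniv);	/* safe: prints "eightch" */
    	return 0;
    }
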
diff --git a/crypto/speck.c b/crypto/speck.c
deleted file mode 100644
index 58aa9f7..0000000
--- a/crypto/speck.c
+++ /dev/null
@@ -1,307 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Speck: a lightweight block cipher
- *
- * Copyright (c) 2018 Google, Inc
- *
- * Speck has 10 variants, including 5 block sizes. For now we only implement
- * the variants Speck128/128, Speck128/192, Speck128/256, Speck64/96, and
- * Speck64/128. Speck${B}/${K} denotes the variant with a block size of B bits
- * and a key size of K bits. The Speck128 variants are believed to be the most
- * secure variants, and they use the same block size and key sizes as AES. The
- * Speck64 variants are less secure, but on 32-bit processors are usually
- * faster. The remaining variants (Speck32, Speck48, and Speck96) are even less
- * secure and/or not as well suited for implementation on either 32-bit or
- * 64-bit processors, so are omitted.
- *
- * Reference: "The Simon and Speck Families of Lightweight Block Ciphers"
- * https://eprint.iacr.org/2013/404.pdf
- *
- * In a correspondence, the Speck designers have also clarified that the words
- * should be interpreted in little-endian format, and the words should be
- * ordered such that the first word of each block is 'y' rather than 'x', and
- * the first key word (rather than the last) becomes the first round key.
- */
-
-#include <asm/unaligned.h>
-#include <crypto/speck.h>
-#include <linux/bitops.h>
-#include <linux/crypto.h>
-#include <linux/init.h>
-#include <linux/module.h>
-
-/* Speck128 */
-
-static __always_inline void speck128_round(u64 *x, u64 *y, u64 k)
-{
- *x = ror64(*x, 8);
- *x += *y;
- *x ^= k;
- *y = rol64(*y, 3);
- *y ^= *x;
-}
-
-static __always_inline void speck128_unround(u64 *x, u64 *y, u64 k)
-{
- *y ^= *x;
- *y = ror64(*y, 3);
- *x ^= k;
- *x -= *y;
- *x = rol64(*x, 8);
-}
-
-void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in)
-{
- u64 y = get_unaligned_le64(in);
- u64 x = get_unaligned_le64(in + 8);
- int i;
-
- for (i = 0; i < ctx->nrounds; i++)
- speck128_round(&x, &y, ctx->round_keys[i]);
-
- put_unaligned_le64(y, out);
- put_unaligned_le64(x, out + 8);
-}
-EXPORT_SYMBOL_GPL(crypto_speck128_encrypt);
-
-static void speck128_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- crypto_speck128_encrypt(crypto_tfm_ctx(tfm), out, in);
-}
-
-void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in)
-{
- u64 y = get_unaligned_le64(in);
- u64 x = get_unaligned_le64(in + 8);
- int i;
-
- for (i = ctx->nrounds - 1; i >= 0; i--)
- speck128_unround(&x, &y, ctx->round_keys[i]);
-
- put_unaligned_le64(y, out);
- put_unaligned_le64(x, out + 8);
-}
-EXPORT_SYMBOL_GPL(crypto_speck128_decrypt);
-
-static void speck128_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- crypto_speck128_decrypt(crypto_tfm_ctx(tfm), out, in);
-}
-
-int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
- unsigned int keylen)
-{
- u64 l[3];
- u64 k;
- int i;
-
- switch (keylen) {
- case SPECK128_128_KEY_SIZE:
- k = get_unaligned_le64(key);
- l[0] = get_unaligned_le64(key + 8);
- ctx->nrounds = SPECK128_128_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck128_round(&l[0], &k, i);
- }
- break;
- case SPECK128_192_KEY_SIZE:
- k = get_unaligned_le64(key);
- l[0] = get_unaligned_le64(key + 8);
- l[1] = get_unaligned_le64(key + 16);
- ctx->nrounds = SPECK128_192_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck128_round(&l[i % 2], &k, i);
- }
- break;
- case SPECK128_256_KEY_SIZE:
- k = get_unaligned_le64(key);
- l[0] = get_unaligned_le64(key + 8);
- l[1] = get_unaligned_le64(key + 16);
- l[2] = get_unaligned_le64(key + 24);
- ctx->nrounds = SPECK128_256_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck128_round(&l[i % 3], &k, i);
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(crypto_speck128_setkey);
-
-static int speck128_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- return crypto_speck128_setkey(crypto_tfm_ctx(tfm), key, keylen);
-}
-
-/* Speck64 */
-
-static __always_inline void speck64_round(u32 *x, u32 *y, u32 k)
-{
- *x = ror32(*x, 8);
- *x += *y;
- *x ^= k;
- *y = rol32(*y, 3);
- *y ^= *x;
-}
-
-static __always_inline void speck64_unround(u32 *x, u32 *y, u32 k)
-{
- *y ^= *x;
- *y = ror32(*y, 3);
- *x ^= k;
- *x -= *y;
- *x = rol32(*x, 8);
-}
-
-void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in)
-{
- u32 y = get_unaligned_le32(in);
- u32 x = get_unaligned_le32(in + 4);
- int i;
-
- for (i = 0; i < ctx->nrounds; i++)
- speck64_round(&x, &y, ctx->round_keys[i]);
-
- put_unaligned_le32(y, out);
- put_unaligned_le32(x, out + 4);
-}
-EXPORT_SYMBOL_GPL(crypto_speck64_encrypt);
-
-static void speck64_encrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- crypto_speck64_encrypt(crypto_tfm_ctx(tfm), out, in);
-}
-
-void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in)
-{
- u32 y = get_unaligned_le32(in);
- u32 x = get_unaligned_le32(in + 4);
- int i;
-
- for (i = ctx->nrounds - 1; i >= 0; i--)
- speck64_unround(&x, &y, ctx->round_keys[i]);
-
- put_unaligned_le32(y, out);
- put_unaligned_le32(x, out + 4);
-}
-EXPORT_SYMBOL_GPL(crypto_speck64_decrypt);
-
-static void speck64_decrypt(struct crypto_tfm *tfm, u8 *out, const u8 *in)
-{
- crypto_speck64_decrypt(crypto_tfm_ctx(tfm), out, in);
-}
-
-int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
- unsigned int keylen)
-{
- u32 l[3];
- u32 k;
- int i;
-
- switch (keylen) {
- case SPECK64_96_KEY_SIZE:
- k = get_unaligned_le32(key);
- l[0] = get_unaligned_le32(key + 4);
- l[1] = get_unaligned_le32(key + 8);
- ctx->nrounds = SPECK64_96_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck64_round(&l[i % 2], &k, i);
- }
- break;
- case SPECK64_128_KEY_SIZE:
- k = get_unaligned_le32(key);
- l[0] = get_unaligned_le32(key + 4);
- l[1] = get_unaligned_le32(key + 8);
- l[2] = get_unaligned_le32(key + 12);
- ctx->nrounds = SPECK64_128_NROUNDS;
- for (i = 0; i < ctx->nrounds; i++) {
- ctx->round_keys[i] = k;
- speck64_round(&l[i % 3], &k, i);
- }
- break;
- default:
- return -EINVAL;
- }
-
- return 0;
-}
-EXPORT_SYMBOL_GPL(crypto_speck64_setkey);
-
-static int speck64_setkey(struct crypto_tfm *tfm, const u8 *key,
- unsigned int keylen)
-{
- return crypto_speck64_setkey(crypto_tfm_ctx(tfm), key, keylen);
-}
-
-/* Algorithm definitions */
-
-static struct crypto_alg speck_algs[] = {
- {
- .cra_name = "speck128",
- .cra_driver_name = "speck128-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = SPECK128_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct speck128_tfm_ctx),
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = SPECK128_128_KEY_SIZE,
- .cia_max_keysize = SPECK128_256_KEY_SIZE,
- .cia_setkey = speck128_setkey,
- .cia_encrypt = speck128_encrypt,
- .cia_decrypt = speck128_decrypt
- }
- }
- }, {
- .cra_name = "speck64",
- .cra_driver_name = "speck64-generic",
- .cra_priority = 100,
- .cra_flags = CRYPTO_ALG_TYPE_CIPHER,
- .cra_blocksize = SPECK64_BLOCK_SIZE,
- .cra_ctxsize = sizeof(struct speck64_tfm_ctx),
- .cra_module = THIS_MODULE,
- .cra_u = {
- .cipher = {
- .cia_min_keysize = SPECK64_96_KEY_SIZE,
- .cia_max_keysize = SPECK64_128_KEY_SIZE,
- .cia_setkey = speck64_setkey,
- .cia_encrypt = speck64_encrypt,
- .cia_decrypt = speck64_decrypt
- }
- }
- }
-};
-
-static int __init speck_module_init(void)
-{
- return crypto_register_algs(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-static void __exit speck_module_exit(void)
-{
- crypto_unregister_algs(speck_algs, ARRAY_SIZE(speck_algs));
-}
-
-module_init(speck_module_init);
-module_exit(speck_module_exit);
-
-MODULE_DESCRIPTION("Speck block cipher (generic)");
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Eric Biggers <ebiggers@google.com>");
-MODULE_ALIAS_CRYPTO("speck128");
-MODULE_ALIAS_CRYPTO("speck128-generic");
-MODULE_ALIAS_CRYPTO("speck64");
-MODULE_ALIAS_CRYPTO("speck64-generic");
diff --git a/crypto/testmgr.c b/crypto/testmgr.c
index f1df5d2..e7c25ac 100644
--- a/crypto/testmgr.c
+++ b/crypto/testmgr.c
@@ -3238,36 +3238,6 @@
}
}
}, {
- .alg = "ecb(speck128)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = speck128_enc_tv_template,
- .count = ARRAY_SIZE(speck128_enc_tv_template)
- },
- .dec = {
- .vecs = speck128_dec_tv_template,
- .count = ARRAY_SIZE(speck128_dec_tv_template)
- }
- }
- }
- }, {
- .alg = "ecb(speck64)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = speck64_enc_tv_template,
- .count = ARRAY_SIZE(speck64_enc_tv_template)
- },
- .dec = {
- .vecs = speck64_dec_tv_template,
- .count = ARRAY_SIZE(speck64_dec_tv_template)
- }
- }
- }
- }, {
.alg = "ecb(tea)",
.test = alg_test_skcipher,
.suite = {
@@ -4088,36 +4058,6 @@
}
}
}, {
- .alg = "xts(speck128)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = speck128_xts_enc_tv_template,
- .count = ARRAY_SIZE(speck128_xts_enc_tv_template)
- },
- .dec = {
- .vecs = speck128_xts_dec_tv_template,
- .count = ARRAY_SIZE(speck128_xts_dec_tv_template)
- }
- }
- }
- }, {
- .alg = "xts(speck64)",
- .test = alg_test_skcipher,
- .suite = {
- .cipher = {
- .enc = {
- .vecs = speck64_xts_enc_tv_template,
- .count = ARRAY_SIZE(speck64_xts_enc_tv_template)
- },
- .dec = {
- .vecs = speck64_xts_dec_tv_template,
- .count = ARRAY_SIZE(speck64_xts_dec_tv_template)
- }
- }
- }
- }, {
.alg = "xts(twofish)",
.test = alg_test_skcipher,
.suite = {
diff --git a/crypto/testmgr.h b/crypto/testmgr.h
index 1f40d17..d7ab5de 100644
--- a/crypto/testmgr.h
+++ b/crypto/testmgr.h
@@ -13622,1492 +13622,6 @@
},
};
-/*
- * Speck test vectors taken from the original paper:
- * "The Simon and Speck Families of Lightweight Block Ciphers"
- * https://eprint.iacr.org/2013/404.pdf
- *
- * Note that the paper does not make byte and word order clear. But it was
- * confirmed with the authors that the intended orders are little endian byte
- * order and (y, x) word order. Equivalently, the printed test vectors, when
- * looking at only the bytes (ignoring the whitespace that divides them into
- * words), are backwards: the left-most byte is actually the one with the
- * highest memory address, while the right-most byte is actually the one with
- * the lowest memory address.
- */
-
-static struct cipher_testvec speck128_enc_tv_template[] = {
- { /* Speck128/128 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .klen = 16,
- .input = "\x20\x6d\x61\x64\x65\x20\x69\x74"
- "\x20\x65\x71\x75\x69\x76\x61\x6c",
- .ilen = 16,
- .result = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
- "\x65\x32\x78\x79\x51\x98\x5d\xa6",
- .rlen = 16,
- }, { /* Speck128/192 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17",
- .klen = 24,
- .input = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
- "\x68\x69\x65\x66\x20\x48\x61\x72",
- .ilen = 16,
- .result = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
- "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
- .rlen = 16,
- }, { /* Speck128/256 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
- .klen = 32,
- .input = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
- "\x49\x6e\x20\x74\x68\x6f\x73\x65",
- .ilen = 16,
- .result = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
- "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
- .rlen = 16,
- },
-};
-
-static struct cipher_testvec speck128_dec_tv_template[] = {
- { /* Speck128/128 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f",
- .klen = 16,
- .input = "\x18\x0d\x57\x5c\xdf\xfe\x60\x78"
- "\x65\x32\x78\x79\x51\x98\x5d\xa6",
- .ilen = 16,
- .result = "\x20\x6d\x61\x64\x65\x20\x69\x74"
- "\x20\x65\x71\x75\x69\x76\x61\x6c",
- .rlen = 16,
- }, { /* Speck128/192 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17",
- .klen = 24,
- .input = "\x86\x18\x3c\xe0\x5d\x18\xbc\xf9"
- "\x66\x55\x13\x13\x3a\xcf\xe4\x1b",
- .ilen = 16,
- .result = "\x65\x6e\x74\x20\x74\x6f\x20\x43"
- "\x68\x69\x65\x66\x20\x48\x61\x72",
- .rlen = 16,
- }, { /* Speck128/256 */
- .key = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f",
- .klen = 32,
- .input = "\x43\x8f\x18\x9c\x8d\xb4\xee\x4e"
- "\x3e\xf5\xc0\x05\x04\x01\x09\x41",
- .ilen = 16,
- .result = "\x70\x6f\x6f\x6e\x65\x72\x2e\x20"
- "\x49\x6e\x20\x74\x68\x6f\x73\x65",
- .rlen = 16,
- },
-};
-
-/*
- * Speck128-XTS test vectors, taken from the AES-XTS test vectors with the
- * result recomputed with Speck128 as the cipher
- */
-
-static struct cipher_testvec speck128_xts_enc_tv_template[] = {
- {
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 32,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ilen = 32,
- .result = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
- "\x3b\x99\x4a\x64\x74\x77\xac\xed"
- "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
- "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
- .rlen = 32,
- }, {
- .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x22\x22\x22\x22\x22\x22\x22\x22"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 32,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .ilen = 32,
- .result = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
- "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
- "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
- "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
- .rlen = 32,
- }, {
- .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
- "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
- "\x22\x22\x22\x22\x22\x22\x22\x22"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 32,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .ilen = 32,
- .result = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
- "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
- "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
- "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
- .rlen = 32,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x31\x41\x59\x26\x53\x58\x97\x93"
- "\x23\x84\x62\x64\x33\x83\x27\x95",
- .klen = 32,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .ilen = 512,
- .result = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
- "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
- "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
- "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
- "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
- "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
- "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
- "\x19\xc5\x58\x84\x63\xb9\x12\x68"
- "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
- "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
- "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
- "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
- "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
- "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
- "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
- "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
- "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
- "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
- "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
- "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
- "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
- "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
- "\xa5\x20\xac\x24\x1c\x73\x59\x73"
- "\x58\x61\x3a\x87\x58\xb3\x20\x56"
- "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
- "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
- "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
- "\x09\x35\x71\x50\x65\xac\x92\xe3"
- "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
- "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
- "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
- "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
- "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
- "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
- "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
- "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
- "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
- "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
- "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
- "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
- "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
- "\x87\x30\xac\xd5\xea\x73\x49\x10"
- "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
- "\x66\x02\x35\x3d\x60\x06\x36\x4f"
- "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
- "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
- "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
- "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
- "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
- "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
- "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
- "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
- "\x51\x66\x02\x69\x04\x97\x36\xd4"
- "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
- "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
- "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
- "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
- "\x03\xe7\x05\x39\xf5\x05\x26\xee"
- "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
- "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
- "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
- "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
- "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
- "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
- .rlen = 512,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x62\x49\x77\x57\x24\x70\x93\x69"
- "\x99\x59\x57\x49\x66\x96\x76\x27"
- "\x31\x41\x59\x26\x53\x58\x97\x93"
- "\x23\x84\x62\x64\x33\x83\x27\x95"
- "\x02\x88\x41\x97\x16\x93\x99\x37"
- "\x51\x05\x82\x09\x74\x94\x45\x92",
- .klen = 64,
- .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .ilen = 512,
- .result = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
- "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
- "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
- "\x92\x99\xde\xd3\x76\xed\xcd\x63"
- "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
- "\x79\x36\x31\x19\x62\xae\x10\x7e"
- "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
- "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
- "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
- "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
- "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
- "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
- "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
- "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
- "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
- "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
- "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
- "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
- "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
- "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
- "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
- "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
- "\xac\xa5\xd0\x38\xef\x19\x56\x53"
- "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
- "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
- "\xed\x22\x34\x1c\x5d\xed\x17\x06"
- "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
- "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
- "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
- "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
- "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
- "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
- "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
- "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
- "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
- "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
- "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
- "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
- "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
- "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
- "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
- "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
- "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
- "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
- "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
- "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
- "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
- "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
- "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
- "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
- "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
- "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
- "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
- "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
- "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
- "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
- "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
- "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
- "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
- "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
- "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
- "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
- "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
- "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
- .rlen = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
- }
-};
-
-static struct cipher_testvec speck128_xts_dec_tv_template[] = {
- {
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 32,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\xbe\xa0\xe7\x03\xd7\xfe\xab\x62"
- "\x3b\x99\x4a\x64\x74\x77\xac\xed"
- "\xd8\xf4\xa6\xcf\xae\xb9\x07\x42"
- "\x51\xd9\xb6\x1d\xe0\x5e\xbc\x54",
- .ilen = 32,
- .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .rlen = 32,
- }, {
- .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x22\x22\x22\x22\x22\x22\x22\x22"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 32,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\xfb\x53\x81\x75\x6f\x9f\x34\xad"
- "\x7e\x01\xed\x7b\xcc\xda\x4e\x4a"
- "\xd4\x84\xa4\x53\xd5\x88\x73\x1b"
- "\xfd\xcb\xae\x0d\xf3\x04\xee\xe6",
- .ilen = 32,
- .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .rlen = 32,
- }, {
- .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
- "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
- "\x22\x22\x22\x22\x22\x22\x22\x22"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 32,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x21\x52\x84\x15\xd1\xf7\x21\x55"
- "\xd9\x75\x4a\xd3\xc5\xdb\x9f\x7d"
- "\xda\x63\xb2\xf1\x82\xb0\x89\x59"
- "\x86\xd4\xaa\xaa\xdd\xff\x4f\x92",
- .ilen = 32,
- .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .rlen = 32,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x31\x41\x59\x26\x53\x58\x97\x93"
- "\x23\x84\x62\x64\x33\x83\x27\x95",
- .klen = 32,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x57\xb5\xf8\x71\x6e\x6d\xdd\x82"
- "\x53\xd0\xed\x2d\x30\xc1\x20\xef"
- "\x70\x67\x5e\xff\x09\x70\xbb\xc1"
- "\x3a\x7b\x48\x26\xd9\x0b\xf4\x48"
- "\xbe\xce\xb1\xc7\xb2\x67\xc4\xa7"
- "\x76\xf8\x36\x30\xb7\xb4\x9a\xd9"
- "\xf5\x9d\xd0\x7b\xc1\x06\x96\x44"
- "\x19\xc5\x58\x84\x63\xb9\x12\x68"
- "\x68\xc7\xaa\x18\x98\xf2\x1f\x5c"
- "\x39\xa6\xd8\x32\x2b\xc3\x51\xfd"
- "\x74\x79\x2e\xb4\x44\xd7\x69\xc4"
- "\xfc\x29\xe6\xed\x26\x1e\xa6\x9d"
- "\x1c\xbe\x00\x0e\x7f\x3a\xca\xfb"
- "\x6d\x13\x65\xa0\xf9\x31\x12\xe2"
- "\x26\xd1\xec\x2b\x0a\x8b\x59\x99"
- "\xa7\x49\xa0\x0e\x09\x33\x85\x50"
- "\xc3\x23\xca\x7a\xdd\x13\x45\x5f"
- "\xde\x4c\xa7\xcb\x00\x8a\x66\x6f"
- "\xa2\xb6\xb1\x2e\xe1\xa0\x18\xf6"
- "\xad\xf3\xbd\xeb\xc7\xef\x55\x4f"
- "\x79\x91\x8d\x36\x13\x7b\xd0\x4a"
- "\x6c\x39\xfb\x53\xb8\x6f\x02\x51"
- "\xa5\x20\xac\x24\x1c\x73\x59\x73"
- "\x58\x61\x3a\x87\x58\xb3\x20\x56"
- "\x39\x06\x2b\x4d\xd3\x20\x2b\x89"
- "\x3f\xa2\xf0\x96\xeb\x7f\xa4\xcd"
- "\x11\xae\xbd\xcb\x3a\xb4\xd9\x91"
- "\x09\x35\x71\x50\x65\xac\x92\xe3"
- "\x7b\x32\xc0\x7a\xdd\xd4\xc3\x92"
- "\x6f\xeb\x79\xde\x6f\xd3\x25\xc9"
- "\xcd\x63\xf5\x1e\x7a\x3b\x26\x9d"
- "\x77\x04\x80\xa9\xbf\x38\xb5\xbd"
- "\xb8\x05\x07\xbd\xfd\xab\x7b\xf8"
- "\x2a\x26\xcc\x49\x14\x6d\x55\x01"
- "\x06\x94\xd8\xb2\x2d\x53\x83\x1b"
- "\x8f\xd4\xdd\x57\x12\x7e\x18\xba"
- "\x8e\xe2\x4d\x80\xef\x7e\x6b\x9d"
- "\x24\xa9\x60\xa4\x97\x85\x86\x2a"
- "\x01\x00\x09\xf1\xcb\x4a\x24\x1c"
- "\xd8\xf6\xe6\x5b\xe7\x5d\xf2\xc4"
- "\x97\x1c\x10\xc6\x4d\x66\x4f\x98"
- "\x87\x30\xac\xd5\xea\x73\x49\x10"
- "\x80\xea\xe5\x5f\x4d\x5f\x03\x33"
- "\x66\x02\x35\x3d\x60\x06\x36\x4f"
- "\x14\x1c\xd8\x07\x1f\x78\xd0\xf8"
- "\x4f\x6c\x62\x7c\x15\xa5\x7c\x28"
- "\x7c\xcc\xeb\x1f\xd1\x07\x90\x93"
- "\x7e\xc2\xa8\x3a\x80\xc0\xf5\x30"
- "\xcc\x75\xcf\x16\x26\xa9\x26\x3b"
- "\xe7\x68\x2f\x15\x21\x5b\xe4\x00"
- "\xbd\x48\x50\xcd\x75\x70\xc4\x62"
- "\xbb\x41\xfb\x89\x4a\x88\x3b\x3b"
- "\x51\x66\x02\x69\x04\x97\x36\xd4"
- "\x75\xae\x0b\xa3\x42\xf8\xca\x79"
- "\x8f\x93\xe9\xcc\x38\xbd\xd6\xd2"
- "\xf9\x70\x4e\xc3\x6a\x8e\x25\xbd"
- "\xea\x15\x5a\xa0\x85\x7e\x81\x0d"
- "\x03\xe7\x05\x39\xf5\x05\x26\xee"
- "\xec\xaa\x1f\x3d\xc9\x98\x76\x01"
- "\x2c\xf4\xfc\xa3\x88\x77\x38\xc4"
- "\x50\x65\x50\x6d\x04\x1f\xdf\x5a"
- "\xaa\xf2\x01\xa9\xc1\x8d\xee\xca"
- "\x47\x26\xef\x39\xb8\xb4\xf2\xd1"
- "\xd6\xbb\x1b\x2a\xc1\x34\x14\xcf",
- .ilen = 512,
- .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .rlen = 512,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x62\x49\x77\x57\x24\x70\x93\x69"
- "\x99\x59\x57\x49\x66\x96\x76\x27"
- "\x31\x41\x59\x26\x53\x58\x97\x93"
- "\x23\x84\x62\x64\x33\x83\x27\x95"
- "\x02\x88\x41\x97\x16\x93\x99\x37"
- "\x51\x05\x82\x09\x74\x94\x45\x92",
- .klen = 64,
- .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\xc5\x85\x2a\x4b\x73\xe4\xf6\xf1"
- "\x7e\xf9\xf6\xe9\xa3\x73\x36\xcb"
- "\xaa\xb6\x22\xb0\x24\x6e\x3d\x73"
- "\x92\x99\xde\xd3\x76\xed\xcd\x63"
- "\x64\x3a\x22\x57\xc1\x43\x49\xd4"
- "\x79\x36\x31\x19\x62\xae\x10\x7e"
- "\x7d\xcf\x7a\xe2\x6b\xce\x27\xfa"
- "\xdc\x3d\xd9\x83\xd3\x42\x4c\xe0"
- "\x1b\xd6\x1d\x1a\x6f\xd2\x03\x00"
- "\xfc\x81\x99\x8a\x14\x62\xf5\x7e"
- "\x0d\xe7\x12\xe8\x17\x9d\x0b\xec"
- "\xe2\xf7\xc9\xa7\x63\xd1\x79\xb6"
- "\x62\x62\x37\xfe\x0a\x4c\x4a\x37"
- "\x70\xc7\x5e\x96\x5f\xbc\x8e\x9e"
- "\x85\x3c\x4f\x26\x64\x85\xbc\x68"
- "\xb0\xe0\x86\x5e\x26\x41\xce\x11"
- "\x50\xda\x97\x14\xe9\x9e\xc7\x6d"
- "\x3b\xdc\x43\xde\x2b\x27\x69\x7d"
- "\xfc\xb0\x28\xbd\x8f\xb1\xc6\x31"
- "\x14\x4d\xf0\x74\x37\xfd\x07\x25"
- "\x96\x55\xe5\xfc\x9e\x27\x2a\x74"
- "\x1b\x83\x4d\x15\x83\xac\x57\xa0"
- "\xac\xa5\xd0\x38\xef\x19\x56\x53"
- "\x25\x4b\xfc\xce\x04\x23\xe5\x6b"
- "\xf6\xc6\x6c\x32\x0b\xb3\x12\xc5"
- "\xed\x22\x34\x1c\x5d\xed\x17\x06"
- "\x36\xa3\xe6\x77\xb9\x97\x46\xb8"
- "\xe9\x3f\x7e\xc7\xbc\x13\x5c\xdc"
- "\x6e\x3f\x04\x5e\xd1\x59\xa5\x82"
- "\x35\x91\x3d\x1b\xe4\x97\x9f\x92"
- "\x1c\x5e\x5f\x6f\x41\xd4\x62\xa1"
- "\x8d\x39\xfc\x42\xfb\x38\x80\xb9"
- "\x0a\xe3\xcc\x6a\x93\xd9\x7a\xb1"
- "\xe9\x69\xaf\x0a\x6b\x75\x38\xa7"
- "\xa1\xbf\xf7\xda\x95\x93\x4b\x78"
- "\x19\xf5\x94\xf9\xd2\x00\x33\x37"
- "\xcf\xf5\x9e\x9c\xf3\xcc\xa6\xee"
- "\x42\xb2\x9e\x2c\x5f\x48\x23\x26"
- "\x15\x25\x17\x03\x3d\xfe\x2c\xfc"
- "\xeb\xba\xda\xe0\x00\x05\xb6\xa6"
- "\x07\xb3\xe8\x36\x5b\xec\x5b\xbf"
- "\xd6\x5b\x00\x74\xc6\x97\xf1\x6a"
- "\x49\xa1\xc3\xfa\x10\x52\xb9\x14"
- "\xad\xb7\x73\xf8\x78\x12\xc8\x59"
- "\x17\x80\x4c\x57\x39\xf1\x6d\x80"
- "\x25\x77\x0f\x5e\x7d\xf0\xaf\x21"
- "\xec\xce\xb7\xc8\x02\x8a\xed\x53"
- "\x2c\x25\x68\x2e\x1f\x85\x5e\x67"
- "\xd1\x07\x7a\x3a\x89\x08\xe0\x34"
- "\xdc\xdb\x26\xb4\x6b\x77\xfc\x40"
- "\x31\x15\x72\xa0\xf0\x73\xd9\x3b"
- "\xd5\xdb\xfe\xfc\x8f\xa9\x44\xa2"
- "\x09\x9f\xc6\x33\xe5\xe2\x88\xe8"
- "\xf3\xf0\x1a\xf4\xce\x12\x0f\xd6"
- "\xf7\x36\xe6\xa4\xf4\x7a\x10\x58"
- "\xcc\x1f\x48\x49\x65\x47\x75\xe9"
- "\x28\xe1\x65\x7b\xf2\xc4\xb5\x07"
- "\xf2\xec\x76\xd8\x8f\x09\xf3\x16"
- "\xa1\x51\x89\x3b\xeb\x96\x42\xac"
- "\x65\xe0\x67\x63\x29\xdc\xb4\x7d"
- "\xf2\x41\x51\x6a\xcb\xde\x3c\xfb"
- "\x66\x8d\x13\xca\xe0\x59\x2a\x00"
- "\xc9\x53\x4c\xe6\x9e\xe2\x73\xd5"
- "\x67\x19\xb2\xbd\x9a\x63\xd7\x5c",
- .ilen = 512,
- .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .rlen = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
- }
-};
-
-static struct cipher_testvec speck64_enc_tv_template[] = {
- { /* Speck64/96 */
- .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
- "\x10\x11\x12\x13",
- .klen = 12,
- .input = "\x65\x61\x6e\x73\x20\x46\x61\x74",
- .ilen = 8,
- .result = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
- .rlen = 8,
- }, { /* Speck64/128 */
- .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
- "\x10\x11\x12\x13\x18\x19\x1a\x1b",
- .klen = 16,
- .input = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
- .ilen = 8,
- .result = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
- .rlen = 8,
- },
-};
-
-static struct cipher_testvec speck64_dec_tv_template[] = {
- { /* Speck64/96 */
- .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
- "\x10\x11\x12\x13",
- .klen = 12,
- .input = "\x6c\x94\x75\x41\xec\x52\x79\x9f",
- .ilen = 8,
- .result = "\x65\x61\x6e\x73\x20\x46\x61\x74",
- .rlen = 8,
- }, { /* Speck64/128 */
- .key = "\x00\x01\x02\x03\x08\x09\x0a\x0b"
- "\x10\x11\x12\x13\x18\x19\x1a\x1b",
- .klen = 16,
- .input = "\x8b\x02\x4e\x45\x48\xa5\x6f\x8c",
- .ilen = 8,
- .result = "\x2d\x43\x75\x74\x74\x65\x72\x3b",
- .rlen = 8,
- },
-};
-
-/*
- * Speck64-XTS test vectors, taken from the AES-XTS test vectors with the result
- * recomputed with Speck64 as the cipher, and key lengths adjusted
- */
-
-static struct cipher_testvec speck64_xts_enc_tv_template[] = {
- {
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 24,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .ilen = 32,
- .result = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
- "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
- "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
- "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
- .rlen = 32,
- }, {
- .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 24,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .ilen = 32,
- .result = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
- "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
- "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
- "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
- .rlen = 32,
- }, {
- .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
- "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 24,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .ilen = 32,
- .result = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
- "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
- "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
- "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
- .rlen = 32,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x31\x41\x59\x26\x53\x58\x97\x93",
- .klen = 24,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .ilen = 512,
- .result = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
- "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
- "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
- "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
- "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
- "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
- "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
- "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
- "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
- "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
- "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
- "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
- "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
- "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
- "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
- "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
- "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
- "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
- "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
- "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
- "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
- "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
- "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
- "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
- "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
- "\x07\xff\xf3\x72\x74\x48\xb5\x40"
- "\x50\xb5\xdd\x90\x43\x31\x18\x15"
- "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
- "\x29\x93\x90\x8b\xda\x07\xf0\x35"
- "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
- "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
- "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
- "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
- "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
- "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
- "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
- "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
- "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
- "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
- "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
- "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
- "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
- "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
- "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
- "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
- "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
- "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
- "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
- "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
- "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
- "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
- "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
- "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
- "\x59\xda\xee\x1a\x22\x18\xda\x0d"
- "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
- "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
- "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
- "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
- "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
- "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
- "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
- "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
- "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
- "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
- .rlen = 512,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x62\x49\x77\x57\x24\x70\x93\x69"
- "\x99\x59\x57\x49\x66\x96\x76\x27",
- .klen = 32,
- .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .ilen = 512,
- .result = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
- "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
- "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
- "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
- "\x78\x16\xea\x80\xdb\x33\x75\x94"
- "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
- "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
- "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
- "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
- "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
- "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
- "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
- "\x84\x5e\x46\xed\x20\x89\xb6\x44"
- "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
- "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
- "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
- "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
- "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
- "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
- "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
- "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
- "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
- "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
- "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
- "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
- "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
- "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
- "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
- "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
- "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
- "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
- "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
- "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
- "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
- "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
- "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
- "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
- "\x52\x7f\x29\x51\x94\x20\x67\x3c"
- "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
- "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
- "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
- "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
- "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
- "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
- "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
- "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
- "\x65\x53\x0f\x41\x11\xbd\x98\x99"
- "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
- "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
- "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
- "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
- "\x63\x51\x34\x9d\x96\x12\xae\xce"
- "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
- "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
- "\x54\x27\x74\xbb\x10\x86\x57\x46"
- "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
- "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
- "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
- "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
- "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
- "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
- "\x9b\x63\x76\x32\x2f\x19\x72\x10"
- "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
- "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
- .rlen = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
- }
-};
-
-static struct cipher_testvec speck64_xts_dec_tv_template[] = {
- {
- .key = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .klen = 24,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x84\xaf\x54\x07\x19\xd4\x7c\xa6"
- "\xe4\xfe\xdf\xc4\x1f\x34\xc3\xc2"
- "\x80\xf5\x72\xe7\xcd\xf0\x99\x22"
- "\x35\xa7\x2f\x06\xef\xdc\x51\xaa",
- .ilen = 32,
- .result = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .rlen = 32,
- }, {
- .key = "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x11\x11\x11\x11\x11\x11\x11\x11"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 24,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x12\x56\x73\xcd\x15\x87\xa8\x59"
- "\xcf\x84\xae\xd9\x1c\x66\xd6\x9f"
- "\xb3\x12\x69\x7e\x36\xeb\x52\xff"
- "\x62\xdd\xba\x90\xb3\xe1\xee\x99",
- .ilen = 32,
- .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .rlen = 32,
- }, {
- .key = "\xff\xfe\xfd\xfc\xfb\xfa\xf9\xf8"
- "\xf7\xf6\xf5\xf4\xf3\xf2\xf1\xf0"
- "\x22\x22\x22\x22\x22\x22\x22\x22",
- .klen = 24,
- .iv = "\x33\x33\x33\x33\x33\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x15\x1b\xe4\x2c\xa2\x5a\x2d\x2c"
- "\x27\x36\xc0\xbf\x5d\xea\x36\x37"
- "\x2d\x1a\x88\xbc\x66\xb5\xd0\x0b"
- "\xa1\xbc\x19\xb2\x0f\x3b\x75\x34",
- .ilen = 32,
- .result = "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44"
- "\x44\x44\x44\x44\x44\x44\x44\x44",
- .rlen = 32,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x31\x41\x59\x26\x53\x58\x97\x93",
- .klen = 24,
- .iv = "\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\xaf\xa1\x81\xa6\x32\xbb\x15\x8e"
- "\xf8\x95\x2e\xd3\xe6\xee\x7e\x09"
- "\x0c\x1a\xf5\x02\x97\x8b\xe3\xb3"
- "\x11\xc7\x39\x96\xd0\x95\xf4\x56"
- "\xf4\xdd\x03\x38\x01\x44\x2c\xcf"
- "\x88\xae\x8e\x3c\xcd\xe7\xaa\x66"
- "\xfe\x3d\xc6\xfb\x01\x23\x51\x43"
- "\xd5\xd2\x13\x86\x94\x34\xe9\x62"
- "\xf9\x89\xe3\xd1\x7b\xbe\xf8\xef"
- "\x76\x35\x04\x3f\xdb\x23\x9d\x0b"
- "\x85\x42\xb9\x02\xd6\xcc\xdb\x96"
- "\xa7\x6b\x27\xb6\xd4\x45\x8f\x7d"
- "\xae\xd2\x04\xd5\xda\xc1\x7e\x24"
- "\x8c\x73\xbe\x48\x7e\xcf\x65\x28"
- "\x29\xe5\xbe\x54\x30\xcb\x46\x95"
- "\x4f\x2e\x8a\x36\xc8\x27\xc5\xbe"
- "\xd0\x1a\xaf\xab\x26\xcd\x9e\x69"
- "\xa1\x09\x95\x71\x26\xe9\xc4\xdf"
- "\xe6\x31\xc3\x46\xda\xaf\x0b\x41"
- "\x1f\xab\xb1\x8e\xd6\xfc\x0b\xb3"
- "\x82\xc0\x37\x27\xfc\x91\xa7\x05"
- "\xfb\xc5\xdc\x2b\x74\x96\x48\x43"
- "\x5d\x9c\x19\x0f\x60\x63\x3a\x1f"
- "\x6f\xf0\x03\xbe\x4d\xfd\xc8\x4a"
- "\xc6\xa4\x81\x6d\xc3\x12\x2a\x5c"
- "\x07\xff\xf3\x72\x74\x48\xb5\x40"
- "\x50\xb5\xdd\x90\x43\x31\x18\x15"
- "\x7b\xf2\xa6\xdb\x83\xc8\x4b\x4a"
- "\x29\x93\x90\x8b\xda\x07\xf0\x35"
- "\x6d\x90\x88\x09\x4e\x83\xf5\x5b"
- "\x94\x12\xbb\x33\x27\x1d\x3f\x23"
- "\x51\xa8\x7c\x07\xa2\xae\x77\xa6"
- "\x50\xfd\xcc\xc0\x4f\x80\x7a\x9f"
- "\x66\xdd\xcd\x75\x24\x8b\x33\xf7"
- "\x20\xdb\x83\x9b\x4f\x11\x63\x6e"
- "\xcf\x37\xef\xc9\x11\x01\x5c\x45"
- "\x32\x99\x7c\x3c\x9e\x42\x89\xe3"
- "\x70\x6d\x15\x9f\xb1\xe6\xb6\x05"
- "\xfe\x0c\xb9\x49\x2d\x90\x6d\xcc"
- "\x5d\x3f\xc1\xfe\x89\x0a\x2e\x2d"
- "\xa0\xa8\x89\x3b\x73\x39\xa5\x94"
- "\x4c\xa4\xa6\xbb\xa7\x14\x46\x89"
- "\x10\xff\xaf\xef\xca\xdd\x4f\x80"
- "\xb3\xdf\x3b\xab\xd4\xe5\x5a\xc7"
- "\x33\xca\x00\x8b\x8b\x3f\xea\xec"
- "\x68\x8a\xc2\x6d\xfd\xd4\x67\x0f"
- "\x22\x31\xe1\x0e\xfe\x5a\x04\xd5"
- "\x64\xa3\xf1\x1a\x76\x28\xcc\x35"
- "\x36\xa7\x0a\x74\xf7\x1c\x44\x9b"
- "\xc7\x1b\x53\x17\x02\xea\xd1\xad"
- "\x13\x51\x73\xc0\xa0\xb2\x05\x32"
- "\xa8\xa2\x37\x2e\xe1\x7a\x3a\x19"
- "\x26\xb4\x6c\x62\x5d\xb3\x1a\x1d"
- "\x59\xda\xee\x1a\x22\x18\xda\x0d"
- "\x88\x0f\x55\x8b\x72\x62\xfd\xc1"
- "\x69\x13\xcd\x0d\x5f\xc1\x09\x52"
- "\xee\xd6\xe3\x84\x4d\xee\xf6\x88"
- "\xaf\x83\xdc\x76\xf4\xc0\x93\x3f"
- "\x4a\x75\x2f\xb0\x0b\x3e\xc4\x54"
- "\x7d\x69\x8d\x00\x62\x77\x0d\x14"
- "\xbe\x7c\xa6\x7d\xc5\x24\x4f\xf3"
- "\x50\xf7\x5f\xf4\xc2\xca\x41\x97"
- "\x37\xbe\x75\x74\xcd\xf0\x75\x6e"
- "\x25\x23\x94\xbd\xda\x8d\xb0\xd4",
- .ilen = 512,
- .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .rlen = 512,
- }, {
- .key = "\x27\x18\x28\x18\x28\x45\x90\x45"
- "\x23\x53\x60\x28\x74\x71\x35\x26"
- "\x62\x49\x77\x57\x24\x70\x93\x69"
- "\x99\x59\x57\x49\x66\x96\x76\x27",
- .klen = 32,
- .iv = "\xff\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00",
- .input = "\x55\xed\x71\xd3\x02\x8e\x15\x3b"
- "\xc6\x71\x29\x2d\x3e\x89\x9f\x59"
- "\x68\x6a\xcc\x8a\x56\x97\xf3\x95"
- "\x4e\x51\x08\xda\x2a\xf8\x6f\x3c"
- "\x78\x16\xea\x80\xdb\x33\x75\x94"
- "\xf9\x29\xc4\x2b\x76\x75\x97\xc7"
- "\xf2\x98\x2c\xf9\xff\xc8\xd5\x2b"
- "\x18\xf1\xaf\xcf\x7c\xc5\x0b\xee"
- "\xad\x3c\x76\x7c\xe6\x27\xa2\x2a"
- "\xe4\x66\xe1\xab\xa2\x39\xfc\x7c"
- "\xf5\xec\x32\x74\xa3\xb8\x03\x88"
- "\x52\xfc\x2e\x56\x3f\xa1\xf0\x9f"
- "\x84\x5e\x46\xed\x20\x89\xb6\x44"
- "\x8d\xd0\xed\x54\x47\x16\xbe\x95"
- "\x8a\xb3\x6b\x72\xc4\x32\x52\x13"
- "\x1b\xb0\x82\xbe\xac\xf9\x70\xa6"
- "\x44\x18\xdd\x8c\x6e\xca\x6e\x45"
- "\x8f\x1e\x10\x07\x57\x25\x98\x7b"
- "\x17\x8c\x78\xdd\x80\xa7\xd9\xd8"
- "\x63\xaf\xb9\x67\x57\xfd\xbc\xdb"
- "\x44\xe9\xc5\x65\xd1\xc7\x3b\xff"
- "\x20\xa0\x80\x1a\xc3\x9a\xad\x5e"
- "\x5d\x3b\xd3\x07\xd9\xf5\xfd\x3d"
- "\x4a\x8b\xa8\xd2\x6e\x7a\x51\x65"
- "\x6c\x8e\x95\xe0\x45\xc9\x5f\x4a"
- "\x09\x3c\x3d\x71\x7f\x0c\x84\x2a"
- "\xc8\x48\x52\x1a\xc2\xd5\xd6\x78"
- "\x92\x1e\xa0\x90\x2e\xea\xf0\xf3"
- "\xdc\x0f\xb1\xaf\x0d\x9b\x06\x2e"
- "\x35\x10\x30\x82\x0d\xe7\xc5\x9b"
- "\xde\x44\x18\xbd\x9f\xd1\x45\xa9"
- "\x7b\x7a\x4a\xad\x35\x65\x27\xca"
- "\xb2\xc3\xd4\x9b\x71\x86\x70\xee"
- "\xf1\x89\x3b\x85\x4b\x5b\xaa\xaf"
- "\xfc\x42\xc8\x31\x59\xbe\x16\x60"
- "\x4f\xf9\xfa\x12\xea\xd0\xa7\x14"
- "\xf0\x7a\xf3\xd5\x8d\xbd\x81\xef"
- "\x52\x7f\x29\x51\x94\x20\x67\x3c"
- "\xd1\xaf\x77\x9f\x22\x5a\x4e\x63"
- "\xe7\xff\x73\x25\xd1\xdd\x96\x8a"
- "\x98\x52\x6d\xf3\xac\x3e\xf2\x18"
- "\x6d\xf6\x0a\x29\xa6\x34\x3d\xed"
- "\xe3\x27\x0d\x9d\x0a\x02\x44\x7e"
- "\x5a\x7e\x67\x0f\x0a\x9e\xd6\xad"
- "\x91\xe6\x4d\x81\x8c\x5c\x59\xaa"
- "\xfb\xeb\x56\x53\xd2\x7d\x4c\x81"
- "\x65\x53\x0f\x41\x11\xbd\x98\x99"
- "\xf9\xc6\xfa\x51\x2e\xa3\xdd\x8d"
- "\x84\x98\xf9\x34\xed\x33\x2a\x1f"
- "\x82\xed\xc1\x73\x98\xd3\x02\xdc"
- "\xe6\xc2\x33\x1d\xa2\xb4\xca\x76"
- "\x63\x51\x34\x9d\x96\x12\xae\xce"
- "\x83\xc9\x76\x5e\xa4\x1b\x53\x37"
- "\x17\xd5\xc0\x80\x1d\x62\xf8\x3d"
- "\x54\x27\x74\xbb\x10\x86\x57\x46"
- "\x68\xe1\xed\x14\xe7\x9d\xfc\x84"
- "\x47\xbc\xc2\xf8\x19\x4b\x99\xcf"
- "\x7a\xe9\xc4\xb8\x8c\x82\x72\x4d"
- "\x7b\x4f\x38\x55\x36\x71\x64\xc1"
- "\xfc\x5c\x75\x52\x33\x02\x18\xf8"
- "\x17\xe1\x2b\xc2\x43\x39\xbd\x76"
- "\x9b\x63\x76\x32\x2f\x19\x72\x10"
- "\x9f\x21\x0c\xf1\x66\x50\x7f\xa5"
- "\x0d\x1f\x46\xe0\xba\xd3\x2f\x3c",
- .ilen = 512,
- .result = "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff"
- "\x00\x01\x02\x03\x04\x05\x06\x07"
- "\x08\x09\x0a\x0b\x0c\x0d\x0e\x0f"
- "\x10\x11\x12\x13\x14\x15\x16\x17"
- "\x18\x19\x1a\x1b\x1c\x1d\x1e\x1f"
- "\x20\x21\x22\x23\x24\x25\x26\x27"
- "\x28\x29\x2a\x2b\x2c\x2d\x2e\x2f"
- "\x30\x31\x32\x33\x34\x35\x36\x37"
- "\x38\x39\x3a\x3b\x3c\x3d\x3e\x3f"
- "\x40\x41\x42\x43\x44\x45\x46\x47"
- "\x48\x49\x4a\x4b\x4c\x4d\x4e\x4f"
- "\x50\x51\x52\x53\x54\x55\x56\x57"
- "\x58\x59\x5a\x5b\x5c\x5d\x5e\x5f"
- "\x60\x61\x62\x63\x64\x65\x66\x67"
- "\x68\x69\x6a\x6b\x6c\x6d\x6e\x6f"
- "\x70\x71\x72\x73\x74\x75\x76\x77"
- "\x78\x79\x7a\x7b\x7c\x7d\x7e\x7f"
- "\x80\x81\x82\x83\x84\x85\x86\x87"
- "\x88\x89\x8a\x8b\x8c\x8d\x8e\x8f"
- "\x90\x91\x92\x93\x94\x95\x96\x97"
- "\x98\x99\x9a\x9b\x9c\x9d\x9e\x9f"
- "\xa0\xa1\xa2\xa3\xa4\xa5\xa6\xa7"
- "\xa8\xa9\xaa\xab\xac\xad\xae\xaf"
- "\xb0\xb1\xb2\xb3\xb4\xb5\xb6\xb7"
- "\xb8\xb9\xba\xbb\xbc\xbd\xbe\xbf"
- "\xc0\xc1\xc2\xc3\xc4\xc5\xc6\xc7"
- "\xc8\xc9\xca\xcb\xcc\xcd\xce\xcf"
- "\xd0\xd1\xd2\xd3\xd4\xd5\xd6\xd7"
- "\xd8\xd9\xda\xdb\xdc\xdd\xde\xdf"
- "\xe0\xe1\xe2\xe3\xe4\xe5\xe6\xe7"
- "\xe8\xe9\xea\xeb\xec\xed\xee\xef"
- "\xf0\xf1\xf2\xf3\xf4\xf5\xf6\xf7"
- "\xf8\xf9\xfa\xfb\xfc\xfd\xfe\xff",
- .rlen = 512,
- .also_non_np = 1,
- .np = 3,
- .tap = { 512 - 20, 4, 16 },
- }
-};
-
/* Cast6 test vectors from RFC 2612 */
#define CAST6_ENC_TEST_VECTORS 4
#define CAST6_DEC_TEST_VECTORS 4
diff --git a/drivers/Kconfig b/drivers/Kconfig
index 9480d84..5960816 100644
--- a/drivers/Kconfig
+++ b/drivers/Kconfig
@@ -138,6 +138,8 @@
source "drivers/xen/Kconfig"
+source "drivers/vservices/Kconfig"
+
source "drivers/staging/Kconfig"
source "drivers/platform/Kconfig"
diff --git a/drivers/Makefile b/drivers/Makefile
index 06e2bb4..557cba5 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -10,6 +10,8 @@
obj-$(CONFIG_GENERIC_PHY) += phy/
+obj-$(CONFIG_VSERVICES_SUPPORT) += vservices/
+
# GPIO must come after pinctrl as gpios may need to mux pins etc
obj-$(CONFIG_PINCTRL) += pinctrl/
obj-$(CONFIG_GPIOLIB) += gpio/
diff --git a/drivers/base/dma-removed.c b/drivers/base/dma-removed.c
index 09e77d5..4ecce1f 100644
--- a/drivers/base/dma-removed.c
+++ b/drivers/base/dma-removed.c
@@ -223,7 +223,7 @@
bool skip_zeroing = attrs & DMA_ATTR_SKIP_ZEROING;
int pageno;
unsigned long order;
- void *addr = NULL;
+ void __iomem *addr = NULL;
struct removed_region *dma_mem = dev->removed_mem;
int nbits;
unsigned int align;
@@ -261,7 +261,7 @@
goto out;
}
- addr = ioremap(base, size);
+ addr = ioremap_wc(base, size);
if (WARN_ON(!addr)) {
bitmap_clear(dma_mem->bitmap, pageno, nbits);
} else {
@@ -355,10 +355,10 @@
{
}
-void *removed_remap(struct device *dev, void *cpu_addr, dma_addr_t handle,
- size_t size, unsigned long attrs)
+static void __iomem *removed_remap(struct device *dev, void *cpu_addr,
+ dma_addr_t handle, size_t size, unsigned long attrs)
{
- return ioremap(handle, size);
+ return ioremap_wc(handle, size);
}
void removed_unremap(struct device *dev, void *remapped_address, size_t size)
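The dma-removed.c hunks above make two related changes: the remapping now uses ioremap_wc() (write-combined, the usual choice for carved-out System RAM rather than device register windows), and the returned pointers gain the __iomem annotation so sparse can catch direct dereferences. A minimal sketch of what the annotation buys, assuming a hypothetical helper that fills such a region (fill_removed_region() is illustrative, not part of the patch):

#include <linux/io.h>
#include <linux/types.h>

/* A __iomem pointer must go through the I/O accessors; a plain memcpy()
 * here would be flagged by sparse and may be incorrect on some
 * architectures. */
static void fill_removed_region(void __iomem *dst, const void *src, size_t n)
{
	memcpy_toio(dst, src, n);
}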
diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
index dc259d2..574d08f 100644
--- a/drivers/base/power/main.c
+++ b/drivers/base/power/main.c
@@ -1362,8 +1362,10 @@
dpm_wait_for_children(dev, async);
- if (async_error)
+ if (async_error) {
+ dev->power.direct_complete = false;
goto Complete;
+ }
/*
* If a device configured to wake up the system from sleep states
@@ -1378,6 +1380,7 @@
pm_get_active_wakeup_sources(suspend_abort,
MAX_SUSPEND_ABORT_LEN);
log_suspend_abort_reason(suspend_abort);
+ dev->power.direct_complete = false;
async_error = -EBUSY;
goto Complete;
}
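For context: direct_complete is the optimisation in which the PM core skips a device's system suspend/resume callbacks when its ->prepare() callback has returned a positive value and the device is already runtime-suspended. The two hunks above clear that flag whenever the suspend attempt fails or is aborted by a wakeup source, so the callbacks cannot be wrongly skipped on the following resume. A minimal, hypothetical opt-in (foo_prepare and foo_pm_ops are illustrative names):

#include <linux/device.h>
#include <linux/pm.h>

static int foo_prepare(struct device *dev)
{
	/* A positive return tells the PM core it may take the
	 * direct_complete path for this device. */
	return 1;
}

static const struct dev_pm_ops foo_pm_ops = {
	.prepare = foo_prepare,
};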
diff --git a/drivers/block/Kconfig b/drivers/block/Kconfig
index 39dd30b..64d95c9 100644
--- a/drivers/block/Kconfig
+++ b/drivers/block/Kconfig
@@ -538,3 +538,23 @@
module will be called rsxx.
endif # BLK_DEV
+
+config VSERVICES_BLOCK_SERVER
+ tristate "Virtual Services block server"
+ depends on BLOCK && VSERVICES_SUPPORT && VSERVICES_SERVER
+ default y
+ select VSERVICES_PROTOCOL_BLOCK_SERVER
+ help
+ Select this option if you want support for server side Virtual
+ Services block devices. This allows any Linux block device to be
+ virtualized and exported as a virtual service.
+
+config VSERVICES_BLOCK_CLIENT
+ tristate "Virtual Services Block client device"
+ depends on BLOCK && VSERVICES_SUPPORT && VSERVICES_CLIENT
+ default y
+ select VSERVICES_PROTOCOL_BLOCK_CLIENT
+ help
+ Select this option if you want support for client side Virtual
+ Services block devices. The virtual block devices are typically
+ named /dev/vblock0, /dev/vblock1, etc.
diff --git a/drivers/block/Makefile b/drivers/block/Makefile
index 1e9661e..fe9229f1 100644
--- a/drivers/block/Makefile
+++ b/drivers/block/Makefile
@@ -44,3 +44,8 @@
skd-y := skd_main.o
swim_mod-y := swim.o swim_asm.o
+
+obj-$(CONFIG_VSERVICES_BLOCK_SERVER) += vs_block_server.o
+CFLAGS_vs_block_server.o += -Werror
+obj-$(CONFIG_VSERVICES_BLOCK_CLIENT) += vs_block_client.o
+CFLAGS_vs_block_client.o += -Werror
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index e3d8e4c..a321d7d 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3459,6 +3459,9 @@
(struct floppy_struct **)&outparam);
if (ret)
return ret;
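+ /*
+ * Copy back only the fields up to 'name': the 'name' member of
+ * struct floppy_struct is a kernel pointer and must not be
+ * leaked to userspace.
+ */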
+ memcpy(&inparam.g, outparam,
+ offsetof(struct floppy_struct, name));
+ outparam = &inparam.g;
break;
case FDMSGON:
UDP->flags |= FTD_MSG;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index 9f840d9..a08c223 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -1059,6 +1059,7 @@
memset(lo->lo_encrypt_key, 0, LO_KEY_SIZE);
memset(lo->lo_crypt_name, 0, LO_NAME_SIZE);
memset(lo->lo_file_name, 0, LO_NAME_SIZE);
+ blk_queue_logical_block_size(lo->lo_queue, 512);
if (bdev) {
bdput(bdev);
invalidate_bdev(bdev);
@@ -1344,6 +1345,24 @@
return error;
}
+static int loop_set_block_size(struct loop_device *lo, unsigned long arg)
+{
+ if (lo->lo_state != Lo_bound)
+ return -ENXIO;
+
+ if (arg < 512 || arg > PAGE_SIZE || !is_power_of_2(arg))
+ return -EINVAL;
+
+ blk_mq_freeze_queue(lo->lo_queue);
+
+ blk_queue_logical_block_size(lo->lo_queue, arg);
+ loop_update_dio(lo);
+
+ blk_mq_unfreeze_queue(lo->lo_queue);
+
+ return 0;
+}
+
static int lo_ioctl(struct block_device *bdev, fmode_t mode,
unsigned int cmd, unsigned long arg)
{
@@ -1392,6 +1411,11 @@
if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
err = loop_set_dio(lo, arg);
break;
+ case LOOP_SET_BLOCK_SIZE:
+ err = -EPERM;
+ if ((mode & FMODE_WRITE) || capable(CAP_SYS_ADMIN))
+ err = loop_set_block_size(lo, arg);
+ break;
default:
err = lo->ioctl ? lo->ioctl(lo, cmd, arg) : -EINVAL;
}
@@ -1546,6 +1570,7 @@
arg = (unsigned long) compat_ptr(arg);
case LOOP_SET_FD:
case LOOP_CHANGE_FD:
+ case LOOP_SET_BLOCK_SIZE:
err = lo_ioctl(bdev, mode, cmd, arg);
break;
default:
@@ -1781,6 +1806,7 @@
}
lo->lo_queue->queuedata = lo;
+ blk_queue_max_hw_sectors(lo->lo_queue, BLK_DEF_MAX_SECTORS);
/*
* It doesn't make sense to enable merge because the I/O
* submitted to backing file is handled page by page.
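The new LOOP_SET_BLOCK_SIZE ioctl above is driven from userspace against an already-bound loop device. A hedged usage sketch (the helper name and device path are illustrative; the ioctl constant comes from <linux/loop.h> on kernels that carry this change):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

/* Switch /dev/loopN to 4096-byte logical blocks. The size must be a
 * power of two in [512, PAGE_SIZE] or the ioctl fails with EINVAL;
 * an unbound device fails with ENXIO. */
static int set_loop_block_size(const char *dev)
{
	int ret, fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;
	ret = ioctl(fd, LOOP_SET_BLOCK_SIZE, 4096UL);
	close(fd);
	return ret;
}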
diff --git a/drivers/block/vs_block_client.c b/drivers/block/vs_block_client.c
new file mode 100644
index 0000000..974f8b9
--- /dev/null
+++ b/drivers/block/vs_block_client.c
@@ -0,0 +1,956 @@
+/*
+ * drivers/block/vs_block_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * block vservice client driver
+ *
+ * Function vs_block_client_vs_alloc() is partially derived from
+ * drivers/block/brd.c (brd_alloc())
+ *
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/blkdev.h>
+#include <linux/hdreg.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/kref.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/version.h>
+#include <linux/idr.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/client.h>
+#include <vservices/service.h>
+#include <vservices/session.h>
+#include <vservices/wait.h>
+
+/*
+ * BLK_DEF_MAX_SECTORS was replaced with the hard-coded number 1024 in 3.19,
+ * and restored in 4.3
+ */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 19, 0)) && \
+ (LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0))
+#define BLK_DEF_MAX_SECTORS 1024
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+#define bio_sector(bio) (bio)->bi_iter.bi_sector
+#define bio_size(bio) (bio)->bi_iter.bi_size
+#else
+#define bio_sector(bio) (bio)->bi_sector
+#define bio_size(bio) (bio)->bi_size
+#endif
+
+#define CLIENT_BLKDEV_NAME "vblock"
+
+#define PERDEV_MINORS 256
+
+struct block_client;
+
+struct vs_block_device {
+ /*
+ * The client that created this block device. A reference is held
+ * to the client until the block device is released, so this pointer
+ * should always be valid. However, the client may have since been
+ * reset, so it should only be used if, after locking it, its blkdev
+ * pointer points back to this block device.
+ */
+ struct block_client *client;
+
+ int id;
+ struct gendisk *disk;
+ struct request_queue *queue;
+
+ struct kref kref;
+};
+
+struct block_client {
+ struct vs_client_block_state client;
+ struct vs_service_device *service;
+
+ /* Tasklet & queue for bouncing buffers out of read acks */
+ struct tasklet_struct rx_tasklet;
+ struct list_head rx_queue;
+ struct spinlock rx_queue_lock;
+
+ /*
+ * The current virtual block device. This gets replaced when we do
+ * a reset since other parts of the kernel (e.g. vfs) may still
+ * be accessing the disk.
+ */
+ struct vs_block_device *blkdev;
+
+ /* Shared work item for disk creation */
+ struct work_struct disk_creation_work;
+
+ struct kref kref;
+};
+
+#define state_to_block_client(state) \
+ container_of(state, struct block_client, client)
+
+static int block_client_major;
+
+/* Unique identifier allocation for virtual block devices */
+static DEFINE_IDA(vs_block_ida);
+static DEFINE_MUTEX(vs_block_ida_lock);
+
+static int
+block_client_vs_to_linux_error(vservice_block_block_io_error_t vs_err)
+{
+ switch (vs_err) {
+ case VSERVICE_BLOCK_INVALID_INDEX:
+ return -EILSEQ;
+ case VSERVICE_BLOCK_MEDIA_FAILURE:
+ return -EIO;
+ case VSERVICE_BLOCK_MEDIA_TIMEOUT:
+ return -ETIMEDOUT;
+ case VSERVICE_BLOCK_UNSUPPORTED_COMMAND:
+ return -ENOTSUPP;
+ case VSERVICE_BLOCK_SERVICE_RESET:
+ return -ENXIO;
+ default:
+ WARN_ON(vs_err);
+ return 0;
+ }
+
+ return 0;
+}
+
+static void vs_block_client_kfree(struct kref *kref)
+{
+ struct block_client *client =
+ container_of(kref, struct block_client, kref);
+
+ vs_put_service(client->service);
+ kfree(client);
+}
+
+static void vs_block_client_put(struct block_client *client)
+{
+ kref_put(&client->kref, vs_block_client_kfree);
+}
+
+static void vs_block_device_kfree(struct kref *kref)
+{
+ struct vs_block_device *blkdev =
+ container_of(kref, struct vs_block_device, kref);
+
+ /* Delete the disk and clean up its queue */
+ del_gendisk(blkdev->disk);
+ blk_cleanup_queue(blkdev->queue);
+ put_disk(blkdev->disk);
+
+ mutex_lock(&vs_block_ida_lock);
+ ida_remove(&vs_block_ida, blkdev->id);
+ mutex_unlock(&vs_block_ida_lock);
+
+ if (blkdev->client)
+ vs_block_client_put(blkdev->client);
+
+ kfree(blkdev);
+}
+
+static void vs_block_device_put(struct vs_block_device *blkdev)
+{
+ kref_put(&blkdev->kref, vs_block_device_kfree);
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+static void
+#else
+static int
+#endif
+vs_block_client_blkdev_release(struct gendisk *disk, fmode_t mode)
+{
+ struct vs_block_device *blkdev = disk->private_data;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 10, 0)
+ if (WARN_ON(!blkdev))
+ return;
+#else
+ if (WARN_ON(!blkdev))
+ return -ENXIO;
+#endif
+
+ vs_block_device_put(blkdev);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 10, 0)
+ return 0;
+#endif
+}
+
+static int vs_block_client_blkdev_open(struct block_device *bdev, fmode_t mode)
+{
+ struct vs_block_device *blkdev = bdev->bd_disk->private_data;
+ struct block_client *client;
+ int err = -ENXIO;
+
+ if (!blkdev || !kref_get_unless_zero(&blkdev->kref))
+ goto fail_get_blkdev;
+
+ client = blkdev->client;
+ if (WARN_ON(!client))
+ goto fail_lock_client;
+
+ if (!vs_state_lock_safe(&client->client)) {
+ err = -ENODEV;
+ goto fail_lock_client;
+ }
+
+ if (blkdev != client->blkdev) {
+ /* The client has reset, this blkdev is no longer usable */
+ err = -ENXIO;
+ goto fail_check_client;
+ }
+
+ if ((mode & FMODE_WRITE) > 0 && client->client.readonly) {
+ dev_dbg(&client->service->dev,
+ "opening a readonly disk as writable\n");
+ err = -EROFS;
+ goto fail_check_client;
+ }
+
+ vs_state_unlock(&client->client);
+
+ return 0;
+
+fail_check_client:
+ vs_state_unlock(&client->client);
+fail_lock_client:
+ vs_block_device_put(blkdev);
+fail_get_blkdev:
+ return err;
+}
+
+static int vs_block_client_blkdev_getgeo(struct block_device *bdev,
+ struct hd_geometry *geo)
+{
+ /* These are sane default values for the disk geometry. */
+ geo->cylinders = get_capacity(bdev->bd_disk) / (4 * 16);
+ geo->heads = 4;
+ geo->sectors = 16;
+
+ return 0;
+}
+
+/*
+ * Indirectly determine the Linux block layer sector size and ensure
+ * that our sector size matches it.
+ */
+static int vs_block_client_check_sector_size(struct block_client *client,
+ struct bio *bio)
+{
+ unsigned int expected_bytes;
+
+ if (unlikely(!bio_sectors(bio))) {
+ dev_err(&client->service->dev, "zero-length bio\n");
+ return -EIO;
+ }
+
+ expected_bytes = bio_sectors(bio) * client->client.sector_size;
+ if (unlikely(bio_size(bio) != expected_bytes)) {
+ dev_err(&client->service->dev,
+ "bio has %zd bytes, which is unexpected "
+ "for %d sectors of %zd bytes each",
+ (size_t)bio_size(bio), bio_sectors(bio),
+ (size_t)client->client.sector_size);
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static const struct block_device_operations block_client_ops = {
+ .getgeo = vs_block_client_blkdev_getgeo,
+ .open = vs_block_client_blkdev_open,
+ .release = vs_block_client_blkdev_release,
+ .owner = THIS_MODULE,
+};
+
+static int block_client_send_write_req(struct block_client *client,
+ struct bio *bio)
+{
+ struct vs_client_block_state *state = &client->client;
+ struct vs_mbuf *mbuf;
+ struct vs_pbuf pbuf;
+ struct bio_vec *bvec;
+ int err;
+ bool flush, nodelay, commit;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+ struct bvec_iter iter;
+ struct bio_vec bvec_local;
+#else
+ int i;
+#endif
+
+ err = vs_block_client_check_sector_size(client, bio);
+ if (err < 0)
+ goto fail;
+
+ do {
+ /* Wait until it's possible to send a write request */
+ err = vs_wait_state_nointr(state,
+ vs_client_block_io_req_write_can_send(state));
+ if (err == -ECANCELED)
+ err = -ENXIO;
+ if (err < 0)
+ goto fail;
+
+ /* Wait for quota, while sending a write remains possible */
+ mbuf = vs_wait_alloc_nointr(state,
+ vs_client_block_io_req_write_can_send(state),
+ vs_client_block_io_alloc_req_write(
+ state, &pbuf, GFP_KERNEL));
+ err = IS_ERR(mbuf) ? PTR_ERR(mbuf) : 0;
+
+ /* Retry if sending is no longer possible */
+ } while (err == -ECANCELED);
+
+ if (err < 0)
+ goto fail;
+
+ vs_pbuf_resize(&pbuf, 0);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+ bvec = &bvec_local;
+ bio_for_each_segment(bvec_local, bio, iter)
+#else
+ bio_for_each_segment(bvec, bio, i)
+#endif
+ {
+ unsigned long flags;
+ void *buf = bvec_kmap_irq(bvec, &flags);
+ flush_kernel_dcache_page(bvec->bv_page);
+ err = vs_pbuf_append(&pbuf, buf, bvec->bv_len);
+ bvec_kunmap_irq(buf, &flags);
+ if (err < 0) {
+ dev_err(&client->service->dev,
+ "pbuf copy failed with err %d\n", err);
+ err = -EIO;
+ goto fail_free_write;
+ }
+ }
+
+ if (unlikely(vs_pbuf_size(&pbuf) != bio_size(bio))) {
+ dev_err(&client->service->dev,
+ "pbuf size is wrong: %zd, should be %zd\n",
+ vs_pbuf_size(&pbuf), (size_t)bio_size(bio));
+ err = -EIO;
+ goto fail_free_write;
+ }
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+ flush = (bio_flags(bio) & REQ_PREFLUSH);
+ commit = (bio_flags(bio) & REQ_FUA);
+ nodelay = (bio_flags(bio) & REQ_SYNC);
+#else
+ flush = (bio->bi_rw & REQ_FLUSH);
+ commit = (bio->bi_rw & REQ_FUA);
+ nodelay = (bio->bi_rw & REQ_SYNC);
+#endif
+ err = vs_client_block_io_req_write(state, bio, bio_sector(bio),
+ bio_sectors(bio), nodelay, flush, commit, pbuf, mbuf);
+
+ if (err) {
+ dev_err(&client->service->dev,
+ "write req failed with err %d\n", err);
+ goto fail_free_write;
+ }
+
+ return 0;
+
+fail_free_write:
+ vs_client_block_io_free_req_write(state, &pbuf, mbuf);
+fail:
+ return err;
+}
+
+static int block_client_send_read_req(struct block_client *client,
+ struct bio *bio)
+{
+ struct vs_client_block_state *state = &client->client;
+ int err;
+ bool flush, nodelay;
+
+ err = vs_block_client_check_sector_size(client, bio);
+ if (err < 0)
+ return err;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+ flush = (bio_flags(bio) & REQ_PREFLUSH);
+ nodelay = (bio_flags(bio) & REQ_SYNC);
+#else
+ flush = (bio->bi_rw & REQ_FLUSH);
+ nodelay = (bio->bi_rw & REQ_SYNC);
+#endif
+ do {
+ /* Wait until it's possible to send a read request */
+ err = vs_wait_state_nointr(state,
+ vs_client_block_io_req_read_can_send(state));
+ if (err == -ECANCELED)
+ err = -ENXIO;
+ if (err < 0)
+ break;
+
+ /* Wait for quota, while sending a read remains possible */
+ err = vs_wait_send_nointr(state,
+ vs_client_block_io_req_read_can_send(state),
+ vs_client_block_io_req_read(state, bio,
+ bio_sector(bio), bio_sectors(bio),
+ nodelay, flush, GFP_KERNEL));
+ } while (err == -ECANCELED);
+
+ return err;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+static blk_qc_t
+#else
+static void
+#endif
+vs_block_client_make_request(struct request_queue *q, struct bio *bio)
+{
+ struct block_device *bdev = bio->bi_bdev;
+ struct vs_block_device *blkdev = bdev->bd_disk->private_data;
+ struct block_client *client;
+ int err = 0;
+
+ client = blkdev->client;
+ if (!client || !kref_get_unless_zero(&client->kref)) {
+ err = -ENODEV;
+ goto fail_get_client;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ blk_queue_split(q, &bio, q->bio_split);
+#endif
+
+ if (!vs_state_lock_safe(&client->client)) {
+ err = -ENODEV;
+ goto fail_lock_client;
+ }
+
+ if (client->blkdev != blkdev) {
+ /* Client has reset, this block device is no longer usable */
+ err = -EIO;
+ goto fail_check_client;
+ }
+
+ if (bio_data_dir(bio) == WRITE)
+ err = block_client_send_write_req(client, bio);
+ else
+ err = block_client_send_read_req(client, bio);
+
+fail_check_client:
+ if (err == -ENOLINK)
+ err = -EIO;
+ else
+ vs_state_unlock(&client->client);
+fail_lock_client:
+ vs_block_client_put(client);
+fail_get_client:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ if (err < 0) {
+ bio->bi_error = err;
+ bio_endio(bio);
+ }
+#else
+ if (err < 0)
+ bio_endio(bio, err);
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 4, 0)
+ return BLK_QC_T_NONE;
+#endif
+}
+
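+/*
+ * Allocate a unique index for the new block device. This is the classic
+ * ida_pre_get()/ida_get_new() retry loop; on kernels that provide it,
+ * ida_simple_get() expresses the same thing in a single call.
+ */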
+static int vs_block_client_get_blkdev_id(struct block_client *client)
+{
+ int id;
+ int ret;
+
+retry:
+ ret = ida_pre_get(&vs_block_ida, GFP_KERNEL);
+ if (ret == 0)
+ return -ENOMEM;
+
+ mutex_lock(&vs_block_ida_lock);
+ ret = ida_get_new(&vs_block_ida, &id);
+ mutex_unlock(&vs_block_ida_lock);
+
+ if (ret == -EAGAIN)
+ goto retry;
+
+ return id;
+}
+
+static int vs_block_client_disk_add(struct block_client *client)
+{
+ struct vs_block_device *blkdev;
+ unsigned int max_hw_sectors;
+ int err;
+
+ dev_dbg(&client->service->dev, "device add\n");
+
+ blkdev = kzalloc(sizeof(*blkdev), GFP_KERNEL);
+ if (!blkdev) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ kref_init(&blkdev->kref);
+ blkdev->id = vs_block_client_get_blkdev_id(client);
+ if (blkdev->id < 0) {
+ err = blkdev->id;
+ goto fail_free_blkdev;
+ }
+
+ if ((blkdev->id * PERDEV_MINORS) >> MINORBITS) {
+ err = -ENODEV;
+ goto fail_remove_ida;
+ }
+
+ blkdev->queue = blk_alloc_queue(GFP_KERNEL);
+ if (!blkdev->queue) {
+ dev_err(&client->service->dev,
+ "Error initializing blk queue\n");
+ err = -ENOMEM;
+ goto fail_remove_ida;
+ }
+
+ blk_queue_make_request(blkdev->queue, vs_block_client_make_request);
+ blk_queue_bounce_limit(blkdev->queue, BLK_BOUNCE_ANY);
+ blk_queue_dma_alignment(blkdev->queue, 0);
+
+ /*
+ * Mark this as a paravirtualised device. This is just an alias
+ * of QUEUE_FLAG_NONROT, which prevents the I/O schedulers from
+ * trying to wait for the disk to spin.
+ */
+ queue_flag_set_unlocked(QUEUE_FLAG_VIRT, blkdev->queue);
+
+ blkdev->queue->queuedata = blkdev;
+
+ blkdev->client = client;
+ kref_get(&client->kref);
+
+ max_hw_sectors = min_t(sector_t, BLK_DEF_MAX_SECTORS,
+ client->client.segment_size /
+ client->client.sector_size);
+ blk_queue_max_hw_sectors(blkdev->queue, max_hw_sectors);
+
+ blkdev->disk = alloc_disk(PERDEV_MINORS);
+ if (!blkdev->disk) {
+ dev_err(&client->service->dev, "Error allocating disk\n");
+ err = -ENOMEM;
+ goto fail_free_blk_queue;
+ }
+
+ if (client->client.readonly) {
+ dev_dbg(&client->service->dev, "set device as readonly\n");
+ set_disk_ro(blkdev->disk, true);
+ }
+
+ blkdev->disk->major = block_client_major;
+ blkdev->disk->first_minor = blkdev->id * PERDEV_MINORS;
+ blkdev->disk->fops = &block_client_ops;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+ blkdev->disk->driverfs_dev = &client->service->dev;
+#endif
+ blkdev->disk->private_data = blkdev;
+ blkdev->disk->queue = blkdev->queue;
+ blkdev->disk->flags |= GENHD_FL_EXT_DEVT;
+
+ /*
+ * The block device name is vblock<x>, where x is a unique
+ * identifier. Userspace should rename or symlink the device
+ * appropriately, typically by processing the add uevent.
+ *
+ * If a virtual block device is reset then it may re-open with a
+ * different identifier if something still holds a reference to
+ * the old device (such as a userspace application having an open
+ * file handle).
+ */
+ snprintf(blkdev->disk->disk_name, sizeof(blkdev->disk->disk_name),
+ "%s%d", CLIENT_BLKDEV_NAME, blkdev->id);
+ set_capacity(blkdev->disk, client->client.device_sectors);
+
+ /*
+ * We need to hold a reference on blkdev across add_disk(), to make
+ * sure a concurrent reset does not immediately release the blkdev
+ * and call del_gendisk().
+ */
+ kref_get(&blkdev->kref);
+
+ vs_service_state_lock(client->service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(client->client.state.base)) {
+ vs_service_state_unlock(client->service);
+ err = -ENXIO;
+ goto fail_free_blk_queue;
+ }
+ client->blkdev = blkdev;
+ vs_service_state_unlock(client->service);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
+ device_add_disk(&client->service->dev, blkdev->disk);
+#else
+ add_disk(blkdev->disk);
+#endif
+ dev_dbg(&client->service->dev, "added block disk '%s'\n",
+ blkdev->disk->disk_name);
+
+ /* Release the reference taken above. */
+ vs_block_device_put(blkdev);
+
+ return 0;
+
+fail_free_blk_queue:
+ blk_cleanup_queue(blkdev->queue);
+fail_remove_ida:
+ mutex_lock(&vs_block_ida_lock);
+ ida_remove(&vs_block_ida, blkdev->id);
+ mutex_unlock(&vs_block_ida_lock);
+fail_free_blkdev:
+ kfree(blkdev);
+fail:
+ return err;
+}
+
+static void vs_block_client_disk_creation_work(struct work_struct *work)
+{
+ struct block_client *client = container_of(work,
+ struct block_client, disk_creation_work);
+ struct vs_block_device *blkdev;
+ bool running;
+
+ vs_service_state_lock(client->service);
+ blkdev = client->blkdev;
+ running = VSERVICE_BASE_STATE_IS_RUNNING(client->client.state.base);
+
+ dev_dbg(&client->service->dev,
+ "disk changed: blkdev = %pK, running = %d\n",
+ client->blkdev, running);
+ if (!blkdev && running) {
+ dev_dbg(&client->service->dev, "adding block disk\n");
+ vs_service_state_unlock(client->service);
+ vs_block_client_disk_add(client);
+ } else {
+ vs_service_state_unlock(client->service);
+ }
+}
+
+static void vs_block_client_rx_tasklet(unsigned long data);
+
+static struct vs_client_block_state *
+vs_block_client_alloc(struct vs_service_device *service)
+{
+ struct block_client *client;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client) {
+ dev_err(&service->dev, "Error allocating client struct\n");
+ return NULL;
+ }
+
+ vs_get_service(service);
+ client->service = service;
+
+ INIT_LIST_HEAD(&client->rx_queue);
+ spin_lock_init(&client->rx_queue_lock);
+ tasklet_init(&client->rx_tasklet, vs_block_client_rx_tasklet,
+ (unsigned long)client);
+ tasklet_disable(&client->rx_tasklet);
+
+ INIT_WORK(&client->disk_creation_work,
+ vs_block_client_disk_creation_work);
+ kref_init(&client->kref);
+
+ dev_dbg(&service->dev, "New block client %pK\n", client);
+
+ return &client->client;
+}
+
+static void vs_block_client_release(struct vs_client_block_state *state)
+{
+ struct block_client *client = state_to_block_client(state);
+
+ flush_work(&client->disk_creation_work);
+
+ vs_block_client_put(client);
+}
+
+/* FIXME: Jira ticket SDK-2459 - anjaniv */
+static void vs_block_client_closed(struct vs_client_block_state *state)
+{
+ struct block_client *client = state_to_block_client(state);
+
+ /*
+ * Stop the RX bounce tasklet and clean up its queue. We can wait for
+ * it to stop safely because it doesn't need to acquire the state
+ * lock, only the RX lock which we acquire after it is disabled.
+ */
+ tasklet_disable(&client->rx_tasklet);
+ spin_lock(&client->rx_queue_lock);
+ while (!list_empty(&client->rx_queue)) {
+ struct vs_mbuf *mbuf = list_first_entry(&client->rx_queue,
+ struct vs_mbuf, queue);
+ struct vs_pbuf pbuf;
+ list_del(&mbuf->queue);
+ vs_client_block_io_getbufs_ack_read(state, &pbuf, mbuf);
+ vs_client_block_io_free_ack_read(state, &pbuf, mbuf);
+ }
+ spin_unlock(&client->rx_queue_lock);
+
+ if (client->blkdev) {
+ struct vs_block_device *blkdev = client->blkdev;
+ char service_remove[] = "REMOVING_SERVICE=1";
+ /* + 9 because "DEVNAME=" is 8 chars plus 1 for '\0' */
+ char devname[sizeof(blkdev->disk->disk_name) + 9];
+ char *envp[] = { service_remove, devname, NULL };
+
+ dev_dbg(&client->service->dev, "removing block disk\n");
+
+ /*
+ * Send a change event with DEVNAME to allow the block helper
+ * script to remove any server sessions which use either
+ * v${SERVICE_NAME} or ${DEVNAME}. The remove event generated
+ * by the session driver doesn't include DEVNAME so the only
+ * way for userspace to map SERVICE_NAME to DEVNAME is by the
+ * symlink added when the client service was created. If that
+ * symlink has been deleted, there's no other way to connect
+ * the two names.
+ */
+ snprintf(devname, sizeof(devname), "DEVNAME=%s",
+ blkdev->disk->disk_name);
+ kobject_uevent_env(&client->service->dev.kobj, KOBJ_CHANGE,
+ envp);
+
+ /*
+ * We are done with the device now. The block device will only
+ * get removed once there are no more users (e.g. userspace
+ * applications).
+ */
+ client->blkdev = NULL;
+ vs_block_device_put(blkdev);
+ }
+}
+
+static void vs_block_client_opened(struct vs_client_block_state *state)
+{
+ struct block_client *client = state_to_block_client(state);
+
+#if !defined(CONFIG_LBDAF) && !defined(CONFIG_64BIT)
+ if (state->device_sectors >> (sizeof(sector_t) * 8)) {
+ dev_err(&client->service->dev,
+ "Client doesn't support full capacity large block devices\n");
+ vs_client_block_close(state);
+ return;
+ }
+#endif
+
+ /* Unblock the RX bounce tasklet. */
+ tasklet_enable(&client->rx_tasklet);
+
+ /*
+ * The block device allocation needs to sleep, so we defer it to a
+ * work queue.
+ */
+ queue_work(client->service->work_queue, &client->disk_creation_work);
+}
+
+static int vs_block_client_ack_read(struct vs_client_block_state *state,
+ void *tag, struct vs_pbuf pbuf, struct vs_mbuf *mbuf)
+{
+ struct block_client *client = state_to_block_client(state);
+ struct bio *bio = tag;
+ struct bio_vec *bvec;
+ int err = 0;
+ size_t bytes_read = 0;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+ struct bio_vec bvec_local;
+ struct bvec_iter iter;
+#else
+ int i;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 14, 0)
+ bvec = &bvec_local;
+ bio_for_each_segment(bvec_local, bio, iter)
+#else
+ bio_for_each_segment(bvec, bio, i)
+#endif
+ {
+ unsigned long flags;
+ void *buf;
+ if (vs_pbuf_size(&pbuf) < bytes_read + bvec->bv_len) {
+ dev_err(&client->service->dev,
+ "bio read overrun: %zu into %zu byte response, but need %zd bytes\n",
+ bytes_read, vs_pbuf_size(&pbuf),
+ (size_t)bvec->bv_len);
+ err = -EIO;
+ break;
+ }
+ buf = bvec_kmap_irq(bvec, &flags);
+ memcpy(buf, vs_pbuf_data(&pbuf) + bytes_read, bvec->bv_len);
+ flush_kernel_dcache_page(bvec->bv_page);
+ bvec_kunmap_irq(buf, &flags);
+ bytes_read += bvec->bv_len;
+ }
+
+ vs_client_block_io_free_ack_read(state, &pbuf, mbuf);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ if (err < 0)
+ bio->bi_error = err;
+ bio_endio(bio);
+#else
+ bio_endio(bio, err);
+#endif
+
+ return 0;
+}
+
+static void vs_block_client_rx_tasklet(unsigned long data)
+{
+ struct block_client *client = (struct block_client *)data;
+ struct vs_mbuf *mbuf;
+ struct vs_pbuf pbuf;
+
+ spin_lock(&client->rx_queue_lock);
+
+ /* The list shouldn't be empty. */
+ if (WARN_ON(list_empty(&client->rx_queue))) {
+ spin_unlock(&client->rx_queue_lock);
+ return;
+ }
+
+ /* Get the next mbuf, and reschedule ourselves if there are more. */
+ mbuf = list_first_entry(&client->rx_queue, struct vs_mbuf, queue);
+ list_del(&mbuf->queue);
+ if (!list_empty(&client->rx_queue))
+ tasklet_schedule(&client->rx_tasklet);
+
+ spin_unlock(&client->rx_queue_lock);
+
+ /* Process the ack. */
+ vs_client_block_io_getbufs_ack_read(&client->client, &pbuf, mbuf);
+ vs_block_client_ack_read(&client->client, mbuf->priv, pbuf, mbuf);
+}
+
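+/*
+ * Entry point for incoming read acks. The driver sets rx_atomic, so this
+ * may be called in atomic context; the payload copy is therefore deferred
+ * to the RX tasklet instead of being done here.
+ */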
+static int vs_block_client_queue_ack_read(struct vs_client_block_state *state,
+ void *tag, struct vs_pbuf pbuf, struct vs_mbuf *mbuf)
+{
+ struct block_client *client = state_to_block_client(state);
+
+ spin_lock(&client->rx_queue_lock);
+ list_add_tail(&mbuf->queue, &client->rx_queue);
+ mbuf->priv = tag;
+ spin_unlock(&client->rx_queue_lock);
+
+ tasklet_schedule(&client->rx_tasklet);
+
+ wake_up(&state->service->quota_wq);
+
+ return 0;
+}
+
+static int vs_block_client_ack_write(struct vs_client_block_state *state,
+ void *tag)
+{
+ struct bio *bio = tag;
+
+ if (WARN_ON(!bio))
+ return -EPROTO;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ bio_endio(bio);
+#else
+ bio_endio(bio, 0);
+#endif
+
+ wake_up(&state->service->quota_wq);
+
+ return 0;
+}
+
+static int vs_block_client_nack_io(struct vs_client_block_state *state,
+ void *tag, vservice_block_block_io_error_t err)
+{
+ struct bio *bio = tag;
+
+ if (WARN_ON(!bio))
+ return -EPROTO;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ bio->bi_error = block_client_vs_to_linux_error(err);
+ bio_endio(bio);
+#else
+ bio_endio(bio, block_client_vs_to_linux_error(err));
+#endif
+
+ wake_up(&state->service->quota_wq);
+
+ return 0;
+}
+
+static struct vs_client_block block_client_driver = {
+ .rx_atomic = true,
+ .alloc = vs_block_client_alloc,
+ .release = vs_block_client_release,
+ .opened = vs_block_client_opened,
+ .closed = vs_block_client_closed,
+ .io = {
+ .ack_read = vs_block_client_queue_ack_read,
+ .nack_read = vs_block_client_nack_io,
+ .ack_write = vs_block_client_ack_write,
+ .nack_write = vs_block_client_nack_io,
+ }
+};
+
+static int __init vs_block_client_init(void)
+{
+ int err;
+
+ block_client_major = register_blkdev(0, CLIENT_BLKDEV_NAME);
+ if (block_client_major < 0) {
+ pr_err("Err registering blkdev\n");
+ err = block_client_major;
+ goto fail;
+ }
+
+ err = vservice_block_client_register(&block_client_driver,
+ "block_client_driver");
+ if (err)
+ goto fail_unregister_blkdev;
+
+ return 0;
+
+fail_unregister_blkdev:
+ unregister_blkdev(block_client_major, CLIENT_BLKDEV_NAME);
+fail:
+ return err;
+}
+
+static void __exit vs_block_client_exit(void)
+{
+ vservice_block_client_unregister(&block_client_driver);
+ unregister_blkdev(block_client_major, CLIENT_BLKDEV_NAME);
+}
+
+module_init(vs_block_client_init);
+module_exit(vs_block_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Block Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/block/vs_block_server.c b/drivers/block/vs_block_server.c
new file mode 100644
index 0000000..9d20f6a
--- /dev/null
+++ b/drivers/block/vs_block_server.c
@@ -0,0 +1,1179 @@
+/*
+ * drivers/block/vs_block_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * block vservice server driver
+ *
+ */
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/genhd.h>
+#include <linux/fs.h>
+#include <linux/bio.h>
+#include <linux/blkdev.h>
+#include <linux/spinlock.h>
+#include <linux/module.h>
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/server.h>
+#include <vservices/protocol/block/client.h>
+#include <vservices/service.h>
+#include <vservices/wait.h>
+
+#define VS_BLOCK_BLKDEV_DEFAULT_MODE FMODE_READ
+#define VS_BLOCK_BLK_DEF_SECTOR_SIZE 512
+
+/*
+ * Metadata for a request. Note that the bio must be embedded at the end of
+ * this structure, because it is allocated from a bioset.
+ */
+struct block_server_request {
+ struct block_server *server;
+ u32 tagid;
+ u32 size;
+ int op_err;
+ struct list_head list;
+ struct vs_pbuf pbuf;
+ struct vs_mbuf *mbuf;
+ bool bounced;
+ bool submitted;
+
+ struct bio bio;
+};
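+/*
+ * A request is recovered from its completed bio with
+ * container_of(bio, struct block_server_request, bio). That only works
+ * because 'bio' is the last member; the bioset's front_pad covers the
+ * fields preceding it.
+ */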
+
+struct block_server {
+ struct vs_server_block_state server;
+ struct vs_service_device *service;
+
+ struct block_device *bdev;
+ struct bio_set *bioset;
+
+ unsigned int sector_size;
+ bool started;
+
+ /* Bounced writes are deferred to keep the memcpy off the service queue */
+ struct list_head bounce_req_queue;
+ struct work_struct bounce_req_work;
+ spinlock_t bounce_req_lock;
+
+ /* Count of outstanding requests submitted to block layer */
+ atomic_t submitted_req_count;
+ wait_queue_head_t submitted_req_wq;
+
+ /* Completions are deferred because end_io may be in atomic context */
+ struct list_head completed_req_queue;
+ struct work_struct completed_req_work;
+ spinlock_t completed_req_lock;
+};
+
+#define state_to_block_server(state) \
+ container_of(state, struct block_server, server)
+
+#define dev_to_block_server(dev) \
+ state_to_block_server(dev_get_drvdata(dev))
+
+static inline vservice_block_block_io_error_t
+block_server_linux_to_vs_error(int err)
+{
+ /*
+ * This list is not exhaustive. For all other errors, we return
+ * unsupported_command.
+ */
+ switch (err) {
+ case -ECOMM:
+ case -EIO:
+ case -ENOMEM:
+ return VSERVICE_BLOCK_MEDIA_FAILURE;
+ case -ETIME:
+ case -ETIMEDOUT:
+ return VSERVICE_BLOCK_MEDIA_TIMEOUT;
+ case -EILSEQ:
+ return VSERVICE_BLOCK_INVALID_INDEX;
+ default:
+ if (err)
+ return VSERVICE_BLOCK_UNSUPPORTED_COMMAND;
+ return 0;
+ }
+
+ return 0;
+}
+
+static inline u32 vs_req_num_sectors(struct block_server *server,
+ struct block_server_request *req)
+{
+ return req->size / server->sector_size;
+}
+
+static inline u64 vs_req_sector_index(struct block_server_request *req)
+{
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+ return req->bio.bi_iter.bi_sector;
+#else
+ return req->bio.bi_sector;
+#endif
+}
+
+static void vs_block_server_closed(struct vs_server_block_state *state)
+{
+ struct block_server *server = state_to_block_server(state);
+ struct block_server_request *req;
+
+ /*
+ * Fail all requests that haven't been sent to the block layer yet.
+ */
+ spin_lock(&server->bounce_req_lock);
+ while (!list_empty(&server->bounce_req_queue)) {
+ req = list_first_entry(&server->bounce_req_queue,
+ struct block_server_request, list);
+ list_del(&req->list);
+ spin_unlock(&server->bounce_req_lock);
+ bio_io_error(&req->bio);
+ spin_lock(&server->bounce_req_lock);
+ }
+ spin_unlock(&server->bounce_req_lock);
+
+ /*
+ * Wait until all outstanding requests to the block layer are
+ * complete.
+ */
+ wait_event(server->submitted_req_wq,
+ !atomic_read(&server->submitted_req_count));
+
+ /*
+ * Discard all the completed requests.
+ */
+ spin_lock_irq(&server->completed_req_lock);
+ while (!list_empty(&server->completed_req_queue)) {
+ req = list_first_entry(&server->completed_req_queue,
+ struct block_server_request, list);
+ list_del(&req->list);
+ if (req->mbuf) {
+ spin_unlock_irq(&server->completed_req_lock);
+ if (bio_data_dir(&req->bio) == WRITE)
+ vs_server_block_io_free_req_write(state,
+ &req->pbuf, req->mbuf);
+ else
+ vs_server_block_io_free_ack_read(state,
+ &req->pbuf, req->mbuf);
+ spin_lock_irq(&server->completed_req_lock);
+ }
+ bio_put(&req->bio);
+ }
+ spin_unlock_irq(&server->completed_req_lock);
+}
+
+static ssize_t
+vs_block_server_readonly_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct block_server *server = dev_to_block_server(dev);
+ int err;
+ unsigned long val;
+
+ vs_service_state_lock(server->service);
+ if (server->started) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ goto unlock;
+
+ if (bdev_read_only(server->bdev) && !val) {
+ dev_info(dev,
+ "Cannot set %s to read/write: read-only device\n",
+ server->service->name);
+ err = -EINVAL;
+ goto unlock;
+ }
+
+ server->server.readonly = val;
+ err = count;
+
+unlock:
+ vs_service_state_unlock(server->service);
+
+ return err;
+}
+
+static ssize_t
+vs_block_server_readonly_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct block_server *server = dev_to_block_server(dev);
+ int cnt;
+
+ vs_service_state_lock(server->service);
+ cnt = scnprintf(buf, PAGE_SIZE, "%d\n", server->server.readonly);
+ vs_service_state_unlock(server->service);
+
+ return cnt;
+}
+
+static ssize_t
+vs_block_server_start_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct block_server *server = dev_to_block_server(dev);
+ int err;
+ unsigned long val;
+
+ vs_service_state_lock(server->service);
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ goto unlock;
+
+ if (!val && server->started) {
+ err = -EBUSY;
+ goto unlock;
+ }
+
+ if (val && !server->started) {
+ server->started = true;
+
+ if (server->server.state.base.statenum ==
+ VSERVICE_BASE_STATE_CLOSED__OPEN)
+ vs_server_block_open_complete(&server->server,
+ VS_SERVER_RESP_SUCCESS);
+ }
+
+ err = count;
+unlock:
+ vs_service_state_unlock(server->service);
+
+ return err;
+}
+
+static ssize_t
+vs_block_server_start_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct block_server *server = dev_to_block_server(dev);
+ int cnt;
+
+ vs_service_state_lock(server->service);
+ cnt = scnprintf(buf, PAGE_SIZE, "%d\n", server->started);
+ vs_service_state_unlock(server->service);
+
+ return cnt;
+}
+
+static DEVICE_ATTR(start, S_IWUSR | S_IRUSR, vs_block_server_start_show,
+ vs_block_server_start_store);
+static DEVICE_ATTR(readonly, S_IWUSR | S_IRUSR, vs_block_server_readonly_show,
+ vs_block_server_readonly_store);
+
+static struct attribute *vs_block_server_dev_attrs[] = {
+ &dev_attr_start.attr,
+ &dev_attr_readonly.attr,
+ NULL,
+};
+
+static const struct attribute_group vs_block_server_attr_group = {
+ .attrs = vs_block_server_dev_attrs
+};
+
+/*
+ * Invoked by vs_server_block_handle_req_open() after an open request is
+ * received, to perform server-specific initialisations.
+ *
+ * The "delayed start" feature can be enforced here.
+ */
+static vs_server_response_type_t
+vs_block_server_open(struct vs_server_block_state * _state)
+{
+ struct block_server *server = state_to_block_server(_state);
+
+ return (server->started) ? VS_SERVER_RESP_SUCCESS :
+ VS_SERVER_RESP_EXPLICIT_COMPLETE;
+}
+
+static int
+vs_block_server_complete_req_read(struct block_server_request *req)
+{
+ struct block_server *server = req->server;
+ struct vs_server_block_state *state = &server->server;
+ int err = -EIO;
+
+ if (req->op_err) {
+ err = req->op_err;
+ dev_dbg(&server->service->dev,
+ "read nack, err %d sector 0x%llx num 0x%x\n",
+ err, vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+
+ if (req->mbuf)
+ vs_server_block_io_free_ack_read(state, &req->pbuf,
+ req->mbuf);
+
+ err = vs_server_block_io_send_nack_read(state, req->tagid,
+ block_server_linux_to_vs_error(err),
+ GFP_KERNEL);
+ } else {
+ if (req->bounced && !req->mbuf) {
+ req->mbuf = vs_server_block_io_alloc_ack_read(
+ &server->server, &req->pbuf,
+ GFP_KERNEL);
+ if (IS_ERR(req->mbuf)) {
+ err = PTR_ERR(req->mbuf);
+ req->mbuf = NULL;
+ }
+ }
+
+ if (req->bounced && req->mbuf) {
+ int i;
+ struct bio_vec *bv;
+ void *data = req->pbuf.data;
+
+ if (vs_pbuf_resize(&req->pbuf, req->size) < 0) {
+ bio_io_error(&req->bio);
+ return 0;
+ }
+
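+ /* Copy each bounced segment into the reply payload and
+ * release the bounce page afterwards. */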
+ bio_for_each_segment_all(bv, &req->bio, i) {
+ memcpy(data, page_address(bv->bv_page) +
+ bv->bv_offset, bv->bv_len);
+ data += bv->bv_len;
+ __free_page(bv->bv_page);
+ }
+ req->bounced = false;
+ }
+
+ if (req->mbuf) {
+ dev_vdbg(&server->service->dev,
+ "read ack, sector 0x%llx num 0x%x\n",
+ vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+
+ err = vs_server_block_io_send_ack_read(state,
+ req->tagid, req->pbuf, req->mbuf);
+
+ if (err && (err != -ENOBUFS)) {
+ vs_server_block_io_free_ack_read(state,
+ &req->pbuf, req->mbuf);
+ req->mbuf = NULL;
+ }
+ } else {
+ WARN_ON(!err || !req->bounced);
+ }
+ }
+
+ if (err && (err != -ENOBUFS))
+ dev_dbg(&server->service->dev,
+ "error %d sending read reply\n", err);
+ else if (err == -ENOBUFS)
+ dev_vdbg(&server->service->dev, "out of quota, will retry\n");
+
+ return err;
+}
+
+static int
+vs_block_server_complete_req_write(struct block_server_request *req)
+{
+ struct block_server *server = req->server;
+ struct vs_server_block_state *state = &server->server;
+ int err;
+
+ WARN_ON(req->mbuf);
+
+ if (req->op_err) {
+ dev_dbg(&server->service->dev,
+ "write nack, err %d sector 0x%llx num 0x%x\n",
+ req->op_err, vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+
+ err = vs_server_block_io_send_nack_write(state, req->tagid,
+ block_server_linux_to_vs_error(req->op_err),
+ GFP_KERNEL);
+ } else {
+ dev_vdbg(&server->service->dev,
+ "write ack, sector 0x%llx num 0x%x\n",
+ vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+
+ err = vs_server_block_io_send_ack_write(state, req->tagid,
+ GFP_KERNEL);
+ }
+
+ if (err && (err != -ENOBUFS))
+ dev_dbg(&server->service->dev,
+ "error %d sending write reply\n", err);
+ else if (err == -ENOBUFS)
+ dev_vdbg(&server->service->dev, "out of quota, will retry\n");
+
+ return err;
+}
+
+static int vs_block_server_complete_req(struct block_server *server,
+ struct block_server_request *req)
+{
+ int err;
+
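+ /*
+ * Rewind the bio's bvec index: the block layer has finished iterating
+ * this bio, and the completion handlers below re-walk its bvec array
+ * when copying bounce pages back into the reply buffer.
+ */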
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+ req->bio.bi_iter.bi_idx = 0;
+#else
+ req->bio.bi_idx = 0;
+#endif
+ if (!vs_state_lock_safe(&server->server))
+ return -ENOLINK;
+
+ if (bio_data_dir(&req->bio) == WRITE)
+ err = vs_block_server_complete_req_write(req);
+ else
+ err = vs_block_server_complete_req_read(req);
+
+ vs_state_unlock(&server->server);
+
+ if (err == -ENOBUFS)
+ dev_vdbg(&server->service->dev, "bio %pK response out of quota, will retry\n", &req->bio);
+
+ return err;
+}
+
+static void vs_block_server_complete_requests_work(struct work_struct *work)
+{
+ struct block_server *server = container_of(work, struct block_server,
+ completed_req_work);
+ struct block_server_request *req;
+
+ vs_service_send_batch_start(server->service, false);
+
+ /*
+ * Send ack/nack responses for each completed request. If a request
+ * cannot be sent because we are over-quota then this function will
+ * return with a non-empty list, and the tx_ready handler will
+ * reschedule us when we are back under quota. In all other cases
+ * this function will return with an empty list.
+ */
+ spin_lock_irq(&server->completed_req_lock);
+ while (!list_empty(&server->completed_req_queue)) {
+ int err;
+ req = list_first_entry(&server->completed_req_queue,
+ struct block_server_request, list);
+ dev_vdbg(&server->service->dev, "complete bio %pK\n", &req->bio);
+ list_del(&req->list);
+ spin_unlock_irq(&server->completed_req_lock);
+
+ err = vs_block_server_complete_req(server, req);
+ if (err == -ENOBUFS) {
+ dev_vdbg(&server->service->dev, "defer bio %pK\n", &req->bio);
+ /*
+ * Couldn't send the completion; re-queue the request
+ * and exit. We'll start again when more quota becomes
+ * available.
+ */
+ spin_lock_irq(&server->completed_req_lock);
+ list_add_tail(&req->list,
+ &server->completed_req_queue);
+ break;
+ }
+
+ dev_vdbg(&server->service->dev, "free bio %pK err %d\n", &req->bio, err);
+ bio_put(&req->bio);
+
+ spin_lock_irq(&server->completed_req_lock);
+ }
+ spin_unlock_irq(&server->completed_req_lock);
+
+ vs_service_send_batch_end(server->service, true);
+}
+
+static int vs_block_server_tx_ready(struct vs_server_block_state *state)
+{
+ struct block_server *server = state_to_block_server(state);
+
+ schedule_work(&server->completed_req_work);
+
+ return 0;
+}
+
+static bool vs_block_can_map_pbuf(struct request_queue *q,
+ struct vs_pbuf *pbuf, size_t size)
+{
+ /* The pbuf must satisfy the driver's alignment requirements. */
+ if (!blk_rq_aligned(q, (unsigned long)pbuf->data, size))
+ return false;
+
+ /*
+ * bios can only contain pages. Sometimes the pbuf is in an IO region
+ * that has no struct page (e.g. a channel primary buffer), in which
+ * case we can't map it into a bio.
+ */
+ /* FIXME: Redmine issue #930 - philip. */
+ if (!pfn_valid(__pa(pbuf->data) >> PAGE_SHIFT))
+ return false;
+
+ return true;
+}
+
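+/*
+ * Map a virtually contiguous pbuf into the bio one page at a time. If
+ * bio_add_page() refuses a page (e.g. a device segment limit is hit) we
+ * fail with -EIO rather than trying to split the bio; see the TODO above
+ * vs_block_make_request() below.
+ */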
+static int vs_block_bio_map_pbuf(struct bio *bio, struct vs_pbuf *pbuf)
+{
+ int offset = offset_in_page((unsigned long)pbuf->data);
+ void *ptr = pbuf->data;
+ int size = pbuf->size;
+
+ while (size > 0) {
+ unsigned bytes = min_t(unsigned, PAGE_SIZE - offset, size);
+
+ if (bio_add_page(bio, virt_to_page(ptr), bytes,
+ offset) < bytes)
+ return -EIO;
+
+ ptr += bytes;
+ size -= bytes;
+ offset = 0;
+ }
+
+ return 0;
+}
+
+/* Read request handling */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+static void vs_block_server_read_done(struct bio *bio, int err)
+#else
+static void vs_block_server_read_done(struct bio *bio)
+#endif
+{
+ unsigned long flags;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ int err = bio->bi_error;
+#endif
+ struct block_server_request *req = container_of(bio,
+ struct block_server_request, bio);
+ struct block_server *server = req->server;
+ req->op_err = err;
+
+ spin_lock_irqsave(&server->completed_req_lock, flags);
+ if (req->mbuf)
+ list_add(&req->list, &server->completed_req_queue);
+ else
+ list_add_tail(&req->list, &server->completed_req_queue);
+ spin_unlock_irqrestore(&server->completed_req_lock, flags);
+
+ if (req->submitted && atomic_dec_and_test(&server->submitted_req_count))
+ wake_up_all(&server->submitted_req_wq);
+
+ schedule_work(&server->completed_req_work);
+}
+
+/*
+ * TODO: this may need to split and chain the bio if it exceeds the physical
+ * segment limit of the device. Not clear whose responsibility that is; queue
+ * might do it for us (if there is one)
+ */
+#define vs_block_make_request(bio) generic_make_request(bio)
+
+static int vs_block_submit_read(struct block_server *server,
+ struct block_server_request *req, gfp_t gfp)
+{
+ struct request_queue *q = bdev_get_queue(server->bdev);
+ struct bio *bio = &req->bio;
+ int size = req->size;
+ int err = 0;
+
+ if (req->mbuf && vs_block_can_map_pbuf(q, &req->pbuf, size)) {
+ /*
+ * The mbuf is valid and the driver can directly access the
+ * pbuf, so we don't need a bounce buffer. Map the pbuf
+ * directly into the bio.
+ */
+ if (vs_pbuf_resize(&req->pbuf, size) < 0)
+ err = -EIO;
+ if (!err)
+ err = vs_block_bio_map_pbuf(bio, &req->pbuf);
+ } else {
+ /* We need a bounce buffer. First set up the bvecs. */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+ bio->bi_iter.bi_size = size;
+#else
+ bio->bi_size = size;
+#endif
+
+ while (size > 0) {
+ struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt];
+
+ BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
+
+ bvec->bv_page = NULL; /* Allocated below */
+ bvec->bv_len = min_t(unsigned, PAGE_SIZE, size);
+ bvec->bv_offset = 0;
+
+ bio->bi_vcnt++;
+ size -= bvec->bv_len;
+ }
+
+ err = bio_alloc_pages(bio, gfp);
+ if (!err) {
+ blk_recount_segments(q, bio);
+ req->bounced = true;
+ }
+ }
+
+ if (err) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ bio->bi_error = err;
+ bio_endio(bio);
+#else
+ bio_endio(bio, err);
+#endif
+ } else {
+ dev_vdbg(&server->service->dev,
+ "submit read req sector %#llx count %#x\n",
+ vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+ req->submitted = true;
+ atomic_inc(&server->submitted_req_count);
+ vs_block_make_request(bio);
+ }
+
+ return 0;
+}
+
+static int vs_block_server_io_req_read(struct vs_server_block_state *state,
+ u32 tagid, u64 sector_index, u32 num_sects, bool nodelay,
+ bool flush)
+{
+ struct block_server *server = state_to_block_server(state);
+ struct bio *bio;
+ struct block_server_request *req;
+ unsigned size = num_sects * server->sector_size;
+ unsigned op_flags = 0;
+
+ /*
+ * This nr_pages calculation assumes that the pbuf data is offset from
+ * the start of the size-aligned message buffer by more than 0 but
+ * less than one sector, which is always true for the current message
+ * layout generated by mill when we assume 512-byte sectors.
+ */
+ unsigned nr_pages = 1 + (size >> PAGE_SHIFT);
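+ /*
+ * For example, with 4KiB pages an 8-sector (4KiB) read whose pbuf
+ * starts part-way into a page touches at most two pages, and
+ * 1 + (0x1000 >> PAGE_SHIFT) = 2.
+ */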
+
+ bio = bio_alloc_bioset(GFP_KERNEL, nr_pages, server->bioset);
+ if (!bio)
+ return -ENOMEM;
+ dev_vdbg(&server->service->dev, "alloc r bio %pK\n", bio);
+ req = container_of(bio, struct block_server_request, bio);
+
+ req->server = server;
+ req->tagid = tagid;
+ req->op_err = 0;
+ req->mbuf = NULL;
+ req->size = size;
+ req->bounced = false;
+ req->submitted = false;
+
+ if (flush) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+ op_flags |= REQ_PREFLUSH;
+#else
+ op_flags |= REQ_FLUSH;
+#endif
+ }
+ if (nodelay) {
+ op_flags |= REQ_SYNC;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+ bio->bi_iter.bi_sector = (sector_t)sector_index;
+#else
+ bio->bi_sector = (sector_t)sector_index;
+#endif
+ bio->bi_bdev = server->bdev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+ bio_set_op_attrs(bio, REQ_OP_READ, op_flags);
+#else
+ bio->bi_rw = READ | op_flags;
+#endif
+ bio->bi_end_io = vs_block_server_read_done;
+
+ req->mbuf = vs_server_block_io_alloc_ack_read(state, &req->pbuf,
+ GFP_KERNEL);
+ if (IS_ERR(req->mbuf) && (PTR_ERR(req->mbuf) == -ENOBUFS)) {
+ /* Fall back to a bounce buffer */
+ req->mbuf = NULL;
+ } else if (IS_ERR(req->mbuf)) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ bio->bi_error = PTR_ERR(req->mbuf);
+ bio_endio(bio);
+#else
+ bio_endio(bio, PTR_ERR(req->mbuf));
+#endif
+ return 0;
+ }
+
+ return vs_block_submit_read(server, req, GFP_KERNEL);
+}
+
+/* Write request handling */
+static int vs_block_submit_bounced_write(struct block_server *server,
+ struct block_server_request *req, gfp_t gfp)
+{
+ struct bio *bio = &req->bio;
+ void *data = req->pbuf.data;
+ struct bio_vec *bv;
+ int i;
+
+ if (bio_alloc_pages(bio, gfp | __GFP_NOWARN) < 0)
+ return -ENOMEM;
+ blk_recount_segments(bdev_get_queue(server->bdev), bio);
+ req->bounced = true;
+
+ /* Copy all the data into the bounce buffer */
+ bio_for_each_segment_all(bv, bio, i) {
+ memcpy(page_address(bv->bv_page) + bv->bv_offset, data,
+ bv->bv_len);
+ data += bv->bv_len;
+ }
+
+ vs_server_block_io_free_req_write(&server->server, &req->pbuf,
+ req->mbuf);
+ req->mbuf = NULL;
+
+ dev_vdbg(&server->service->dev,
+ "submit bounced write req sector %#llx count %#x\n",
+ vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+ req->submitted = true;
+ atomic_inc(&server->submitted_req_count);
+ vs_block_make_request(bio);
+
+ return 0;
+}
+
+static void vs_block_server_write_bounce_work(struct work_struct *work)
+{
+ struct block_server *server = container_of(work, struct block_server,
+ bounce_req_work);
+ struct block_server_request *req;
+
+ spin_lock(&server->bounce_req_lock);
+ while (!list_empty(&server->bounce_req_queue)) {
+ req = list_first_entry(&server->bounce_req_queue,
+ struct block_server_request, list);
+ dev_vdbg(&server->service->dev, "write bio %pK\n", &req->bio);
+ list_del(&req->list);
+ spin_unlock(&server->bounce_req_lock);
+
+ if (vs_block_submit_bounced_write(server, req,
+ GFP_KERNEL) == -ENOMEM) {
+ spin_lock(&server->bounce_req_lock);
+ list_add(&req->list, &server->bounce_req_queue);
+ spin_unlock(&server->bounce_req_lock);
+ schedule_work(work);
+ return;
+ }
+
+ spin_lock(&server->bounce_req_lock);
+ }
+ spin_unlock(&server->bounce_req_lock);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 3, 0)
+static void vs_block_server_write_done(struct bio *bio, int err)
+#else
+static void vs_block_server_write_done(struct bio *bio)
+#endif
+{
+ unsigned long flags;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ int err = bio->bi_error;
+#endif
+ struct block_server_request *req = container_of(bio,
+ struct block_server_request, bio);
+ struct block_server *server = req->server;
+
+ if (req->bounced) {
+ int i;
+ struct bio_vec *bv;
+ bio_for_each_segment_all(bv, bio, i)
+ __free_page(bv->bv_page);
+ } else if (req->mbuf) {
+ vs_server_block_io_free_req_write(&server->server, &req->pbuf,
+ req->mbuf);
+ req->mbuf = NULL;
+ }
+
+ if (req->submitted && atomic_dec_and_test(&server->submitted_req_count))
+ wake_up_all(&server->submitted_req_wq);
+
+ req->op_err = err;
+
+ spin_lock_irqsave(&server->completed_req_lock, flags);
+ list_add_tail(&req->list, &server->completed_req_queue);
+ spin_unlock_irqrestore(&server->completed_req_lock, flags);
+
+ schedule_work(&server->completed_req_work);
+}
+
+static int vs_block_server_io_req_write(struct vs_server_block_state *state,
+ u32 tagid, u64 sector_index, u32 num_sects, bool nodelay,
+ bool flush, bool commit, struct vs_pbuf pbuf, struct vs_mbuf *mbuf)
+{
+ struct block_server *server = state_to_block_server(state);
+ struct request_queue *q = bdev_get_queue(server->bdev);
+ struct bio *bio;
+ struct block_server_request *req;
+ unsigned long data = (unsigned long)pbuf.data;
+ unsigned long start = data >> PAGE_SHIFT;
+ unsigned long end = (data + pbuf.size + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ int err;
+ unsigned op_flags = 0;
+
+ bio = bio_alloc_bioset(GFP_KERNEL, end - start, server->bioset);
+ if (!bio)
+ return -ENOMEM;
+ dev_vdbg(&server->service->dev, "alloc w bio %pK\n", bio);
+ req = container_of(bio, struct block_server_request, bio);
+
+ req->server = server;
+ req->tagid = tagid;
+ req->op_err = 0;
+ req->mbuf = mbuf;
+ req->pbuf = pbuf;
+ req->size = server->sector_size * num_sects;
+ req->bounced = false;
+ req->submitted = false;
+
+ if (flush) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+ op_flags |= REQ_PREFLUSH;
+#else
+ op_flags |= REQ_FLUSH;
+#endif
+ }
+ if (commit) {
+ op_flags |= REQ_FUA;
+ }
+ if (nodelay) {
+ op_flags |= REQ_SYNC;
+ }
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+ bio->bi_iter.bi_sector = (sector_t)sector_index;
+#else
+ bio->bi_sector = (sector_t)sector_index;
+#endif
+ bio->bi_bdev = server->bdev;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 8, 0)
+ bio_set_op_attrs(bio, REQ_OP_WRITE, op_flags);
+#else
+ bio->bi_rw = WRITE | op_flags;
+#endif
+ bio->bi_end_io = vs_block_server_write_done;
+
+ if (pbuf.size < req->size) {
+ err = -EINVAL;
+ goto fail_bio;
+ }
+ if (WARN_ON(pbuf.size > req->size))
+ pbuf.size = req->size;
+
+ if (state->readonly) {
+ err = -EROFS;
+ goto fail_bio;
+ }
+
+ if (!vs_block_can_map_pbuf(q, &req->pbuf, req->pbuf.size)) {
+ /* We need a bounce buffer. First set up the bvecs. */
+ int size = pbuf.size;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,14,0)
+ bio->bi_iter.bi_size = size;
+#else
+ bio->bi_size = size;
+#endif
+
+ while (size > 0) {
+ struct bio_vec *bvec = &bio->bi_io_vec[bio->bi_vcnt];
+
+ BUG_ON(bio->bi_vcnt >= bio->bi_max_vecs);
+
+ bvec->bv_page = NULL; /* Allocated later */
+ bvec->bv_len = min_t(unsigned, PAGE_SIZE, size);
+ bvec->bv_offset = 0;
+
+ bio->bi_vcnt++;
+ size -= bvec->bv_len;
+ }
+
+ /*
+ * Defer the rest so we don't have to hold the state lock
+ * during alloc_page & memcpy
+ */
+ spin_lock(&server->bounce_req_lock);
+ list_add_tail(&req->list, &server->bounce_req_queue);
+ spin_unlock(&server->bounce_req_lock);
+ schedule_work(&server->bounce_req_work);
+
+ return 0;
+ }
+
+ /* No bounce needed; map the pbuf directly. */
+ err = vs_block_bio_map_pbuf(bio, &pbuf);
+ if (err < 0)
+ goto fail_bio;
+
+ dev_vdbg(&server->service->dev,
+ "submit direct write req sector %#llx count %#x\n",
+ vs_req_sector_index(req),
+ vs_req_num_sectors(server, req));
+ req->submitted = true;
+ atomic_inc(&server->submitted_req_count);
+ vs_block_make_request(bio);
+
+ return 0;
+
+fail_bio:
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 3, 0)
+ bio->bi_error = err;
+ bio_endio(bio);
+#else
+ bio_endio(bio, err);
+#endif
+ return 0;
+}
+
+static struct block_device *
+vs_block_server_find_by_name(struct block_server *server)
+{
+ struct block_device *bdev = NULL;
+ struct class_dev_iter iter;
+ struct device *dev;
+
+ class_dev_iter_init(&iter, &block_class, NULL, NULL);
+ while (1) {
+ dev = class_dev_iter_next(&iter);
+ if (!dev)
+ break;
+
+ if (strcmp(dev_name(dev), server->service->name) == 0) {
+ bdev = blkdev_get_by_dev(dev->devt,
+ VS_BLOCK_BLKDEV_DEFAULT_MODE, NULL);
+ if (!IS_ERR_OR_NULL(bdev))
+ break;
+ }
+ }
+ class_dev_iter_exit(&iter);
+
+ if (!dev || IS_ERR_OR_NULL(bdev))
+ return ERR_PTR(-ENODEV);
+
+ dev_dbg(&server->service->dev, "Attached to block device %s (%d:%d)\n",
+ dev_name(dev), MAJOR(dev->devt), MINOR(dev->devt));
+ return bdev;
+}
+
+static struct block_device *
+vs_block_server_find_by_path(struct block_server *server, const char *base_path)
+{
+ struct block_device *bdev;
+ char *bdev_path;
+
+ bdev_path = kasprintf(GFP_KERNEL, "%s/%s", base_path,
+ server->service->name);
+ if (!bdev_path)
+ return ERR_PTR(-ENOMEM);
+
+ bdev = blkdev_get_by_path(bdev_path, VS_BLOCK_BLKDEV_DEFAULT_MODE,
+ NULL);
+ if (!IS_ERR_OR_NULL(bdev))
+ dev_dbg(&server->service->dev, "Attached to block device %s\n",
+ bdev_path);
+
+ kfree(bdev_path);
+
+ /* blkdev_get_by_path() returns an ERR_PTR() on failure, not NULL */
+ if (!bdev)
+ return ERR_PTR(-ENODEV);
+ return bdev;
+}
+
+static struct block_device *
+vs_block_server_attach_block_device(struct block_server *server)
+{
+ const char *paths[] = {
+ "/dev",
+ "/dev/block",
+ "/dev/mapper",
+ "/dev/disk/by-partlabel",
+ "/dev/disk/by-label",
+ "/dev/disk/by-partuuid",
+ "/dev/disk/by-uuid"
+ };
+ struct block_device *bdev;
+ int i;
+
+ /*
+ * Try first to look the block device up by path. This is done because
+ * the name exposed to user-space in /dev/ is not necessarily the name
+ * being used inside the kernel for the device.
+ */
+ for (i = 0; i < ARRAY_SIZE(paths); i++) {
+ bdev = vs_block_server_find_by_path(server, paths[i]);
+ if (!IS_ERR(bdev))
+ break;
+ }
+ if (i == ARRAY_SIZE(paths)) {
+ /*
+ * Couldn't find the block device in any of the usual places.
+ * Try to match it against the kernel's device name. If the
+ * name of the service and the name of a device in the block
+ * class match then attempt to look the block device up by the
+ * dev_t (major/minor) value.
+ */
+ bdev = vs_block_server_find_by_name(server);
+ }
+ if (IS_ERR(bdev))
+ return bdev;
+
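+ /*
+ * Advertise the largest per-command segment that fits in one message
+ * buffer. For example (numbers illustrative only), a 4096-byte mbuf
+ * limit with a 4-byte message ID and 512-byte sectors gives
+ * round_down(4092, 512) = 3584 bytes, i.e. 7 sectors per segment.
+ */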
+ server->sector_size = VS_BLOCK_BLK_DEF_SECTOR_SIZE;
+ server->server.segment_size = round_down(
+ vs_service_max_mbuf_size(server->service) -
+ sizeof(vs_message_id_t), server->sector_size);
+ server->server.sector_size = server->sector_size;
+ server->server.device_sectors = bdev->bd_part->nr_sects;
+ if (bdev_read_only(bdev))
+ server->server.readonly = true;
+ server->server.flushable = true;
+ server->server.committable = true;
+
+ return bdev;
+}
+
+static struct vs_server_block_state *
+vs_block_server_alloc(struct vs_service_device *service)
+{
+ struct block_server *server;
+ int err;
+
+ server = kzalloc(sizeof(*server), GFP_KERNEL);
+ if (!server)
+ return NULL;
+
+ server->service = service;
+ server->started = false;
+ INIT_LIST_HEAD(&server->bounce_req_queue);
+ INIT_WORK(&server->bounce_req_work, vs_block_server_write_bounce_work);
+ spin_lock_init(&server->bounce_req_lock);
+ atomic_set(&server->submitted_req_count, 0);
+ init_waitqueue_head(&server->submitted_req_wq);
+ INIT_LIST_HEAD(&server->completed_req_queue);
+ INIT_WORK(&server->completed_req_work,
+ vs_block_server_complete_requests_work);
+ spin_lock_init(&server->completed_req_lock);
+
+ server->bdev = vs_block_server_attach_block_device(server);
+ if (IS_ERR(server->bdev)) {
+ dev_err(&server->service->dev,
+ "No appropriate block device was found to satisfy the service name %s - error %ld\n",
+ server->service->name, PTR_ERR(server->bdev));
+ goto fail_attach_device;
+ }
+
+ dev_set_drvdata(&service->dev, &server->server);
+
+ err = sysfs_create_group(&service->dev.kobj,
+ &vs_block_server_attr_group);
+ if (err) {
+ dev_err(&service->dev,
+ "Failed to create attribute group for service %s\n",
+ service->name);
+ goto fail_create_group;
+ }
+
+ /*
+ * We know the upper bound on simultaneously active bios (i.e. the
+ * smaller of the in quota, and the sum of the read and write command
+ * tag limits), so we can pre-allocate that many, and hopefully never
+ * fail to allocate one in a request handler.
+ *
+ * However, allocation may fail if the number of pages (and thus
+ * bvecs) in a request exceeds BIO_INLINE_VECS (which is hard-coded to
+ * 4 in all mainline kernels). That possibility is the only reason we
+ * can't enable rx_atomic for this driver.
+ */
+ server->bioset = bioset_create(min_t(unsigned, service->recv_quota,
+ VSERVICE_BLOCK_IO_READ_MAX_PENDING +
+ VSERVICE_BLOCK_IO_WRITE_MAX_PENDING),
+ offsetof(struct block_server_request, bio));
+ if (!server->bioset) {
+ dev_err(&service->dev,
+ "Failed to allocate bioset for service %s\n",
+ service->name);
+ goto fail_create_bioset;
+ }
+
+ dev_dbg(&service->dev, "New block server %pK\n", server);
+
+ return &server->server;
+
+fail_create_bioset:
+ sysfs_remove_group(&server->service->dev.kobj,
+ &vs_block_server_attr_group);
+fail_create_group:
+ dev_set_drvdata(&service->dev, NULL);
+ blkdev_put(server->bdev, VS_BLOCK_BLKDEV_DEFAULT_MODE);
+fail_attach_device:
+ kfree(server);
+
+ return NULL;
+}
+
+static void vs_block_server_release(struct vs_server_block_state *state)
+{
+ struct block_server *server = state_to_block_server(state);
+
+ cancel_work_sync(&server->bounce_req_work);
+ cancel_work_sync(&server->completed_req_work);
+
+ blkdev_put(server->bdev, VS_BLOCK_BLKDEV_DEFAULT_MODE);
+
+ sysfs_remove_group(&server->service->dev.kobj,
+ &vs_block_server_attr_group);
+
+ bioset_free(server->bioset);
+
+ kfree(server);
+}
+
+static struct vs_server_block block_server_driver = {
+ .alloc = vs_block_server_alloc,
+ .release = vs_block_server_release,
+ .open = vs_block_server_open,
+ .closed = vs_block_server_closed,
+ .tx_ready = vs_block_server_tx_ready,
+ .io = {
+ .req_read = vs_block_server_io_req_read,
+ .req_write = vs_block_server_io_req_write,
+ },
+
+ /* Large default quota for batching read/write commands */
+ .in_quota_best = 32,
+ .out_quota_best = 32,
+};
+
+static int __init vs_block_server_init(void)
+{
+ return vservice_block_server_register(&block_server_driver,
+ "block_server_driver");
+}
+
+static void __exit vs_block_server_exit(void)
+{
+ vservice_block_server_unregister(&block_server_driver);
+}
+
+module_init(vs_block_server_init);
+module_exit(vs_block_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Block Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c
index 44bccb1..8dce1a8 100644
--- a/drivers/bluetooth/btusb.c
+++ b/drivers/bluetooth/btusb.c
@@ -349,6 +349,7 @@
{ USB_DEVICE(0x7392, 0xa611), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8723DE Bluetooth devices */
+ { USB_DEVICE(0x0bda, 0xb009), .driver_info = BTUSB_REALTEK },
{ USB_DEVICE(0x2ff8, 0xb011), .driver_info = BTUSB_REALTEK },
/* Additional Realtek 8821AE Bluetooth devices */
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index b0d0181..87d8c3c 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -623,3 +623,49 @@
endmenu
+config OKL4_PIPE
+ bool "OKL4 Pipe Driver"
+ depends on OKL4_GUEST
+ default n
+ help
+ Virtual pipe driver for the OKL4 Microvisor. This driver allows
+ OKL4 Microvisor pipes to be exposed directly to user level as
+ character devices.
+
+config VSERVICES_SERIAL
+ tristate
+
+config VSERVICES_SERIAL_SERVER
+ tristate "Virtual Services serial server"
+ depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+ select VSERVICES_SERIAL
+ select VSERVICES_PROTOCOL_SERIAL_SERVER
+ default y
+ help
+ Select this option if you want support for server side Virtual
+ Services serial. A virtual serial service behaves similarly to
+ a UNIX pseudo terminal (pty), and does not require any physical
+ serial hardware. Virtual serial devices are typically called
+ /dev/ttyVS0, /dev/ttyVS1, etc.
+
+config VSERVICES_SERIAL_CLIENT
+ tristate "Virtual Services serial client"
+ depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+ select VSERVICES_SERIAL
+ select VSERVICES_PROTOCOL_SERIAL_CLIENT
+ default y
+ help
+ Select this option if you want support for client side Virtual
+ Services serial. A virtual serial service behaves similarly to
+ a UNIX pseudo terminal (pty), and does not require any physical
+ serial hardware. Virtual serial devices are typically called
+ /dev/ttyVS0, /dev/ttyVS1, etc.
+
+config VSERVICES_VTTY_COUNT
+ int "Maximum number of Virtual Services serial devices"
+ depends on VSERVICES_SERIAL
+ range 0 256
+ default "8"
+ help
+ The maximum number of Virtual Services serial devices to support.
+ This limit applies to both the client and server.
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index 81283c4..a00142a 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -67,3 +67,11 @@
obj-$(CONFIG_MSM_ADSPRPC) += adsprpc_compat.o
endif
obj-$(CONFIG_MSM_RDBG) += rdbg.o
+obj-$(CONFIG_OKL4_PIPE) += okl4_pipe.o
+CFLAGS_okl4_pipe.o += -Werror
+obj-$(CONFIG_VSERVICES_SERIAL) += vservices_serial.o
+CFLAGS_vservices_serial.o += -Werror
+obj-$(CONFIG_VSERVICES_SERIAL_CLIENT) += vs_serial_client.o
+CFLAGS_vs_serial_client.o += -Werror
+obj-$(CONFIG_VSERVICES_SERIAL_SERVER) += vs_serial_server.o
+CFLAGS_vs_serial_server.o += -Werror
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 67677ea..b5af2e2 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -645,21 +645,20 @@
return -ENOTTY;
}
-static int dma_alloc_memory(dma_addr_t *region_phys, size_t size,
+static int dma_alloc_memory(dma_addr_t *region_phys, void **vaddr, size_t size,
unsigned long dma_attrs)
{
struct fastrpc_apps *me = &gfa;
- void *vaddr = NULL;
if (me->dev == NULL) {
pr_err("device adsprpc-mem is not initialized\n");
return -ENODEV;
}
- vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
+ *vaddr = dma_alloc_attrs(me->dev, size, region_phys, GFP_KERNEL,
dma_attrs);
- if (!vaddr) {
- pr_err("ADSPRPC: Failed to allocate %x remote heap memory\n",
- (unsigned int)size);
+ if (IS_ERR_OR_NULL(*vaddr)) {
+ pr_err("adsprpc: %s: %s: dma_alloc_attrs failed for size 0x%zx, returned %pK\n",
+ current->comm, __func__, size, (*vaddr));
return -ENOMEM;
}
return 0;
@@ -798,6 +797,7 @@
struct fastrpc_mmap *map = NULL;
unsigned long attrs;
dma_addr_t region_phys = 0;
+ void *region_vaddr = NULL;
unsigned long flags;
int err = 0, vmid;
@@ -820,12 +820,13 @@
map->apps = me;
map->fl = NULL;
- VERIFY(err, !dma_alloc_memory(&region_phys, len, dma_attrs));
+ VERIFY(err, !dma_alloc_memory(&region_phys, &region_vaddr,
+ len, dma_attrs));
if (err)
goto bail;
map->phys = (uintptr_t)region_phys;
map->size = len;
- map->va = (uintptr_t)map->phys;
+ map->va = (uintptr_t)region_vaddr;
} else if (mflags == FASTRPC_DMAHANDLE_NOMAP) {
ion_phys_addr_t iphys;
@@ -2707,14 +2708,15 @@
1, &rbuf);
if (err)
goto bail;
- rbuf->virt = NULL;
- err = fastrpc_mmap_on_dsp(fl, ud->flags,
- (uintptr_t)rbuf->virt,
+ err = fastrpc_mmap_on_dsp(fl, ud->flags, 0,
rbuf->phys, rbuf->size, &raddr);
if (err)
goto bail;
rbuf->raddr = raddr;
} else {
+
+ uintptr_t va_to_dsp;
+
mutex_lock(&fl->fl_map_mutex);
if (!fastrpc_mmap_find(fl, ud->fd, (uintptr_t)ud->vaddrin,
ud->size, ud->flags, 1, &map)) {
@@ -2722,13 +2724,20 @@
mutex_unlock(&fl->map_mutex);
return 0;
}
+
VERIFY(err, !fastrpc_mmap_create(fl, ud->fd, 0,
(uintptr_t)ud->vaddrin, ud->size,
ud->flags, &map));
mutex_unlock(&fl->fl_map_mutex);
if (err)
goto bail;
- VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, map->va,
+
+ if (ud->flags == ADSP_MMAP_HEAP_ADDR ||
+ ud->flags == ADSP_MMAP_REMOTE_HEAP_ADDR)
+ va_to_dsp = 0;
+ else
+ va_to_dsp = (uintptr_t)map->va;
+ VERIFY(err, 0 == fastrpc_mmap_on_dsp(fl, ud->flags, va_to_dsp,
map->phys, map->size, &raddr));
if (err)
goto bail;
diff --git a/drivers/char/adsprpc_compat.c b/drivers/char/adsprpc_compat.c
index ea7967c..3b54243 100644
--- a/drivers/char/adsprpc_compat.c
+++ b/drivers/char/adsprpc_compat.c
@@ -132,6 +132,11 @@
compat_uint_t level; /* level of control */
};
+#define FASTRPC_CONTROL_SMMU (2)
+struct compat_fastrpc_ctrl_smmu {
+ compat_uint_t sharedcb;
+};
+
#define FASTRPC_CONTROL_KALLOC (3)
struct compat_fastrpc_ctrl_kalloc {
compat_uint_t kalloc_support; /* Remote memory allocation from kernel */
@@ -141,6 +146,7 @@
compat_uint_t req;
union {
struct compat_fastrpc_ctrl_latency lp;
+ struct compat_fastrpc_ctrl_smmu smmu;
struct compat_fastrpc_ctrl_kalloc kalloc;
};
};
diff --git a/drivers/char/diag/Kconfig b/drivers/char/diag/Kconfig
index a7b6e75..07d675e 100644
--- a/drivers/char/diag/Kconfig
+++ b/drivers/char/diag/Kconfig
@@ -37,7 +37,7 @@
config DIAG_OVER_PCIE
bool "Enable Diag traffic to go over PCIE"
depends on DIAG_CHAR
- depends on MSM_MHI
+ depends on MSM_MHI_DEV
help
Diag over PCIE enables sending diag traffic over PCIE endpoint when
pcie is available. Diag PCIE channels should be configured
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index a089e7c..0937b2f 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -2303,8 +2303,8 @@
pid_struct = find_get_pid(entry->tgid);
if (!pid_struct) {
DIAG_LOG(DIAG_DEBUG_DCI,
- "diag: valid pid doesn't exist for pid = %d\n",
- entry->tgid);
+ "diag: Exited pid (%d) doesn't match dci client of pid (%d)\n",
+ tgid, entry->tgid);
continue;
}
task_s = get_pid_task(pid_struct, PIDTYPE_PID);
diff --git a/drivers/char/diag/diag_mux.c b/drivers/char/diag/diag_mux.c
index 52a57bb..ae59175 100644
--- a/drivers/char/diag/diag_mux.c
+++ b/drivers/char/diag/diag_mux.c
@@ -127,7 +127,7 @@
pcie_logger.ops[proc] = ops;
err = diag_pcie_register(proc, ctx, ops);
if (err) {
- driver->transport_set == DIAG_ROUTE_TO_USB;
+ driver->transport_set = DIAG_ROUTE_TO_USB;
diag_mux->logger = &usb_logger;
diag_mux->mode = DIAG_USB_MODE;
usb_logger.ops[proc] = ops;
diff --git a/drivers/char/diag/diag_pcie.c b/drivers/char/diag/diag_pcie.c
index 8353abc..8f53573 100644
--- a/drivers/char/diag/diag_pcie.c
+++ b/drivers/char/diag/diag_pcie.c
@@ -33,7 +33,7 @@
{
.id = DIAG_PCIE_LOCAL,
.name = DIAG_LEGACY,
- .enabled = 0,
+ .enabled = {0},
.mempool = POOL_TYPE_MUX_APPS,
.ops = NULL,
.wq = NULL,
@@ -171,6 +171,8 @@
diag_ws_on_copy_complete(DIAG_WS_MUX);
spin_unlock_irqrestore(&ch->write_lock, flags);
diagmem_free(driver, req, ch->mempool);
+ kfree(ctxt);
+ ctxt = NULL;
return;
}
DIAG_LOG(DIAG_DEBUG_MUX, "full write_done, ctxt: %pK\n",
@@ -281,19 +283,19 @@
bytes_to_write = mhi_dev_write_channel(req);
diag_ws_on_copy(DIAG_WS_MUX);
if (bytes_to_write != write_len) {
- pr_err_ratelimited("diag: In %s, error writing to pcie channel %s, err: %d\n",
+ pr_err_ratelimited("diag: In %s, error writing to pcie channel %s, err: %d, write_len: %d\n",
__func__, pcie_info->name,
- bytes_to_write);
+ bytes_to_write, write_len);
DIAG_LOG(DIAG_DEBUG_MUX,
- "ERR! unable to write to pcie, err: %d\n",
- bytes_to_write);
+ "ERR! unable to write to pcie, err: %d, write_len: %d\n",
+ bytes_to_write, write_len);
diag_ws_on_copy_fail(DIAG_WS_MUX);
spin_lock_irqsave(&pcie_info->write_lock, flags);
diag_pcie_buf_tbl_remove(pcie_info, buf);
kfree(req->context);
diagmem_free(driver, req, pcie_info->mempool);
spin_unlock_irqrestore(&pcie_info->write_lock, flags);
- return bytes_to_write;
+ return -EINVAL;
}
offset += write_len;
bytes_remaining -= write_len;
@@ -365,17 +367,18 @@
bytes_to_write = mhi_dev_write_channel(req);
diag_ws_on_copy(DIAG_WS_MUX);
if (bytes_to_write != len) {
- pr_err_ratelimited("diag: In %s, error writing to pcie channel %s, err: %d\n",
- __func__, pcie_info->name, bytes_to_write);
+ pr_err_ratelimited("diag: In %s, error writing to pcie channel %s, err: %d len: %d\n",
+ __func__, pcie_info->name, bytes_to_write, len);
diag_ws_on_copy_fail(DIAG_WS_MUX);
DIAG_LOG(DIAG_DEBUG_MUX,
- "ERR! unable to write to pcie, err: %d\n",
- bytes_to_write);
+ "ERR! unable to write to pcie, err: %d len: %d\n",
+ bytes_to_write, len);
spin_lock_irqsave(&pcie_info->write_lock, flags);
diag_pcie_buf_tbl_remove(pcie_info, buf);
spin_unlock_irqrestore(&pcie_info->write_lock, flags);
kfree(req->context);
diagmem_free(driver, req, pcie_info->mempool);
+ return -EINVAL;
}
DIAG_LOG(DIAG_DEBUG_MUX, "wrote packet to pcie chan:%d, len:%d",
pcie_info->out_chan, len);
@@ -475,10 +478,6 @@
static void diag_pcie_connect(struct diag_pcie_info *ch)
{
- int err = 0;
- int num_write = 0;
- int num_read = 1; /* Only one read buffer for any pcie channel */
-
if (!ch || !atomic_read(&ch->enabled))
return;
diff --git a/drivers/char/diag/diag_usb.c b/drivers/char/diag/diag_usb.c
index 24a9f8a..f960e8b 100644
--- a/drivers/char/diag/diag_usb.c
+++ b/drivers/char/diag/diag_usb.c
@@ -219,13 +219,6 @@
*/
static void usb_disconnect(struct diag_usb_info *ch)
{
- if (!ch)
- return;
-
- if (!atomic_read(&ch->connected) &&
- driver->usb_connected && diag_mask_param())
- diag_clear_masks(0);
-
if (ch && ch->ops && ch->ops->close)
ch->ops->close(ch->ctxt, DIAG_USB_MODE);
}
@@ -234,6 +227,14 @@
{
struct diag_usb_info *ch = container_of(work, struct diag_usb_info,
disconnect_work);
+
+ if (!ch)
+ return;
+
+ if (!atomic_read(&ch->connected) &&
+ driver->usb_connected && diag_mask_param())
+ diag_clear_masks(0);
+
usb_disconnect(ch);
}
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index 1cfd4b7..cb04195 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -557,8 +557,8 @@
{
int ret;
- DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: process exit %s\n",
- current->comm);
+ DIAG_LOG(DIAG_DEBUG_USERSPACE, "diag: %s process exit with pid = %d\n",
+ current->comm, current->tgid);
ret = diag_remove_client_entry(file);
return ret;
@@ -3282,6 +3282,8 @@
int exit_stat = 0;
int write_len = 0;
struct diag_md_session_t *session_info = NULL;
+ struct pid *pid_struct = NULL;
+ struct task_struct *task_s = NULL;
mutex_lock(&driver->diagchar_mutex);
for (i = 0; i < driver->num_clients; i++)
@@ -3526,8 +3528,19 @@
list_for_each_safe(start, temp, &driver->dci_client_list) {
entry = list_entry(start, struct diag_dci_client_tbl,
track);
- if (entry->client->tgid != current->tgid)
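+ /*
+ * Validate that the client task still exists before
+ * dereferencing entry->client, rather than trusting the
+ * stored pointer.
+ */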
+ pid_struct = find_get_pid(entry->tgid);
+ if (!pid_struct)
continue;
+ task_s = get_pid_task(pid_struct, PIDTYPE_PID);
+ if (!task_s) {
+ DIAG_LOG(DIAG_DEBUG_DCI,
+ "diag: valid task doesn't exist for pid = %d\n",
+ entry->tgid);
+ continue;
+ }
+ if (task_s == entry->client &&
+ entry->client->tgid != current->tgid)
+ continue;
if (!entry->in_service)
continue;
if (copy_to_user(buf + ret, &data_type, sizeof(int))) {
diff --git a/drivers/char/okl4_pipe.c b/drivers/char/okl4_pipe.c
new file mode 100644
index 0000000..e7a0d8a
--- /dev/null
+++ b/drivers/char/okl4_pipe.c
@@ -0,0 +1,677 @@
+/*
+ * drivers/char/okl4_pipe.c
+ *
+ * Copyright (c) 2015 General Dynamics
+ * Copyright (c) 2015 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * OKL4 Microvisor Pipes driver.
+ *
+ * Clients using this driver must have vclient names of the form
+ * "pipe%d", where %d is the pipe number, which must be
+ * unique and less than MAX_PIPES.
+ */
+
+/* #define DEBUG 1 */
+/* #define VERBOSE_DEBUG 1 */
+
+#include <linux/init.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/mutex.h>
+#include <linux/delay.h>
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <linux/cdev.h>
+#include <linux/poll.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/of.h>
+#include <asm/uaccess.h>
+#include <asm-generic/okl4_virq.h>
+
+#include <microvisor/microvisor.h>
+#if defined(CONFIG_OKL4_VIRTUALISATION)
+#include <asm/okl4-microvisor/okl4tags.h>
+#include <asm/okl4-microvisor/microvisor_bus.h>
+#include <asm/okl4-microvisor/virq.h>
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
+#define __devinit
+#define __devexit
+#define __devexit_p(x) x
+#endif
+
+#define DRIVER_NAME "okl4-pipe"
+#define DEVICE_NAME "okl4-pipe"
+
+#ifndef CONFIG_OF
+#error "okl4-pipe driver only supported on device tree kernels"
+#endif
+
+#define MAX_PIPES 8
+
+#ifdef CONFIG_OKL4_INTERLEAVED_PRIORITIES
+extern int vcpu_prio_normal;
+#endif
+
+static int okl4_pipe_major;
+static struct class *okl4_pipe_class;
+
+/* This can be extended if required */
+struct okl4_pipe_mv {
+ int pipe_id;
+};
+
+struct okl4_pipe {
+ struct okl4_pipe_data_buffer *write_buf;
+ okl4_kcap_t pipe_tx_kcap;
+ okl4_kcap_t pipe_rx_kcap;
+ int tx_irq;
+ int rx_irq;
+ size_t max_msg_size;
+ int ref_count;
+ struct mutex pipe_mutex;
+ spinlock_t pipe_lock;
+
+ struct platform_device *pdev;
+ struct cdev cdev;
+
+ bool reset;
+ bool tx_maybe_avail;
+ bool rx_maybe_avail;
+
+ wait_queue_head_t rx_wait_q;
+ wait_queue_head_t tx_wait_q;
+ wait_queue_head_t poll_wait_q;
+
+ char *rx_buf;
+ size_t rx_buf_count;
+};
+static struct okl4_pipe pipes[MAX_PIPES];
+
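+/*
+ * Issue a single pipe control operation (e.g. SET_TX_READY, SET_RX_READY
+ * or RESET) on a pipe endpoint via the microvisor system call.
+ */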
+static okl4_error_t
+okl4_pipe_control(okl4_kcap_t kcap, uint8_t control)
+{
+ okl4_pipe_control_t x = 0;
+
+ okl4_pipe_control_setdoop(&x, true);
+ okl4_pipe_control_setoperation(&x, control);
+ return _okl4_sys_pipe_control(kcap, x);
+}
+
+static irqreturn_t
+okl4_pipe_tx_irq(int irq, void *dev)
+{
+ struct okl4_pipe *pipe = dev;
+ okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+ spin_lock(&pipe->pipe_lock);
+ if (okl4_pipe_state_gettxavailable(&payload))
+ pipe->tx_maybe_avail = true;
+ if (okl4_pipe_state_getreset(&payload)) {
+ pipe->reset = true;
+ pipe->tx_maybe_avail = true;
+ }
+ spin_unlock(&pipe->pipe_lock);
+
+ wake_up_interruptible(&pipe->tx_wait_q);
+ wake_up_interruptible(&pipe->poll_wait_q);
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t
+okl4_pipe_rx_irq(int irq, void *dev)
+{
+ struct okl4_pipe *pipe = dev;
+ okl4_pipe_state_t payload = okl4_get_virq_payload(irq);
+
+ spin_lock(&pipe->pipe_lock);
+ if (okl4_pipe_state_getrxavailable(&payload))
+ pipe->rx_maybe_avail = true;
+ if (okl4_pipe_state_getreset(&payload)) {
+ pipe->reset = true;
+ pipe->rx_maybe_avail = true;
+ }
+ spin_unlock(&pipe->pipe_lock);
+
+ wake_up_interruptible(&pipe->rx_wait_q);
+ wake_up_interruptible(&pipe->poll_wait_q);
+
+ return IRQ_HANDLED;
+}
+
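+/*
+ * Pipe messages are framed as a 32-bit length word followed by up to
+ * max_msg_size bytes of payload, padded to a 32-bit boundary; both the
+ * read and write paths below assume this layout.
+ */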
+static ssize_t
+okl4_pipe_read(struct file *filp, char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct okl4_pipe_mv *priv = filp->private_data;
+ int pipe_id = priv->pipe_id;
+ struct okl4_pipe *pipe = &pipes[pipe_id];
+ struct _okl4_sys_pipe_recv_return recv_return;
+ uint32_t *buffer = NULL;
+ size_t recv = 0;
+
+ if (!count)
+ return 0;
+
+again:
+ if (pipe->reset)
+ return -EPIPE;
+
+ if (!pipe->rx_maybe_avail && (filp->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ if (wait_event_interruptible(pipe->rx_wait_q, pipe->rx_maybe_avail))
+ return -ERESTARTSYS;
+
+ if (mutex_lock_interruptible(&pipe->pipe_mutex))
+ return -ERESTARTSYS;
+
+ /* Receive buffered data first */
+ if (pipe->rx_buf_count) {
+ recv = min(pipe->rx_buf_count, count);
+
+ if (copy_to_user(buf, pipe->rx_buf, recv)) {
+ mutex_unlock(&pipe->pipe_mutex);
+ return -EFAULT;
+ }
+
+ pipe->rx_buf_count -= recv;
+
+ if (pipe->rx_buf_count) {
+ memmove(pipe->rx_buf, pipe->rx_buf + recv,
+ pipe->max_msg_size - recv);
+ }
+
+ buf += recv;
+ count -= recv;
+ if (!count) {
+ mutex_unlock(&pipe->pipe_mutex);
+ return recv;
+ }
+ }
+
+ buffer = kmalloc(pipe->max_msg_size + sizeof(uint32_t), GFP_KERNEL);
+
+ if (!buffer) {
+ mutex_unlock(&pipe->pipe_mutex);
+ return -ENOMEM;
+ }
+
+ while (count) {
+ okl4_error_t ret;
+ size_t size;
+
+ spin_lock_irq(&pipe->pipe_lock);
+ recv_return = _okl4_sys_pipe_recv(pipe->pipe_rx_kcap,
+ pipe->max_msg_size + sizeof(uint32_t),
+ (void *)buffer);
+ ret = recv_return.error;
+
+ if (ret == OKL4_ERROR_PIPE_NOT_READY ||
+ ret == OKL4_ERROR_PIPE_EMPTY) {
+ pipe->rx_maybe_avail = false;
+ if (!recv) {
+ if (!(filp->f_flags & O_NONBLOCK)) {
+ spin_unlock_irq(&pipe->pipe_lock);
+ mutex_unlock(&pipe->pipe_mutex);
+ kfree(buffer);
+ goto again;
+ }
+ recv = -EAGAIN;
+ }
+ goto error;
+ } else if (ret != OKL4_OK) {
+ dev_err(&pipe->pdev->dev,
+ "pipe send returned error %d in okl4_pipe driver!\n",
+ (int)ret);
+ if (!recv)
+ recv = -ENXIO;
+ goto error;
+ }
+
+ spin_unlock_irq(&pipe->pipe_lock);
+
+ size = buffer[0];
+ if (size > pipe->max_msg_size) {
+ /* pipe error */
+ if (!recv)
+ recv = -EPROTO;
+ goto out;
+ }
+
+ /* Save extra received data */
+ if (size > count) {
+ pipe->rx_buf_count = size - count;
+ memcpy(pipe->rx_buf, (char*)&buffer[1] + count,
+ size - count);
+ size = count;
+ }
+
+ if (copy_to_user(buf, &buffer[1], size)) {
+ if (!recv)
+ recv = -EFAULT;
+ goto out;
+ }
+
+ count -= size;
+ buf += size;
+ recv += size;
+ }
+out:
+ mutex_unlock(&pipe->pipe_mutex);
+
+ kfree(buffer);
+ return recv;
+error:
+ spin_unlock_irq(&pipe->pipe_lock);
+ goto out;
+}
+
+static ssize_t
+okl4_pipe_write(struct file *filp, const char __user *buf, size_t count,
+ loff_t *f_pos)
+{
+ struct okl4_pipe_mv *priv = filp->private_data;
+ int pipe_id = priv->pipe_id;
+ struct okl4_pipe *pipe = &pipes[pipe_id];
+ uint32_t *buffer = NULL;
+ size_t sent = 0;
+
+ if (!count)
+ return 0;
+
+again:
+ if (pipe->reset)
+ return -EPIPE;
+
+ if (!pipe->tx_maybe_avail && (filp->f_flags & O_NONBLOCK))
+ return -EAGAIN;
+
+ if (wait_event_interruptible(pipe->tx_wait_q, pipe->tx_maybe_avail))
+ return -ERESTARTSYS;
+
+ if (mutex_lock_interruptible(&pipe->pipe_mutex))
+ return -ERESTARTSYS;
+
+ buffer = kmalloc(pipe->max_msg_size + sizeof(uint32_t), GFP_KERNEL);
+
+ if (!buffer) {
+ mutex_unlock(&pipe->pipe_mutex);
+ return -ENOMEM;
+ }
+
+ while (count) {
+ okl4_error_t ret;
+ size_t size = min(count, pipe->max_msg_size);
+ size_t pipe_size = roundup(size + sizeof(uint32_t),
+ sizeof(uint32_t));
+
+ if (copy_from_user(&buffer[1], buf, size)) {
+ if (!sent)
+ sent = -EFAULT;
+ break;
+ }
+
+ buffer[0] = size;
+
+ spin_lock_irq(&pipe->pipe_lock);
+ ret = _okl4_sys_pipe_send(pipe->pipe_tx_kcap, pipe_size,
+ (void *)buffer);
+ if (ret == OKL4_ERROR_PIPE_NOT_READY ||
+ ret == OKL4_ERROR_PIPE_FULL) {
+ pipe->tx_maybe_avail = false;
+ spin_unlock_irq(&pipe->pipe_lock);
+ if (!sent) {
+ if (filp->f_flags & O_NONBLOCK) {
+ sent = -EAGAIN;
+ break;
+ }
+ mutex_unlock(&pipe->pipe_mutex);
+ kfree(buffer);
+ goto again;
+ }
+ break;
+ } else if (ret != OKL4_OK) {
+ dev_err(&pipe->pdev->dev,
+ "pipe send returned error %d in okl4_pipe driver!\n",
+ (int)ret);
+ if (!sent)
+ sent = -ENXIO;
+ spin_unlock_irq(&pipe->pipe_lock);
+ break;
+ }
+ spin_unlock_irq(&pipe->pipe_lock);
+
+ count -= size;
+ buf += size;
+ sent += size;
+ }
+ mutex_unlock(&pipe->pipe_mutex);
+
+ kfree(buffer);
+ return sent;
+}
+
+static unsigned int
+okl4_pipe_poll(struct file *filp, struct poll_table_struct *poll_table)
+{
+ struct okl4_pipe_mv *priv = filp->private_data;
+ int pipe_id = priv->pipe_id;
+ struct okl4_pipe *pipe = &pipes[pipe_id];
+ unsigned int ret = 0;
+
+ poll_wait(filp, &pipe->poll_wait_q, poll_table);
+
+ spin_lock_irq(&pipe->pipe_lock);
+
+ if (pipe->rx_maybe_avail)
+ ret |= POLLIN | POLLRDNORM;
+ if (pipe->tx_maybe_avail)
+ ret |= POLLOUT | POLLWRNORM;
+ if (pipe->reset)
+ ret = POLLHUP;
+
+ spin_unlock_irq(&pipe->pipe_lock);
+
+ return ret;
+}
+
+static int
+okl4_pipe_open(struct inode *inode, struct file *filp)
+{
+ struct okl4_pipe *pipe = container_of(inode->i_cdev,
+ struct okl4_pipe, cdev);
+ struct okl4_pipe_mv *priv = dev_get_drvdata(&pipe->pdev->dev);
+
+ filp->private_data = priv;
+ if (!pipe->ref_count) {
+ pipe->rx_buf = kmalloc(pipe->max_msg_size, GFP_KERNEL);
+ if (!pipe->rx_buf)
+ return -ENOMEM;
+
+ mutex_init(&pipe->pipe_mutex);
+ spin_lock_init(&pipe->pipe_lock);
+
+ pipe->rx_buf_count = 0;
+ pipe->reset = false;
+ pipe->tx_maybe_avail = true;
+ pipe->rx_maybe_avail = true;
+
+ okl4_pipe_control(pipe->pipe_tx_kcap,
+ OKL4_PIPE_CONTROL_OP_SET_TX_READY);
+ okl4_pipe_control(pipe->pipe_rx_kcap,
+ OKL4_PIPE_CONTROL_OP_SET_RX_READY);
+ }
+ pipe->ref_count++;
+ return 0;
+}
+
+static int
+okl4_pipe_close(struct inode *inode, struct file *filp)
+{
+ struct okl4_pipe *pipe = container_of(inode->i_cdev,
+ struct okl4_pipe, cdev);
+
+ pipe->ref_count--;
+ if (!pipe->ref_count) {
+ okl4_pipe_control(pipe->pipe_rx_kcap,
+ OKL4_PIPE_CONTROL_OP_RESET);
+ okl4_pipe_control(pipe->pipe_tx_kcap,
+ OKL4_PIPE_CONTROL_OP_RESET);
+
+ if (pipe->rx_buf)
+ kfree(pipe->rx_buf);
+ pipe->rx_buf = NULL;
+ pipe->rx_buf_count = 0;
+ }
+
+ return 0;
+}
+
+static const struct file_operations okl4_pipe_fops = {
+ .owner = THIS_MODULE,
+ .read = okl4_pipe_read,
+ .write = okl4_pipe_write,
+ .open = okl4_pipe_open,
+ .release = okl4_pipe_close,
+ .poll = okl4_pipe_poll,
+};
+
+static int __devinit
+okl4_pipe_probe(struct platform_device *pdev)
+{
+ struct okl4_pipe *pipe;
+ int err, pipe_id;
+ struct okl4_pipe_mv *priv;
+ dev_t dev_num;
+ struct device *device = NULL;
+ u32 reg[2];
+ struct resource *irq;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(struct okl4_pipe_mv),
+ GFP_KERNEL);
+ if (priv == NULL) {
+ err = -ENOMEM;
+ goto fail_alloc_priv;
+ }
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ pipe_id = of_alias_get_id(pdev->dev.of_node, "pipe");
+ if (pipe_id < 0 || pipe_id >= MAX_PIPES) {
+ err = -ENXIO;
+ goto fail_pipe_id;
+ }
+
+ if (of_property_read_u32_array(pdev->dev.of_node, "reg", reg, 2)) {
+ dev_err(&pdev->dev, "need 2 reg resources\n");
+ err = -ENODEV;
+ goto fail_pipe_id;
+ }
+
+ /* Populate the private structure */
+ priv->pipe_id = pipe_id;
+
+ pipe = &pipes[pipe_id];
+
+ /* Set up and register the pipe device */
+ pipe->pdev = pdev;
+ dev_set_name(&pdev->dev, "%s%d", DEVICE_NAME, (int)pipe_id);
+
+ pipe->ref_count = 0;
+ pipe->pipe_tx_kcap = reg[0];
+ pipe->pipe_rx_kcap = reg[1];
+ pipe->max_msg_size = 64;
+
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+ if (!irq) {
+ dev_err(&pdev->dev, "no tx irq resource?\n");
+ err = -ENODEV;
+ goto fail_irq_resource;
+ }
+ pipe->tx_irq = irq->start;
+ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
+ if (!irq) {
+ dev_err(&pdev->dev, "no rx irq resource?\n");
+ err = -ENODEV;
+ goto fail_irq_resource;
+ }
+ pipe->rx_irq = irq->start;
+
+ pipe->write_buf = kmalloc(sizeof(*pipe->write_buf), GFP_KERNEL);
+ if (!pipe->write_buf) {
+ dev_err(&pdev->dev, "cannot allocate write buffer\n");
+ err = -ENOMEM;
+ goto fail_malloc_write;
+ }
+
+ init_waitqueue_head(&pipe->rx_wait_q);
+ init_waitqueue_head(&pipe->tx_wait_q);
+ init_waitqueue_head(&pipe->poll_wait_q);
+
+ err = devm_request_irq(&pdev->dev, pipe->rx_irq,
+ okl4_pipe_rx_irq, 0, dev_name(&pdev->dev),
+ pipe);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register rx irq %d: %d\n",
+ (int)pipe->rx_irq, (int)err);
+ goto fail_request_rx_irq;
+ }
+
+ err = devm_request_irq(&pdev->dev, pipe->tx_irq,
+ okl4_pipe_tx_irq, 0, dev_name(&pdev->dev),
+ pipe);
+ if (err) {
+ dev_err(&pdev->dev, "cannot register tx irq %d: %d\n",
+ (int)pipe->tx_irq, (int)err);
+ goto fail_request_tx_irq;
+ }
+
+ dev_num = MKDEV(okl4_pipe_major, pipe_id);
+
+ cdev_init(&pipe->cdev, &okl4_pipe_fops);
+ pipe->cdev.owner = THIS_MODULE;
+ err = cdev_add(&pipe->cdev, dev_num, 1);
+ if (err) {
+ dev_err(&pdev->dev, "cannot add device: %d\n", (int)err);
+ goto fail_cdev_add;
+ }
+
+ device = device_create(okl4_pipe_class, NULL, dev_num, NULL,
+ DEVICE_NAME "%d", pipe_id);
+ if (IS_ERR(device)) {
+ err = PTR_ERR(device);
+ dev_err(&pdev->dev, "cannot create device: %d\n", (int)err);
+ goto fail_device_create;
+ }
+
+ return 0;
+
+fail_device_create:
+ cdev_del(&pipe->cdev);
+fail_cdev_add:
+ devm_free_irq(&pdev->dev, pipe->tx_irq, pipe);
+fail_request_tx_irq:
+ devm_free_irq(&pdev->dev, pipe->rx_irq, pipe);
+fail_request_rx_irq:
+ kfree(pipe->write_buf);
+fail_malloc_write:
+fail_irq_resource:
+fail_pipe_id:
+ dev_set_drvdata(&pdev->dev, NULL);
+ devm_kfree(&pdev->dev, priv);
+fail_alloc_priv:
+ return err;
+}
+
+static int __devexit
+okl4_pipe_remove(struct platform_device *pdev)
+{
+ struct okl4_pipe *pipe;
+ struct okl4_pipe_mv *priv = dev_get_drvdata(&pdev->dev);
+
+ if (priv->pipe_id < 0 || priv->pipe_id >= MAX_PIPES)
+ return -ENXIO;
+
+ pipe = &pipes[priv->pipe_id];
+
+ cdev_del(&pipe->cdev);
+
+ devm_free_irq(&pdev->dev, pipe->tx_irq, pipe);
+ devm_free_irq(&pdev->dev, pipe->rx_irq, pipe);
+
+ kfree(pipe->write_buf);
+
+ dev_set_drvdata(&pdev->dev, NULL);
+ devm_kfree(&pdev->dev, priv);
+
+ return 0;
+}
+
+static const struct of_device_id okl4_pipe_match[] = {
+ {
+ .compatible = "okl,pipe",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, okl4_pipe_match);
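+
+/*
+ * Illustrative device tree fragment for this driver (names and values are
+ * examples only): the two reg cells are the tx and rx pipe capabilities,
+ * and the two interrupt resources are the tx and rx virqs, in that order.
+ * The "pipe" alias is used by of_alias_get_id() in probe.
+ *
+ *	aliases {
+ *		pipe0 = &hyp_pipe0;
+ *	};
+ *
+ *	hyp_pipe0: pipe@0 {
+ *		compatible = "okl,pipe";
+ *		reg = <0x10 0x11>;
+ *		interrupts = <...tx virq...>, <...rx virq...>;
+ *	};
+ */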
+
+static struct platform_driver okl4_pipe_driver = {
+ .probe = okl4_pipe_probe,
+ .remove = __devexit_p(okl4_pipe_remove),
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = okl4_pipe_match,
+ },
+};
+
+static int __init
+okl4_pipe_init(void)
+{
+ int err;
+ dev_t dev_num = 0;
+
+ err = alloc_chrdev_region(&dev_num, 0, MAX_PIPES, DEVICE_NAME);
+ if (err < 0) {
+ printk("%s: cannot allocate device region\n", __func__);
+ goto fail_alloc_chrdev_region;
+ }
+ okl4_pipe_major = MAJOR(dev_num);
+
+ okl4_pipe_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(okl4_pipe_class)) {
+ err = PTR_ERR(okl4_pipe_class);
+ goto fail_class_create;
+ }
+
+ /* Register the driver with the microvisor bus */
+ err = platform_driver_register(&okl4_pipe_driver);
+ if (err)
+ goto fail_driver_register;
+
+ return 0;
+
+fail_driver_register:
+ class_destroy(okl4_pipe_class);
+fail_class_create:
+ unregister_chrdev_region(dev_num, MAX_PIPES);
+fail_alloc_chrdev_region:
+ return err;
+}
+
+static void __exit
+okl4_pipe_exit(void)
+{
+ dev_t dev_num = MKDEV(okl4_pipe_major, 0);
+
+ platform_driver_unregister(&okl4_pipe_driver);
+ class_destroy(okl4_pipe_class);
+ unregister_chrdev_region(dev_num, MAX_PIPES);
+}
+
+module_init(okl4_pipe_init);
+module_exit(okl4_pipe_exit);
+
+MODULE_DESCRIPTION("OKL4 pipe driver");
+MODULE_AUTHOR("John Clarke <johnc@cog.systems>");
diff --git a/drivers/char/vs_serial_client.c b/drivers/char/vs_serial_client.c
new file mode 100644
index 0000000..a0bf1cc
--- /dev/null
+++ b/drivers/char/vs_serial_client.c
@@ -0,0 +1,132 @@
+/*
+ * drivers/char/vs_serial_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vService client driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/client.h>
+
+#include "vs_serial_common.h"
+
+#define client_state_to_port(state) \
+ container_of(state, struct vtty_port, u.vs_client)
+
+static struct vs_mbuf *vs_serial_client_alloc_msg_buf(struct vtty_port *port,
+ struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+ return vs_client_serial_serial_alloc_msg(&port->u.vs_client, pbuf,
+ gfp_flags);
+}
+
+static void vs_serial_client_free_msg_buf(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+ vs_client_serial_serial_free_msg(&port->u.vs_client, pbuf, mbuf);
+}
+
+static int vs_serial_client_send_msg_buf(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+ return vs_client_serial_serial_send_msg(&port->u.vs_client, *pbuf,
+ mbuf);
+}
+
+static bool vs_serial_client_is_vservices_running(struct vtty_port *port)
+{
+ return VSERVICE_BASE_STATE_IS_RUNNING(port->u.vs_client.state.base);
+}
+
+static struct vtty_port_ops client_port_ops = {
+ .alloc_msg_buf = vs_serial_client_alloc_msg_buf,
+ .free_msg_buf = vs_serial_client_free_msg_buf,
+ .send_msg_buf = vs_serial_client_send_msg_buf,
+ .is_running = vs_serial_client_is_vservices_running,
+};
+
+static struct vs_client_serial_state *
+vs_serial_client_alloc(struct vs_service_device *service)
+{
+ struct vtty_port *port;
+
+ port = vs_serial_alloc_port(service, &client_port_ops);
+ if (!port)
+ return NULL;
+
+ dev_set_drvdata(&service->dev, port);
+ return &port->u.vs_client;
+}
+
+static void vs_serial_client_release(struct vs_client_serial_state *_state)
+{
+ vs_serial_release(client_state_to_port(_state));
+}
+
+static void vs_serial_client_closed(struct vs_client_serial_state *_state)
+{
+ vs_serial_reset(client_state_to_port(_state));
+}
+
+static void vs_serial_client_opened(struct vs_client_serial_state *_state)
+{
+ struct vtty_port *port = client_state_to_port(_state);
+
+ dev_dbg(&port->service->dev, "ack_open\n");
+ port->max_transfer_size = _state->packet_size;
+}
+
+static int
+vs_serial_client_handle_message(struct vs_client_serial_state *_state,
+ struct vs_pbuf data, struct vs_mbuf *mbuf)
+{
+ return vs_serial_handle_message(client_state_to_port(_state), mbuf,
+ &data);
+}
+
+static struct vs_client_serial vs_client_serial_driver = {
+ .rx_atomic = true,
+ .alloc = vs_serial_client_alloc,
+ .release = vs_serial_client_release,
+ .closed = vs_serial_client_closed,
+ .opened = vs_serial_client_opened,
+ .serial = {
+ .msg_msg = vs_serial_client_handle_message,
+ },
+};
+
+static int __init vs_serial_client_init(void)
+{
+ return vservice_serial_client_register(&vs_client_serial_driver,
+ "vserial");
+}
+
+static void __exit vs_serial_client_exit(void)
+{
+ vservice_serial_client_unregister(&vs_client_serial_driver);
+}
+
+module_init(vs_serial_client_init);
+module_exit(vs_serial_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/char/vs_serial_common.h b/drivers/char/vs_serial_common.h
new file mode 100644
index 0000000..2fe7d28
--- /dev/null
+++ b/drivers/char/vs_serial_common.h
@@ -0,0 +1,91 @@
+/*
+ * drivers/char/vs_serial_common.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef _VS_SERIAL_COMMON_H
+#define _VS_SERIAL_COMMON_H
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/mutex.h>
+#include <linux/completion.h>
+#include <linux/console.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/protocol/serial/client.h>
+
+#define OUTBUFFER_SIZE 1024
+#define vtty_list_last_entry(ptr, type, member) \
+ list_entry((ptr)->prev, type, member)
+
+struct vtty_port;
+struct vs_service_device;
+
+struct vtty_port_ops {
+ struct vs_mbuf *(*alloc_msg_buf)(struct vtty_port *port,
+ struct vs_pbuf *pbuf, gfp_t gfp_flags);
+ void (*free_msg_buf)(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+ int (*send_msg_buf)(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+ bool (*is_running)(struct vtty_port *port);
+};
+
+struct vtty_port {
+ union {
+ struct vs_client_serial_state vs_client;
+ struct vs_server_serial_state vs_server;
+ } u;
+
+ struct vs_service_device *service;
+ int port_num;
+
+ struct tty_driver *vtty_driver;
+
+ struct vtty_port_ops ops;
+
+ /* output data */
+ bool doing_release;
+
+ int max_transfer_size;
+
+ /* Tracks if tty layer can receive data from driver */
+ bool tty_canrecv;
+
+ /*
+ * List of pending incoming buffers from the vServices stack. If we
+ * receive a buffer, but cannot write it to the tty layer then we
+ * queue it on this list to handle later. in_lock protects access to
+ * the pending_in_packets list and the tty_canrecv field.
+ */
+ struct list_head pending_in_packets;
+ spinlock_t in_lock;
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+ struct console console;
+#endif
+
+ struct tty_port port;
+};
+
+extern struct vtty_port *
+vs_serial_alloc_port(struct vs_service_device *service,
+ struct vtty_port_ops *port_ops);
+extern void vs_serial_release(struct vtty_port *port);
+extern void vs_serial_reset(struct vtty_port *port);
+extern int vs_serial_handle_message(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf);
+
+#endif /* _VS_SERIAL_COMMON_H */
diff --git a/drivers/char/vs_serial_server.c b/drivers/char/vs_serial_server.c
new file mode 100644
index 0000000..d4a169e
--- /dev/null
+++ b/drivers/char/vs_serial_server.c
@@ -0,0 +1,152 @@
+/*
+ * drivers/char/vs_serial_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vService server driver
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+
+#include "vs_serial_common.h"
+
+#define server_state_to_port(state) \
+ container_of(state, struct vtty_port, u.vs_server)
+
+static struct vs_mbuf *vs_serial_server_alloc_msg_buf(struct vtty_port *port,
+ struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+ return vs_server_serial_serial_alloc_msg(&port->u.vs_server, pbuf,
+ gfp_flags);
+}
+
+static void vs_serial_server_free_msg_buf(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+ vs_server_serial_serial_free_msg(&port->u.vs_server, pbuf, mbuf);
+}
+
+static int vs_serial_server_send_msg_buf(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf)
+{
+ return vs_server_serial_serial_send_msg(&port->u.vs_server, *pbuf, mbuf);
+}
+
+static bool vs_serial_server_is_vservices_running(struct vtty_port *port)
+{
+ return VSERVICE_BASE_STATE_IS_RUNNING(port->u.vs_server.state.base);
+}
+
+static struct vtty_port_ops server_port_ops = {
+ .alloc_msg_buf = vs_serial_server_alloc_msg_buf,
+ .free_msg_buf = vs_serial_server_free_msg_buf,
+ .send_msg_buf = vs_serial_server_send_msg_buf,
+ .is_running = vs_serial_server_is_vservices_running,
+};
+
+static struct vs_server_serial_state *
+vs_serial_server_alloc(struct vs_service_device *service)
+{
+ struct vtty_port *port;
+
+ port = vs_serial_alloc_port(service, &server_port_ops);
+ if (!port)
+ return NULL;
+
+ dev_set_drvdata(&service->dev, port);
+ return &port->u.vs_server;
+}
+
+static void vs_serial_server_release(struct vs_server_serial_state *_state)
+{
+ vs_serial_release(server_state_to_port(_state));
+}
+
+static void vs_serial_server_closed(struct vs_server_serial_state *_state)
+{
+ vs_serial_reset(server_state_to_port(_state));
+}
+
+static int
+vs_serial_server_handle_message(struct vs_server_serial_state *_state,
+ struct vs_pbuf data, struct vs_mbuf *mbuf)
+{
+ return vs_serial_handle_message(server_state_to_port(_state), mbuf,
+ &data);
+}
+
+static vs_server_response_type_t
+vs_serial_server_req_open(struct vs_server_serial_state *_state)
+{
+ struct vtty_port *port = server_state_to_port(_state);
+
+ dev_dbg(&port->service->dev, "req_open\n");
+
+ /* FIXME: Jira ticket SDK-3521 - ryanm. */
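+	/*
+	 * Leave room for the message framing; the 8 bytes reserved here are
+	 * assumed to cover the serial protocol's per-message header.
+	 */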
+ port->max_transfer_size = vs_service_max_mbuf_size(port->service) - 8;
+ _state->packet_size = port->max_transfer_size;
+
+ return VS_SERVER_RESP_SUCCESS;
+}
+
+static vs_server_response_type_t
+vs_serial_server_req_close(struct vs_server_serial_state *_state)
+{
+ struct vtty_port *port = server_state_to_port(_state);
+
+ dev_dbg(&port->service->dev, "req_close\n");
+
+ return VS_SERVER_RESP_SUCCESS;
+}
+
+static struct vs_server_serial vs_server_serial_driver = {
+ .rx_atomic = true,
+ .alloc = vs_serial_server_alloc,
+ .release = vs_serial_server_release,
+ .closed = vs_serial_server_closed,
+ .open = vs_serial_server_req_open,
+ .close = vs_serial_server_req_close,
+ .serial = {
+ .msg_msg = vs_serial_server_handle_message,
+ },
+
+ /* Large default quota for batching data messages */
+ .in_quota_best = 16,
+ .out_quota_best = 16,
+};
+
+static int __init vs_serial_server_init(void)
+{
+ return vservice_serial_server_register(&vs_server_serial_driver,
+ "vserial");
+}
+
+static void __exit vs_serial_server_exit(void)
+{
+ vservice_serial_server_unregister(&vs_server_serial_driver);
+}
+
+module_init(vs_serial_server_init);
+module_exit(vs_serial_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/char/vservices_serial.c b/drivers/char/vservices_serial.c
new file mode 100644
index 0000000..0194eac
--- /dev/null
+++ b/drivers/char/vservices_serial.c
@@ -0,0 +1,634 @@
+/*
+ * drivers/char/vservices_serial.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Serial vServices core driver (shared by the client and server ends)
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/bitmap.h>
+#include <linux/tty.h>
+#include <linux/tty_driver.h>
+#include <linux/tty_flip.h>
+#include <linux/console.h>
+#include <linux/version.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/mutex.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/service.h>
+#include <vservices/wait.h>
+
+#include "vs_serial_common.h"
+
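+/*
+ * An incoming message that could not be fully written to the tty;
+ * @offset tracks how much of @pbuf has already been consumed.
+ */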
+struct vtty_in_packet {
+ struct vs_pbuf pbuf;
+ size_t offset;
+};
+
+static int max_ttys = CONFIG_VSERVICES_VTTY_COUNT;
+static unsigned long *alloced_ttys;
+module_param(max_ttys, int, S_IRUGO);
+
+static struct tty_driver *vtty_driver;
+
+static DEFINE_MUTEX(tty_bitmap_lock);
+
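+/*
+ * Map a service device back to its vtty_port. Works for both ends of
+ * the service, since the client and server states share a union inside
+ * struct vtty_port.
+ */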
+static struct vtty_port *dev_to_port(struct device *dev)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+#if defined(CONFIG_VSERVICES_SERIAL_SERVER) || \
+	defined(CONFIG_VSERVICES_SERIAL_SERVER_MODULE)
+ if (service->is_server) {
+ struct vs_server_serial_state *server = dev_get_drvdata(dev);
+ return container_of(server, struct vtty_port, u.vs_server);
+ }
+#endif
+#if defined(CONFIG_VSERVICES_SERIAL_CLIENT) || \
+	defined(CONFIG_VSERVICES_SERIAL_CLIENT_MODULE)
+ if (!service->is_server) {
+ struct vs_client_serial_state *client = dev_get_drvdata(dev);
+ return container_of(client, struct vtty_port, u.vs_client);
+ }
+#endif
+ /* should never get here */
+ WARN_ON(1);
+ return NULL;
+}
+
+static struct vtty_port *port_from_tty(struct tty_struct *tty)
+{
+ return dev_to_port(tty->dev->parent);
+}
+
+static int vtty_install(struct tty_driver *driver, struct tty_struct *tty)
+{
+ struct vtty_port *port;
+
+ if (tty->index < 0 || !test_bit(tty->index, alloced_ttys))
+ return -ENXIO;
+
+ port = port_from_tty(tty);
+
+ if (!port)
+ return -ENXIO;
+
+ tty->driver_data = port;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+ if (tty->port)
+ tty->port->low_latency = 0;
+#else
+ tty->low_latency = 0;
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,7,0)
+ tty_port_install(&port->port, driver, tty);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,0)
+ tty->port = &port->port;
+ tty_standard_install(driver, tty);
+#else
+ tty->port = &port->port;
+ if (tty_init_termios(tty) != 0)
+ return -ENOMEM;
+
+ tty_driver_kref_get(driver);
+ tty->count++;
+ driver->ttys[tty->index] = tty;
+#endif
+
+ return 0;
+}
+
+static int vtty_open(struct tty_struct *tty, struct file *file)
+{
+ struct vtty_port *port = tty->driver_data;
+ return tty_port_open(&port->port, tty, file);
+}
+
+static void vtty_close(struct tty_struct *tty, struct file *file)
+{
+ struct vtty_port *port = tty->driver_data;
+ if (port)
+ tty_port_close(&port->port, tty, file);
+}
+
+static void vtty_shutdown(struct tty_port *port)
+{
+ struct vtty_port *vtty_port =
+ container_of(port, struct vtty_port, port);
+
+ if (vtty_port->doing_release)
+		kfree(vtty_port);
+}
+
+static int vtty_write_room(struct tty_struct *tty)
+{
+ struct vtty_port *port = tty->driver_data;
+
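+	/* Each free mbuf can carry up to max_transfer_size bytes. */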
+ return vs_service_send_mbufs_available(port->service) *
+ port->max_transfer_size;
+}
+
+static struct vs_mbuf *vserial_alloc_send_buffer(struct vtty_port *port,
+ const unsigned char *buf, size_t size, struct vs_pbuf *pbuf,
+ gfp_t gfp_flags)
+{
+ struct vs_mbuf *mbuf;
+ ssize_t ret;
+
+ mbuf = port->ops.alloc_msg_buf(port, pbuf, gfp_flags);
+ if (IS_ERR(mbuf)) {
+ ret = PTR_ERR(mbuf);
+ goto fail;
+ }
+
+ ret = vs_pbuf_resize(pbuf, size);
+ if (ret < (ssize_t)size)
+ goto fail_free_buf;
+
+ ret = vs_pbuf_copyin(pbuf, 0, buf, size);
+ if (ret < (ssize_t)size)
+ goto fail_free_buf;
+
+ return mbuf;
+
+fail_free_buf:
+ port->ops.free_msg_buf(port, mbuf, pbuf);
+fail:
+ return ERR_PTR(ret);
+}
+
+static int vtty_write(struct tty_struct *tty, const unsigned char *buf,
+ int count)
+{
+ struct vtty_port *port;
+ size_t sent_bytes = 0, size;
+ struct vs_mbuf *mbuf;
+ struct vs_pbuf pbuf;
+ int err;
+
+ if (WARN_ON(!tty || !buf))
+ return -EINVAL;
+
+ port = tty->driver_data;
+ if (!port->ops.is_running(port)) {
+ dev_dbg(&port->service->dev, "tty is not running!");
+ return 0;
+ }
+
+ /*
+ * We need to break our message up into chunks of
+ * port->max_transfer_size.
+ */
+ dev_dbg(&port->service->dev, "Writing %d bytes\n", count);
+ while (sent_bytes < count) {
+ size = min_t(size_t, count - sent_bytes,
+ port->max_transfer_size);
+
+ /*
+ * Passing &port->u.vs_client here works for both the client
+ * and the server since vs_client and vs_server are in the
+ * same union, and therefore have the same address.
+ */
+ mbuf = vs_service_waiting_alloc(&port->u.vs_client,
+ vserial_alloc_send_buffer(port,
+ buf + sent_bytes, size, &pbuf, GFP_KERNEL));
+ if (IS_ERR(mbuf)) {
+ dev_err(&port->service->dev,
+ "Failed to alloc mbuf of %zu bytes: %ld - resetting service\n",
+ size, PTR_ERR(mbuf));
+ vs_service_reset(port->service, port->service);
+ return -EIO;
+ }
+
+ vs_service_state_lock(port->service);
+ err = port->ops.send_msg_buf(port, mbuf, &pbuf);
+ vs_service_state_unlock(port->service);
+ if (err) {
+ port->ops.free_msg_buf(port, mbuf, &pbuf);
+ dev_err(&port->service->dev,
+ "send failed: %d - resetting service",
+ err);
+ vs_service_reset(port->service, port->service);
+ return -EIO;
+ }
+
+ dev_dbg(&port->service->dev, "Sent %zu bytes (%zu/%d)\n",
+ size, sent_bytes + size, count);
+ sent_bytes += size;
+ }
+
+ dev_dbg(&port->service->dev, "Write complete - sent %zu/%d bytes\n",
+ sent_bytes, count);
+ return sent_bytes;
+}
+
+static int vtty_put_char(struct tty_struct *tty, unsigned char ch)
+{
+ return vtty_write(tty, &ch, 1);
+}
+
+static size_t vs_serial_send_pbuf_to_tty(struct vtty_port *port,
+ struct vs_pbuf *pbuf, size_t offset)
+{
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
+ size_t space, size;
+
+ lockdep_assert_held(&port->in_lock);
+
+ size = vs_pbuf_size(pbuf) - offset;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+ space = tty_buffer_request_room(tty->port, size);
+#else
+ space = tty_buffer_request_room(tty, size);
+#endif
+ if (space) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,9,0)
+ tty_insert_flip_string(tty->port, pbuf->data + offset, space);
+ tty_flip_buffer_push(tty->port);
+#else
+ tty_insert_flip_string(tty, pbuf->data + offset, space);
+ tty_flip_buffer_push(tty);
+#endif
+ }
+
+ tty_kref_put(tty);
+
+ /* Return the number of bytes written */
+ return space;
+}
+
+static void vtty_throttle(struct tty_struct *tty)
+{
+ struct vtty_port *port = tty->driver_data;
+
+ dev_dbg(&port->service->dev, "throttle\n");
+
+ spin_lock_bh(&port->in_lock);
+ port->tty_canrecv = false;
+ spin_unlock_bh(&port->in_lock);
+}
+
+static void vtty_unthrottle(struct tty_struct *tty)
+{
+ struct vtty_port *port = tty->driver_data;
+ struct vtty_in_packet *packet;
+ struct vs_mbuf *mbuf;
+ size_t sent;
+
+ dev_dbg(&port->service->dev, "unthrottle\n");
+
+ spin_lock_bh(&port->in_lock);
+
+ while (!list_empty(&port->pending_in_packets)) {
+ mbuf = list_first_entry(&port->pending_in_packets,
+ struct vs_mbuf, queue);
+ packet = mbuf->priv;
+
+ sent = vs_serial_send_pbuf_to_tty(port, &packet->pbuf,
+ packet->offset);
+ packet->offset += sent;
+ if (packet->offset < vs_pbuf_size(&packet->pbuf)) {
+ /*
+ * Only wrote part of the buffer. This means that we
+ * still have pending data that cannot be written to
+ * the tty at this time. The tty layer will rethrottle
+ * and this function will be called again when the tty
+ * layer is next able to handle data and we can write
+ * the remainder of the buffer.
+ */
+ dev_dbg(&port->service->dev,
+ "unthrottle: Only wrote %zu (%zu/%zu) bytes\n",
+ sent, packet->offset,
+ vs_pbuf_size(&packet->pbuf));
+ break;
+ }
+
+ dev_dbg(&port->service->dev,
+ "unthrottle: wrote %zu (%zu/%zu) bytes\n",
+ sent, packet->offset,
+ vs_pbuf_size(&packet->pbuf));
+
+ /* Wrote the whole buffer - free it */
+ list_del(&mbuf->queue);
+ port->ops.free_msg_buf(port, mbuf, &packet->pbuf);
+ kfree(packet);
+ }
+
+ port->tty_canrecv = true;
+ spin_unlock_bh(&port->in_lock);
+}
+
+static const struct tty_port_operations vtty_port_ops = {
+ .shutdown = vtty_shutdown,
+};
+
+static const struct tty_operations vtty_ops = {
+ .install = vtty_install,
+ .open = vtty_open,
+ .close = vtty_close,
+ .write = vtty_write,
+ .write_room = vtty_write_room,
+ .put_char = vtty_put_char,
+ .throttle = vtty_throttle,
+	.unthrottle = vtty_unthrottle,
+};
+
+static int vs_serial_queue_incoming_packet(struct vtty_port *port,
+ struct vs_mbuf *mbuf, struct vs_pbuf *pbuf, size_t offset)
+{
+ struct vtty_in_packet *packet;
+
+ lockdep_assert_held(&port->in_lock);
+
+ packet = kzalloc(sizeof(*packet), GFP_ATOMIC);
+ if (!packet) {
+ /*
+ * Uh oh, we are seriously out of memory. The incoming data
+ * will be lost.
+ */
+ return -ENOMEM;
+ }
+
+ dev_dbg(&port->service->dev, "Queuing packet %zu bytes, offset %zu\n",
+ vs_pbuf_size(pbuf), offset);
+ mbuf->priv = packet;
+ memcpy(&packet->pbuf, pbuf, sizeof(*pbuf));
+ packet->offset = offset;
+
+ list_add_tail(&mbuf->queue, &port->pending_in_packets);
+ return 0;
+}
+
+int vs_serial_handle_message(struct vtty_port *port, struct vs_mbuf *mbuf,
+ struct vs_pbuf *pbuf)
+{
+ struct tty_struct *tty = tty_port_tty_get(&port->port);
+ bool queue_packet = false;
+ size_t sent = 0;
+ int err;
+
+ if (!tty) {
+ dev_dbg(&port->service->dev,
+ "tty not open. Dropping %zu chars\n",
+ pbuf->size);
+ port->ops.free_msg_buf(port, mbuf, pbuf);
+ return 0;
+ }
+
+ dev_dbg(&port->service->dev, "Incoming message - len = %zu\n",
+ pbuf->size);
+
+ spin_lock(&port->in_lock);
+ if (!port->tty_canrecv || !list_empty(&port->pending_in_packets)) {
+ /*
+ * We cannot send to the tty right now, either because we are
+ * being throttled or because we still have pending data
+ * to write out to the tty. Queue the buffer up so we can
+ * write it later.
+ */
+ dev_dbg(&port->service->dev,
+ "Cannot send (canrecv = %d, queued = %d) - queuing message\n",
+ port->tty_canrecv,
+ !list_empty(&port->pending_in_packets));
+ queue_packet = true;
+
+ } else {
+ sent = vs_serial_send_pbuf_to_tty(port, pbuf, 0);
+ if (sent < vs_pbuf_size(pbuf)) {
+ /*
+ * Only wrote part of the buffer to the tty. Queue
+ * the buffer to write the rest.
+ */
+ dev_dbg(&port->service->dev,
+ "Sent %zu/%zu bytes to tty - queueing rest\n",
+ sent, vs_pbuf_size(pbuf));
+ queue_packet = true;
+ }
+ }
+
+ if (queue_packet) {
+ /*
+ * Queue the incoming data up. If we are not already throttled,
+ * the tty layer will do so now since it has no room in its
+ * buffers.
+ */
+ err = vs_serial_queue_incoming_packet(port, mbuf, pbuf, sent);
+ if (err) {
+ dev_err(&port->service->dev,
+ "Failed to queue packet - dropping chars\n");
+ port->ops.free_msg_buf(port, mbuf, pbuf);
+ }
+
+ } else {
+ port->ops.free_msg_buf(port, mbuf, pbuf);
+ }
+
+ spin_unlock(&port->in_lock);
+ tty_kref_put(tty);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_serial_handle_message);
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+static int vconsole_setup(struct console *co, char *options)
+{
+ if (co->index < 0 || co->index >= max_ttys)
+ co->index = 0;
+
+ pr_info("OKL4 virtual console init\n");
+
+ return 0;
+}
+
+static void vconsole_write(struct console *co, const char *p, unsigned count)
+{
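+	/* No output path is implemented for the virtual console. */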
+}
+
+static struct tty_driver *vconsole_device(struct console *co, int *index)
+{
+ *index = co->index;
+
+ return vtty_driver;
+}
+#endif /* CONFIG_OKL4_VTTY_CONSOLE */
+
+static void vs_serial_free_buffers(struct vtty_port *port)
+{
+ struct vtty_in_packet *packet;
+ struct vs_mbuf *mbuf;
+
+ /* Free the list of incoming buffers */
+ spin_lock_bh(&port->in_lock);
+ while (!list_empty(&port->pending_in_packets)) {
+ mbuf = list_first_entry(&port->pending_in_packets,
+ struct vs_mbuf, queue);
+ packet = mbuf->priv;
+
+ list_del(&mbuf->queue);
+ port->ops.free_msg_buf(port, mbuf, &packet->pbuf);
+ kfree(packet);
+ }
+ spin_unlock_bh(&port->in_lock);
+}
+
+/* vservices callbacks */
+struct vtty_port *vs_serial_alloc_port(struct vs_service_device *service,
+ struct vtty_port_ops *port_ops)
+{
+ struct vtty_port *port;
+ int port_num;
+
+ mutex_lock(&tty_bitmap_lock);
+ port_num = find_first_zero_bit(alloced_ttys, max_ttys);
+
+ if (port_num >= max_ttys) {
+ mutex_unlock(&tty_bitmap_lock);
+ return NULL;
+ }
+
+ port = kzalloc(sizeof(struct vtty_port), GFP_KERNEL);
+ if (!port) {
+ mutex_unlock(&tty_bitmap_lock);
+ return NULL;
+ }
+
+ port->service = service;
+ port->ops = *port_ops;
+ port->tty_canrecv = true;
+ port->port_num = port_num;
+ INIT_LIST_HEAD(&port->pending_in_packets);
+ spin_lock_init(&port->in_lock);
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+ /* Set up and register the port's console device */
+ strlcpy(port->console.name, "vconvs", sizeof(port->console.name));
+ port->console.write = vconsole_write;
+ port->console.flags = CON_PRINTBUFFER;
+ port->console.device = vconsole_device;
+ port->console.setup = vconsole_setup;
+ port->console.index = port_num;
+
+ register_console(&port->console);
+#endif
+ port->vtty_driver = vtty_driver;
+
+ tty_port_init(&port->port);
+ port->port.ops = &vtty_port_ops;
+
+ tty_register_device(vtty_driver, port_num, &service->dev);
+ bitmap_set(alloced_ttys, port_num, 1);
+ mutex_unlock(&tty_bitmap_lock);
+
+ return port;
+}
+EXPORT_SYMBOL(vs_serial_alloc_port);
+
+void vs_serial_release(struct vtty_port *port)
+{
+ dev_dbg(&port->service->dev, "Release\n");
+
+#ifdef CONFIG_OKL4_VTTY_CONSOLE
+ unregister_console(&port->console);
+#endif
+
+ mutex_lock(&tty_bitmap_lock);
+ bitmap_clear(alloced_ttys, port->port_num, 1);
+ mutex_unlock(&tty_bitmap_lock);
+
+ if (port->port.tty) {
+ tty_vhangup(port->port.tty);
+ tty_kref_put(port->port.tty);
+ }
+
+ vs_serial_free_buffers(port);
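+	/*
+	 * Mark the port so that vtty_shutdown(), invoked via the unregister
+	 * below once the last tty reference drops, knows to free it.
+	 */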
+ port->doing_release = true;
+ tty_unregister_device(vtty_driver, port->port_num);
+}
+EXPORT_SYMBOL_GPL(vs_serial_release);
+
+void vs_serial_reset(struct vtty_port *port)
+{
+ /* Free list of in and out mbufs. */
+ vs_serial_free_buffers(port);
+}
+EXPORT_SYMBOL_GPL(vs_serial_reset);
+
+static int __init vs_serial_init(void)
+{
+ int err;
+
+ if (max_ttys == 0)
+ return -EINVAL;
+
+ alloced_ttys = kzalloc(sizeof(unsigned long) * BITS_TO_LONGS(max_ttys),
+ GFP_KERNEL);
+ if (!alloced_ttys) {
+ err = -ENOMEM;
+ goto fail_alloc_ttys;
+ }
+
+ /* Set up the tty driver. */
+ vtty_driver = alloc_tty_driver(max_ttys);
+ if (!vtty_driver) {
+ err = -ENOMEM;
+ goto fail_alloc_tty_driver;
+ }
+
+ vtty_driver->owner = THIS_MODULE;
+ vtty_driver->driver_name = "okl4-vservices-serial";
+ vtty_driver->name = "ttyVS";
+ vtty_driver->type = TTY_DRIVER_TYPE_SERIAL;
+ vtty_driver->subtype = SERIAL_TYPE_NORMAL;
+ vtty_driver->flags = TTY_DRIVER_REAL_RAW | TTY_DRIVER_DYNAMIC_DEV;
+ vtty_driver->init_termios = tty_std_termios;
+ vtty_driver->num = max_ttys;
+
+ /* These flags don't really matter; just use sensible defaults. */
+ vtty_driver->init_termios.c_cflag =
+ B9600 | CS8 | CREAD | HUPCL | CLOCAL;
+ vtty_driver->init_termios.c_ispeed = 9600;
+ vtty_driver->init_termios.c_ospeed = 9600;
+
+ tty_set_operations(vtty_driver, &vtty_ops);
+
+ err = tty_register_driver(vtty_driver);
+ if (err)
+ goto fail_tty_driver_register;
+
+ return 0;
+
+fail_tty_driver_register:
+ put_tty_driver(vtty_driver);
+fail_alloc_tty_driver:
+ kfree(alloced_ttys);
+fail_alloc_ttys:
+ return err;
+}
+
+static void __exit vs_serial_exit(void)
+{
+ tty_unregister_driver(vtty_driver);
+ put_tty_driver(vtty_driver);
+}
+
+module_init(vs_serial_init);
+module_exit(vs_serial_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Serial Core Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/clk/msm/clock-local2.c b/drivers/clk/msm/clock-local2.c
index f200d0b..eaaa5ab 100644
--- a/drivers/clk/msm/clock-local2.c
+++ b/drivers/clk/msm/clock-local2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1486,8 +1486,8 @@
{
struct rcg_clk *rcg = to_rcg_clk(clk);
struct clk_freq_tbl *pixel_freq = rcg->current_freq;
- int frac_num[] = {3, 2, 4, 1};
- int frac_den[] = {8, 9, 9, 1};
+ int frac_num[] = {1, 3, 2, 4};
+ int frac_den[] = {1, 8, 9, 9};
int delta = 100000;
int i, rc;
diff --git a/drivers/clocksource/timer-ti-32k.c b/drivers/clocksource/timer-ti-32k.c
index cf5b14e..e9ab92d 100644
--- a/drivers/clocksource/timer-ti-32k.c
+++ b/drivers/clocksource/timer-ti-32k.c
@@ -98,6 +98,9 @@
return -ENXIO;
}
+ if (!of_machine_is_compatible("ti,am43"))
+ ti_32k_timer.cs.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
+
ti_32k_timer.counter = ti_32k_timer.base;
/*
diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
index 8f9c8b6..c2b811f 100644
--- a/drivers/cpufreq/cpufreq.c
+++ b/drivers/cpufreq/cpufreq.c
@@ -152,7 +152,7 @@
u64 cur_wall_time;
u64 busy_time;
- cur_wall_time = jiffies64_to_cputime64(get_jiffies_64());
+ cur_wall_time = jiffies64_to_nsecs(get_jiffies_64());
busy_time = kcpustat_cpu(cpu).cpustat[CPUTIME_USER];
busy_time += kcpustat_cpu(cpu).cpustat[CPUTIME_SYSTEM];
@@ -163,9 +163,9 @@
idle_time = cur_wall_time - busy_time;
if (wall)
- *wall = cputime_to_usecs(cur_wall_time);
+ *wall = div_u64(cur_wall_time, NSEC_PER_USEC);
- return cputime_to_usecs(idle_time);
+ return div_u64(idle_time, NSEC_PER_USEC);
}
u64 get_cpu_idle_time(unsigned int cpu, u64 *wall, int io_busy)
diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
index 642dd0f..38d1a82 100644
--- a/drivers/cpufreq/cpufreq_governor.c
+++ b/drivers/cpufreq/cpufreq_governor.c
@@ -152,7 +152,7 @@
if (ignore_nice) {
u64 cur_nice = kcpustat_cpu(j).cpustat[CPUTIME_NICE];
- idle_time += cputime_to_usecs(cur_nice - j_cdbs->prev_cpu_nice);
+ idle_time += div_u64(cur_nice - j_cdbs->prev_cpu_nice, NSEC_PER_USEC);
j_cdbs->prev_cpu_nice = cur_nice;
}
diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
index 06d3abd..b084708 100644
--- a/drivers/cpufreq/cpufreq_stats.c
+++ b/drivers/cpufreq/cpufreq_stats.c
@@ -13,7 +13,6 @@
#include <linux/cpufreq.h>
#include <linux/module.h>
#include <linux/slab.h>
-#include <linux/cputime.h>
static DEFINE_SPINLOCK(cpufreq_stats_lock);
diff --git a/drivers/cpuidle/lpm-levels-legacy.c b/drivers/cpuidle/lpm-levels-legacy.c
index 26cb52a..4e94876 100644
--- a/drivers/cpuidle/lpm-levels-legacy.c
+++ b/drivers/cpuidle/lpm-levels-legacy.c
@@ -100,7 +100,7 @@
static void cluster_unprepare(struct lpm_cluster *cluster,
const struct cpumask *cpu, int child_idx, bool from_idle,
- int64_t time);
+ int64_t time, bool success);
static void cluster_prepare(struct lpm_cluster *cluster,
const struct cpumask *cpu, int child_idx, bool from_idle,
int64_t time);
@@ -736,7 +736,7 @@
static void cluster_unprepare(struct lpm_cluster *cluster,
const struct cpumask *cpu, int child_idx, bool from_idle,
- int64_t end_time)
+ int64_t end_time, bool success)
{
struct lpm_cluster_level *level;
bool first_cpu;
@@ -769,12 +769,12 @@
if (cluster->stats->sleep_time)
cluster->stats->sleep_time = end_time -
cluster->stats->sleep_time;
- lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);
+ lpm_stats_cluster_exit(cluster->stats, cluster->last_level, success);
level = &cluster->levels[cluster->last_level];
if (level->notify_rpm) {
if (sys_pm_ops && sys_pm_ops->exit)
- sys_pm_ops->exit();
+ sys_pm_ops->exit(success);
/* If RPM bumps up CX to turbo, unvote CX turbo vote
* during exit of rpm assisted power collapse to
@@ -807,7 +807,7 @@
cluster_notify(cluster, &cluster->levels[last_level], false);
cluster_unprepare(cluster->parent, &cluster->child_cpus,
- last_level, from_idle, end_time);
+ last_level, from_idle, end_time, success);
unlock_return:
spin_unlock(&cluster->sync_lock);
}
@@ -1039,7 +1039,7 @@
end_time = ktime_to_ns(ktime_get());
lpm_stats_cpu_exit(idx, end_time, success);
- cluster_unprepare(cluster, cpumask, idx, true, end_time);
+ cluster_unprepare(cluster, cpumask, idx, true, end_time, success);
cpu_unprepare(cluster, idx, true);
trace_cpu_idle_exit(idx, success);
@@ -1248,6 +1248,7 @@
struct lpm_cpu *lpm_cpu = cluster->cpu;
const struct cpumask *cpumask = get_cpu_mask(cpu);
int idx;
+ bool success = true;
for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
@@ -1275,13 +1276,13 @@
if (!use_psci)
msm_cpu_pm_enter_sleep(cluster->cpu->levels[idx].mode, false);
else
- psci_enter_sleep(cluster, idx, true);
+ success = psci_enter_sleep(cluster, idx, true);
if (idx > 0)
update_debug_pc_event(CPU_EXIT, idx, true, 0xdeaffeed,
false);
- cluster_unprepare(cluster, cpumask, idx, false, 0);
+ cluster_unprepare(cluster, cpumask, idx, false, 0, success);
cpu_unprepare(cluster, idx, false);
return 0;
}
@@ -1304,7 +1305,8 @@
update_debug_pc_event(CPU_HP_STARTING, cpu,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], false);
- cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
+ cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS,
+ false, 0, true);
return 0;
}
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index ae72206..fdf2888 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -117,7 +117,7 @@
static void cluster_unprepare(struct lpm_cluster *cluster,
const struct cpumask *cpu, int child_idx, bool from_idle,
- int64_t time);
+ int64_t time, bool success);
static void cluster_prepare(struct lpm_cluster *cluster,
const struct cpumask *cpu, int child_idx, bool from_idle,
int64_t time);
@@ -328,7 +328,8 @@
update_debug_pc_event(CPU_HP_STARTING, cpu,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], false);
- cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false, 0);
+ cluster_unprepare(cluster, get_cpu_mask(cpu), NR_LPM_LEVELS, false,
+ 0, true);
return 0;
}
@@ -1165,7 +1166,7 @@
static void cluster_unprepare(struct lpm_cluster *cluster,
const struct cpumask *cpu, int child_idx, bool from_idle,
- int64_t end_time)
+ int64_t end_time, bool success)
{
struct lpm_cluster_level *level;
bool first_cpu;
@@ -1202,14 +1203,14 @@
if (cluster->stats->sleep_time)
cluster->stats->sleep_time = end_time -
cluster->stats->sleep_time;
- lpm_stats_cluster_exit(cluster->stats, cluster->last_level, true);
+ lpm_stats_cluster_exit(cluster->stats, cluster->last_level, success);
level = &cluster->levels[cluster->last_level];
if (level->notify_rpm)
if (sys_pm_ops && sys_pm_ops->exit) {
spin_lock(&bc_timer_lock);
- sys_pm_ops->exit();
+ sys_pm_ops->exit(success);
spin_unlock(&bc_timer_lock);
}
@@ -1229,7 +1230,7 @@
update_cluster_history(&cluster->history, last_level);
cluster_unprepare(cluster->parent, &cluster->child_cpus,
- last_level, from_idle, end_time);
+ last_level, from_idle, end_time, success);
unlock_return:
spin_unlock(&cluster->sync_lock);
}
@@ -1423,7 +1424,7 @@
end_time = ktime_to_ns(ktime_get());
lpm_stats_cpu_exit(idx, end_time, success);
- cluster_unprepare(cpu->parent, cpumask, idx, true, end_time);
+ cluster_unprepare(cpu->parent, cpumask, idx, true, end_time, success);
cpu_unprepare(cpu, idx, true);
dev->last_residency = ktime_us_delta(ktime_get(), start);
update_history(dev, idx);
@@ -1441,6 +1442,7 @@
{
struct lpm_cpu *cpu = per_cpu(cpu_lpm, dev->cpu);
const struct cpumask *cpumask = get_cpu_mask(dev->cpu);
+ bool success = false;
for (; idx >= 0; idx--) {
if (lpm_cpu_mode_allow(dev->cpu, idx, false))
@@ -1454,9 +1456,9 @@
cpu_prepare(cpu, idx, true);
cluster_prepare(cpu->parent, cpumask, idx, false, 0);
- psci_enter_sleep(cpu, idx, false);
+ success = psci_enter_sleep(cpu, idx, false);
- cluster_unprepare(cpu->parent, cpumask, idx, false, 0);
+ cluster_unprepare(cpu->parent, cpumask, idx, false, 0, success);
cpu_unprepare(cpu, idx, true);
}
@@ -1671,6 +1673,7 @@
struct lpm_cluster *cluster = lpm_cpu->parent;
const struct cpumask *cpumask = get_cpu_mask(cpu);
int idx;
+ bool success;
for (idx = lpm_cpu->nlevels - 1; idx >= 0; idx--) {
if (lpm_cpu_mode_allow(cpu, idx, false))
@@ -1683,9 +1686,9 @@
cpu_prepare(lpm_cpu, idx, false);
cluster_prepare(cluster, cpumask, idx, false, 0);
- psci_enter_sleep(lpm_cpu, idx, false);
+ success = psci_enter_sleep(lpm_cpu, idx, false);
- cluster_unprepare(cluster, cpumask, idx, false, 0);
+ cluster_unprepare(cluster, cpumask, idx, false, 0, success);
cpu_unprepare(lpm_cpu, idx, false);
return 0;
}
diff --git a/drivers/crypto/mxs-dcp.c b/drivers/crypto/mxs-dcp.c
index 625ee50..decaed4 100644
--- a/drivers/crypto/mxs-dcp.c
+++ b/drivers/crypto/mxs-dcp.c
@@ -63,7 +63,7 @@
struct dcp_coherent_block *coh;
struct completion completion[DCP_MAX_CHANS];
- struct mutex mutex[DCP_MAX_CHANS];
+ spinlock_t lock[DCP_MAX_CHANS];
struct task_struct *thread[DCP_MAX_CHANS];
struct crypto_queue queue[DCP_MAX_CHANS];
};
@@ -349,13 +349,20 @@
int ret;
- do {
- __set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
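+		/*
+		 * Mark the task interruptible before checking the queue so a
+		 * concurrent wake_up_process() from the enqueue path cannot
+		 * be missed before schedule() below.
+		 */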
+ set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&sdcp->mutex[chan]);
+ spin_lock(&sdcp->lock[chan]);
backlog = crypto_get_backlog(&sdcp->queue[chan]);
arq = crypto_dequeue_request(&sdcp->queue[chan]);
- mutex_unlock(&sdcp->mutex[chan]);
+ spin_unlock(&sdcp->lock[chan]);
+
+ if (!backlog && !arq) {
+ schedule();
+ continue;
+ }
+
+ set_current_state(TASK_RUNNING);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -363,11 +370,8 @@
if (arq) {
ret = mxs_dcp_aes_block_crypt(arq);
arq->complete(arq, ret);
- continue;
}
-
- schedule();
- } while (!kthread_should_stop());
+ }
return 0;
}
@@ -409,9 +413,9 @@
rctx->ecb = ecb;
actx->chan = DCP_CHAN_CRYPTO;
- mutex_lock(&sdcp->mutex[actx->chan]);
+ spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
- mutex_unlock(&sdcp->mutex[actx->chan]);
+ spin_unlock(&sdcp->lock[actx->chan]);
wake_up_process(sdcp->thread[actx->chan]);
@@ -640,13 +644,20 @@
struct ahash_request *req;
int ret, fini;
- do {
- __set_current_state(TASK_INTERRUPTIBLE);
+ while (!kthread_should_stop()) {
+ set_current_state(TASK_INTERRUPTIBLE);
- mutex_lock(&sdcp->mutex[chan]);
+ spin_lock(&sdcp->lock[chan]);
backlog = crypto_get_backlog(&sdcp->queue[chan]);
arq = crypto_dequeue_request(&sdcp->queue[chan]);
- mutex_unlock(&sdcp->mutex[chan]);
+ spin_unlock(&sdcp->lock[chan]);
+
+ if (!backlog && !arq) {
+ schedule();
+ continue;
+ }
+
+ set_current_state(TASK_RUNNING);
if (backlog)
backlog->complete(backlog, -EINPROGRESS);
@@ -658,12 +669,8 @@
ret = dcp_sha_req_to_buf(arq);
fini = rctx->fini;
arq->complete(arq, ret);
- if (!fini)
- continue;
}
-
- schedule();
- } while (!kthread_should_stop());
+ }
return 0;
}
@@ -721,9 +728,9 @@
rctx->init = 1;
}
- mutex_lock(&sdcp->mutex[actx->chan]);
+ spin_lock(&sdcp->lock[actx->chan]);
ret = crypto_enqueue_request(&sdcp->queue[actx->chan], &req->base);
- mutex_unlock(&sdcp->mutex[actx->chan]);
+ spin_unlock(&sdcp->lock[actx->chan]);
wake_up_process(sdcp->thread[actx->chan]);
mutex_unlock(&actx->mutex);
@@ -979,7 +986,7 @@
platform_set_drvdata(pdev, sdcp);
for (i = 0; i < DCP_MAX_CHANS; i++) {
- mutex_init(&sdcp->mutex[i]);
+ spin_lock_init(&sdcp->lock[i]);
init_completion(&sdcp->completion[i]);
crypto_init_queue(&sdcp->queue[i], 50);
}
diff --git a/drivers/crypto/qat/qat_c3xxx/adf_drv.c b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
index 640c3fc..ad9d6fb 100644
--- a/drivers/crypto/qat/qat_c3xxx/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxx/adf_drv.c
@@ -123,7 +123,8 @@
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_C3XXX_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
index 949d77b..0dd8d2d 100644
--- a/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c3xxxvf/adf_drv.c
@@ -125,7 +125,8 @@
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_C3XXXIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62x/adf_drv.c b/drivers/crypto/qat/qat_c62x/adf_drv.c
index 5b2d78a..dcdb94c 100644
--- a/drivers/crypto/qat/qat_c62x/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62x/adf_drv.c
@@ -123,7 +123,8 @@
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_C62X_PCI_DEVICE_ID:
@@ -235,8 +236,7 @@
/* Find and map all the device's BARS */
i = (hw_data->fuses & ADF_DEVICE_FUSECTL_MASK) ? 1 : 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_c62xvf/adf_drv.c b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
index 7540ce1..cd9e634 100644
--- a/drivers/crypto/qat/qat_c62xvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_c62xvf/adf_drv.c
@@ -125,7 +125,8 @@
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_C62XIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
index 4d2de28..3417443 100644
--- a/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xcc/adf_drv.c
@@ -123,7 +123,8 @@
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_DH895XCC_PCI_DEVICE_ID:
@@ -237,8 +238,7 @@
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
index 60df986..15de9cb 100644
--- a/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
+++ b/drivers/crypto/qat/qat_dh895xccvf/adf_drv.c
@@ -125,7 +125,8 @@
struct adf_hw_device_data *hw_data;
char name[ADF_DEVICE_NAME_LENGTH];
unsigned int i, bar_nr;
- int ret, bar_mask;
+ unsigned long bar_mask;
+ int ret;
switch (ent->device) {
case ADF_DH895XCCIOV_PCI_DEVICE_ID:
@@ -215,8 +216,7 @@
/* Find and map all the device's BARS */
i = 0;
bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
- for_each_set_bit(bar_nr, (const unsigned long *)&bar_mask,
- ADF_PCI_MAX_BARS * 2) {
+ for_each_set_bit(bar_nr, &bar_mask, ADF_PCI_MAX_BARS * 2) {
struct adf_bar *bar = &accel_pci_dev->pci_bars[i++];
bar->base_addr = pci_resource_start(pdev, bar_nr);
diff --git a/drivers/devfreq/governor_gpubw_mon.c b/drivers/devfreq/governor_gpubw_mon.c
index 9c24eef..f7bb7eb 100644
--- a/drivers/devfreq/governor_gpubw_mon.c
+++ b/drivers/devfreq/governor_gpubw_mon.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -224,10 +224,11 @@
case DEVFREQ_GOV_SUSPEND:
{
struct devfreq_msm_adreno_tz_data *priv = devfreq->data;
-
- priv->bus.total_time = 0;
- priv->bus.gpu_time = 0;
- priv->bus.ram_time = 0;
+ if (priv) {
+ priv->bus.total_time = 0;
+ priv->bus.gpu_time = 0;
+ priv->bus.ram_time = 0;
+ }
}
break;
default:
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index bea71fb..7335a86 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -1059,14 +1059,14 @@
err = device_add(mci_pdev);
if (err < 0)
- goto out_dev_free;
+ goto out_put_device;
edac_dbg(0, "device %s created\n", dev_name(mci_pdev));
return 0;
- out_dev_free:
- kfree(mci_pdev);
+ out_put_device:
+ put_device(mci_pdev);
out:
return err;
}
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 8a68a5e..b609320 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1177,15 +1177,14 @@
rc = device_add(pvt->addrmatch_dev);
if (rc < 0)
- return rc;
+ goto err_put_addrmatch;
if (!pvt->is_registered) {
pvt->chancounts_dev = kzalloc(sizeof(*pvt->chancounts_dev),
GFP_KERNEL);
if (!pvt->chancounts_dev) {
- put_device(pvt->addrmatch_dev);
- device_del(pvt->addrmatch_dev);
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto err_del_addrmatch;
}
pvt->chancounts_dev->type = &all_channel_counts_type;
@@ -1199,9 +1198,18 @@
rc = device_add(pvt->chancounts_dev);
if (rc < 0)
- return rc;
+ goto err_put_chancounts;
}
return 0;
+
+err_put_chancounts:
+ put_device(pvt->chancounts_dev);
+err_del_addrmatch:
+ device_del(pvt->addrmatch_dev);
+err_put_addrmatch:
+ put_device(pvt->addrmatch_dev);
+
+ return rc;
}
static void i7core_delete_sysfs_devices(struct mem_ctl_info *mci)
@@ -1211,11 +1219,11 @@
edac_dbg(1, "\n");
if (!pvt->is_registered) {
- put_device(pvt->chancounts_dev);
device_del(pvt->chancounts_dev);
+ put_device(pvt->chancounts_dev);
}
- put_device(pvt->addrmatch_dev);
device_del(pvt->addrmatch_dev);
+ put_device(pvt->addrmatch_dev);
}
/****************************************************************************
diff --git a/drivers/gpio/gpio-adp5588.c b/drivers/gpio/gpio-adp5588.c
index c0f718b..c85407a 100644
--- a/drivers/gpio/gpio-adp5588.c
+++ b/drivers/gpio/gpio-adp5588.c
@@ -41,6 +41,8 @@
uint8_t int_en[3];
uint8_t irq_mask[3];
uint8_t irq_stat[3];
+ uint8_t int_input_en[3];
+ uint8_t int_lvl_cached[3];
};
static int adp5588_gpio_read(struct i2c_client *client, u8 reg)
@@ -173,12 +175,28 @@
struct adp5588_gpio *dev = irq_data_get_irq_chip_data(d);
int i;
- for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++)
+ for (i = 0; i <= ADP5588_BANK(ADP5588_MAXGPIO); i++) {
+ if (dev->int_input_en[i]) {
+ mutex_lock(&dev->lock);
+ dev->dir[i] &= ~dev->int_input_en[i];
+ dev->int_input_en[i] = 0;
+ adp5588_gpio_write(dev->client, GPIO_DIR1 + i,
+ dev->dir[i]);
+ mutex_unlock(&dev->lock);
+ }
+
+ if (dev->int_lvl_cached[i] != dev->int_lvl[i]) {
+ dev->int_lvl_cached[i] = dev->int_lvl[i];
+ adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + i,
+ dev->int_lvl[i]);
+ }
+
if (dev->int_en[i] ^ dev->irq_mask[i]) {
dev->int_en[i] = dev->irq_mask[i];
adp5588_gpio_write(dev->client, GPIO_INT_EN1 + i,
dev->int_en[i]);
}
+ }
mutex_unlock(&dev->irq_lock);
}
@@ -221,9 +239,7 @@
else
return -EINVAL;
- adp5588_gpio_direction_input(&dev->gpio_chip, gpio);
- adp5588_gpio_write(dev->client, GPIO_INT_LVL1 + bank,
- dev->int_lvl[bank]);
+ dev->int_input_en[bank] |= bit;
return 0;
}
diff --git a/drivers/gpio/gpio-menz127.c b/drivers/gpio/gpio-menz127.c
index a1210e3..95061d2 100644
--- a/drivers/gpio/gpio-menz127.c
+++ b/drivers/gpio/gpio-menz127.c
@@ -56,9 +56,9 @@
rnd = fls(debounce) - 1;
if (rnd && (debounce & BIT(rnd - 1)))
- debounce = round_up(debounce, MEN_Z127_DB_MIN_US);
+ debounce = roundup(debounce, MEN_Z127_DB_MIN_US);
else
- debounce = round_down(debounce, MEN_Z127_DB_MIN_US);
+ debounce = rounddown(debounce, MEN_Z127_DB_MIN_US);
if (debounce > MEN_Z127_DB_MAX_US)
debounce = MEN_Z127_DB_MAX_US;
diff --git a/drivers/gpio/gpiolib-of.c b/drivers/gpio/gpiolib-of.c
index 193f15d..aac8432 100644
--- a/drivers/gpio/gpiolib-of.c
+++ b/drivers/gpio/gpiolib-of.c
@@ -31,6 +31,7 @@
struct of_phandle_args *gpiospec = data;
return chip->gpiodev->dev.of_node == gpiospec->np &&
+ chip->of_xlate &&
chip->of_xlate(chip, gpiospec, NULL) >= 0;
}
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index dd00764..2ec402a 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -471,7 +471,7 @@
if (ret)
goto out_free_descs;
lh->descs[i] = desc;
- count = i;
+ count = i + 1;
if (lflags & GPIOHANDLE_REQUEST_ACTIVE_LOW)
set_bit(FLAG_ACTIVE_LOW, &desc->flags);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index 47951f4..d47c32a 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -505,7 +505,7 @@
while (true) {
temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
- if (temp & SDMA0_STATUS_REG__RB_CMD_IDLE__SHIFT)
+ if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
break;
if (timeout <= 0)
return -ETIME;
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 564362e..c8a5cf5 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5551,6 +5551,11 @@
if (!(adev->pg_flags & AMD_PG_SUPPORT_GFX_PG))
return 0;
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GFX_DMG))
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
@@ -5586,7 +5591,11 @@
default:
break;
}
-
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GFX_DMG))
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 71d2856..f61c489 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -1350,8 +1350,6 @@
return ret;
}
- kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
-
if (adev->irq.installed &&
amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
@@ -3086,7 +3084,7 @@
else
adev->pm.dpm_enabled = true;
mutex_unlock(&adev->pm.mutex);
-
+ amdgpu_pm_compute_clocks(adev);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index 3fa8320..4826bef 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6959,7 +6959,6 @@
si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
si_thermal_start_thermal_controller(adev);
- ni_update_current_ps(adev, boot_ps);
return 0;
}
@@ -7836,7 +7835,7 @@
else
adev->pm.dpm_enabled = true;
mutex_unlock(&adev->pm.mutex);
-
+ amdgpu_pm_compute_clocks(adev);
return ret;
}
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 9280358..59d4847 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -378,6 +378,7 @@
goto irq_init_fail;
ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
+ drm_crtc_vblank_reset(&malidp->crtc);
if (ret < 0) {
DRM_ERROR("failed to initialise vblank\n");
goto vblank_fail;
diff --git a/drivers/gpu/drm/msm/dp/dp_display.c b/drivers/gpu/drm/msm/dp/dp_display.c
index a0a0daf..3664700 100644
--- a/drivers/gpu/drm/msm/dp/dp_display.c
+++ b/drivers/gpu/drm/msm/dp/dp_display.c
@@ -86,7 +86,7 @@
struct workqueue_struct *wq;
struct delayed_work hdcp_cb_work;
- struct delayed_work connect_work;
+ struct work_struct connect_work;
struct work_struct attention_work;
struct mutex hdcp_mutex;
struct mutex session_lock;
@@ -456,7 +456,7 @@
/* if cable is already connected, send notification */
if (dp->usbpd->hpd_high)
- queue_delayed_work(dp->wq, &dp->connect_work, HZ * 10);
+ queue_work(dp->wq, &dp->connect_work);
else
dp_display->post_open = NULL;
}
@@ -464,15 +464,12 @@
static int dp_display_send_hpd_notification(struct dp_display_private *dp,
bool hpd)
{
- u32 timeout_sec;
int ret = 0;
dp->dp_display.is_connected = hpd;
- if (dp_display_framework_ready(dp))
- timeout_sec = 5;
- else
- timeout_sec = 10;
+ if (!dp_display_framework_ready(dp))
+ return ret;
dp->aux->state |= DP_STATE_NOTIFICATION_SENT;
@@ -480,7 +477,7 @@
dp_display_send_hpd_event(dp);
if (!wait_for_completion_timeout(&dp->notification_comp,
- HZ * timeout_sec)) {
+ HZ * 5)) {
pr_warn("%s timeout\n", hpd ? "connect" : "disconnect");
ret = -EINVAL;
}
@@ -614,9 +611,9 @@
dp_display_host_init(dp);
- /* check for hpd high and framework ready */
- if (dp->usbpd->hpd_high && dp_display_framework_ready(dp))
- queue_delayed_work(dp->wq, &dp->connect_work, 0);
+ /* check for hpd high */
+ if (dp->usbpd->hpd_high)
+ queue_work(dp->wq, &dp->connect_work);
end:
return rc;
}
@@ -695,7 +692,7 @@
dp->aux->abort(dp->aux);
/* wait for idle state */
- cancel_delayed_work(&dp->connect_work);
+ cancel_work(&dp->connect_work);
cancel_work(&dp->attention_work);
flush_workqueue(dp->wq);
@@ -737,7 +734,7 @@
return;
}
- queue_delayed_work(dp->wq, &dp->connect_work, 0);
+ queue_work(dp->wq, &dp->connect_work);
return;
}
@@ -783,18 +780,12 @@
return -ENODEV;
}
- /* check if framework is ready */
- if (!dp_display_framework_ready(dp)) {
- pr_err("framework not ready\n");
- return -ENODEV;
- }
-
if (dp->usbpd->hpd_irq && dp->usbpd->hpd_high &&
dp->power_on) {
dp->link->process_request(dp->link);
queue_work(dp->wq, &dp->attention_work);
} else if (dp->usbpd->hpd_high) {
- queue_delayed_work(dp->wq, &dp->connect_work, 0);
+ queue_work(dp->wq, &dp->connect_work);
} else {
/* cancel any pending request */
atomic_set(&dp->aborted, 1);
@@ -802,7 +793,7 @@
dp->aux->abort(dp->aux);
/* wait for idle state */
- cancel_delayed_work(&dp->connect_work);
+ cancel_work(&dp->connect_work);
cancel_work(&dp->attention_work);
flush_workqueue(dp->wq);
@@ -815,8 +806,7 @@
static void dp_display_connect_work(struct work_struct *work)
{
- struct delayed_work *dw = to_delayed_work(work);
- struct dp_display_private *dp = container_of(dw,
+ struct dp_display_private *dp = container_of(work,
struct dp_display_private, connect_work);
if (dp->dp_display.is_connected && dp_display_framework_ready(dp)) {
@@ -1376,7 +1366,7 @@
}
INIT_DELAYED_WORK(&dp->hdcp_cb_work, dp_display_hdcp_cb_work);
- INIT_DELAYED_WORK(&dp->connect_work, dp_display_connect_work);
+ INIT_WORK(&dp->connect_work, dp_display_connect_work);
INIT_WORK(&dp->attention_work, dp_display_attention_work);
return 0;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
index f06ceb7..4409408 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -348,6 +348,24 @@
struct sde_encoder_phys *phys_enc)
{
struct sde_encoder_irq *irq;
+ struct sde_kms *sde_kms = phys_enc->sde_kms;
+ int ret = 0;
+
+ mutex_lock(&sde_kms->vblank_ctl_global_lock);
+
+ if (atomic_read(&phys_enc->vblank_refcount)) {
+		SDE_ERROR(
+			"vblank_refcount mismatch detected, resetting from %d\n",
+			atomic_read(&phys_enc->vblank_refcount));
+		ret = sde_encoder_helper_unregister_irq(phys_enc,
+				INTR_IDX_RDPTR);
+		if (ret)
+			SDE_ERROR(
+				"control vblank irq unregistration error %d\n",
+				ret);
+	}
+ atomic_set(&phys_enc->vblank_refcount, 0);
irq = &phys_enc->irq[INTR_IDX_CTL_START];
irq->hw_idx = phys_enc->hw_ctl->idx;
@@ -368,6 +386,8 @@
irq = &phys_enc->irq[INTR_IDX_AUTOREFRESH_DONE];
irq->hw_idx = phys_enc->hw_pp->idx;
irq->irq_idx = -EINVAL;
+
+ mutex_unlock(&sde_kms->vblank_ctl_global_lock);
}
static void sde_encoder_phys_cmd_cont_splash_mode_set(
@@ -674,13 +694,15 @@
to_sde_encoder_phys_cmd(phys_enc);
int ret = 0;
int refcount;
+ struct sde_kms *sde_kms;
if (!phys_enc || !phys_enc->hw_pp) {
SDE_ERROR("invalid encoder\n");
return -EINVAL;
}
+ sde_kms = phys_enc->sde_kms;
- mutex_lock(phys_enc->vblank_ctl_lock);
+ mutex_lock(&sde_kms->vblank_ctl_global_lock);
refcount = atomic_read(&phys_enc->vblank_refcount);
/* Slave encoders don't report vblank */
@@ -698,11 +720,17 @@
SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
enable, refcount);
- if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
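+	/*
+	 * If (un)registering the RDPTR interrupt fails, roll the refcount
+	 * back so it keeps matching the actual registration state.
+	 */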
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1) {
ret = sde_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
- else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ if (ret)
+ atomic_dec_return(&phys_enc->vblank_refcount);
+ } else if (!enable &&
+ atomic_dec_return(&phys_enc->vblank_refcount) == 0) {
ret = sde_encoder_helper_unregister_irq(phys_enc,
INTR_IDX_RDPTR);
+ if (ret)
+ atomic_inc_return(&phys_enc->vblank_refcount);
+ }
end:
if (ret) {
@@ -714,7 +742,7 @@
enable, refcount, SDE_EVTLOG_ERROR);
}
- mutex_unlock(phys_enc->vblank_ctl_lock);
+ mutex_unlock(&sde_kms->vblank_ctl_global_lock);
return ret;
}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index ed0e7b8..c237f27 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -3420,6 +3420,8 @@
dev->mode_config.max_height = sde_kms->catalog->max_display_height;
mutex_init(&sde_kms->secure_transition_lock);
+ mutex_init(&sde_kms->vblank_ctl_global_lock);
+
atomic_set(&sde_kms->detach_sec_cb, 0);
atomic_set(&sde_kms->detach_all_cb, 0);
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index fb79fe7..2f1d418 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -288,6 +288,7 @@
atomic_t detach_sec_cb;
atomic_t detach_all_cb;
struct mutex secure_transition_lock;
+ struct mutex vblank_ctl_global_lock;
bool first_kickoff;
};
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
index 2e1422f..5dd3abf 100644
--- a/drivers/gpu/drm/msm/sde_dbg.c
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -3047,6 +3047,37 @@
return 0;
}
+/**
+ * sde_dbg_reg_base_is_valid_range - verify if requested memory range is valid
+ * @off: address offset in bytes
+ * @cnt: memory size in bytes
+ * Return: true if valid; false otherwise
+ */
+static bool sde_dbg_reg_base_is_valid_range(u32 off, u32 cnt)
+{
+ static struct sde_dbg_base *dbg_base = &sde_dbg_base;
+ struct sde_dbg_reg_range *node;
+ struct sde_dbg_reg_base *base;
+
+ pr_debug("check offset=0x%x cnt=0x%x\n", off, cnt);
+
+ list_for_each_entry(base, &dbg_base->reg_base_list, reg_base_head) {
+ list_for_each_entry(node, &base->sub_range_list, head) {
+ pr_debug("%s: start=0x%x end=0x%x\n", node->range_name,
+ node->offset.start, node->offset.end);
+
+ if (node->offset.start <= off
+ && off <= node->offset.end
+ && off + cnt <= node->offset.end) {
+ pr_debug("valid range requested\n");
+ return true;
+ }
+ }
+ }
+
+ pr_err("invalid range requested\n");
+ return false;
+}
/**
* sde_dbg_reg_base_offset_write - set new offset and len to debugfs reg base
@@ -3093,6 +3124,9 @@
if (cnt == 0)
return -EINVAL;
+ if (!sde_dbg_reg_base_is_valid_range(off, cnt))
+ return -EINVAL;
+
mutex_lock(&sde_dbg_base.mutex);
dbg->off = off;
dbg->cnt = cnt;
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 56c288f..5bfae1f 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -271,12 +271,16 @@
nv_connector->edid = NULL;
}
- /* Outputs are only polled while runtime active, so acquiring a
- * runtime PM ref here is unnecessary (and would deadlock upon
- * runtime suspend because it waits for polling to finish).
+ /* Outputs are only polled while runtime active, so resuming the
+ * device here is unnecessary (and would deadlock upon runtime suspend
+	 * because it waits for polling to finish). We do, however, want to
+ * prevent the autosuspend timer from elapsing during this operation
+ * if possible.
*/
- if (!drm_kms_helper_is_poll_worker()) {
- ret = pm_runtime_get_sync(connector->dev->dev);
+ if (drm_kms_helper_is_poll_worker()) {
+ pm_runtime_get_noresume(dev->dev);
+ } else {
+ ret = pm_runtime_get_sync(dev->dev);
if (ret < 0 && ret != -EACCES)
return conn_status;
}
@@ -354,10 +358,8 @@
out:
- if (!drm_kms_helper_is_poll_worker()) {
- pm_runtime_mark_last_busy(connector->dev->dev);
- pm_runtime_put_autosuspend(connector->dev->dev);
- }
+ pm_runtime_mark_last_busy(dev->dev);
+ pm_runtime_put_autosuspend(dev->dev);
return conn_status;
}
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index 6526a33..3ddd409 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -367,8 +367,6 @@
pm_runtime_get_sync(drm->dev->dev);
drm_helper_hpd_irq_event(drm->dev);
- /* enable polling for external displays */
- drm_kms_helper_poll_enable(drm->dev);
pm_runtime_mark_last_busy(drm->dev->dev);
pm_runtime_put_sync(drm->dev->dev);
@@ -391,15 +389,29 @@
{
struct nouveau_drm *drm = container_of(nb, typeof(*drm), acpi_nb);
struct acpi_bus_event *info = data;
+ int ret;
if (!strcmp(info->device_class, ACPI_VIDEO_CLASS)) {
if (info->type == ACPI_VIDEO_NOTIFY_PROBE) {
- /*
- * This may be the only indication we receive of a
- * connector hotplug on a runtime suspended GPU,
- * schedule hpd_work to check.
- */
- schedule_work(&drm->hpd_work);
+ ret = pm_runtime_get(drm->dev->dev);
+ if (ret == 1 || ret == -EACCES) {
+ /* If the GPU is already awake, or in a state
+ * where we can't wake it up, it can handle
+			 * its own hotplug events.
+ */
+ pm_runtime_put_autosuspend(drm->dev->dev);
+ } else if (ret == 0) {
+ /* This may be the only indication we receive
+ * of a connector hotplug on a runtime
+ * suspended GPU, schedule hpd_work to check.
+ */
+ NV_DEBUG(drm, "ACPI requested connector reprobe\n");
+ schedule_work(&drm->hpd_work);
+ pm_runtime_put_noidle(drm->dev->dev);
+ } else {
+ NV_WARN(drm, "Dropped ACPI reprobe event due to RPM error: %d\n",
+ ret);
+ }
/* acpi-video should not generate keypresses for this */
return NOTIFY_BAD;
@@ -422,6 +434,11 @@
if (ret)
return ret;
+ /* enable connector detection and polling for connectors without HPD
+ * support
+ */
+ drm_kms_helper_poll_enable(dev);
+
/* enable hotplug interrupts */
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
struct nouveau_connector *conn = nouveau_connector(connector);
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
index a410c0d..6a1b81e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm200.c
@@ -161,7 +161,8 @@
}
/* load and execute some other ucode image (bios therm?) */
- return pmu_load(init, 0x01, post, NULL, NULL);
+ pmu_load(init, 0x01, post, NULL, NULL);
+ return 0;
}
static const struct nvkm_devinit_func
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index aad2f4a..97828fa 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -283,7 +283,6 @@
remote = of_graph_get_remote_port_parent(ep);
if (!remote) {
DRM_DEBUG_DRIVER("Error retrieving the output node\n");
- of_node_put(remote);
continue;
}
@@ -297,11 +296,13 @@
if (of_graph_parse_endpoint(ep, &endpoint)) {
DRM_DEBUG_DRIVER("Couldn't parse endpoint\n");
+ of_node_put(remote);
continue;
}
if (!endpoint.id) {
DRM_DEBUG_DRIVER("Endpoint is our panel... skipping\n");
+ of_node_put(remote);
continue;
}
}
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index f8c9f6f..a2d8630 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -327,6 +327,9 @@
vc4_state->y_scaling[0] = vc4_get_scaling_mode(vc4_state->src_h[0],
vc4_state->crtc_h);
+ vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
+ vc4_state->y_scaling[0] == VC4_SCALING_NONE);
+
if (num_planes > 1) {
vc4_state->is_yuv = true;
@@ -342,24 +345,17 @@
vc4_get_scaling_mode(vc4_state->src_h[1],
vc4_state->crtc_h);
- /* YUV conversion requires that scaling be enabled,
- * even on a plane that's otherwise 1:1. Choose TPZ
- * for simplicity.
+ /* YUV conversion requires that horizontal scaling be enabled,
+ * even on a plane that's otherwise 1:1. Looks like only PPF
+ * works in that case, so let's pick that one.
*/
- if (vc4_state->x_scaling[0] == VC4_SCALING_NONE)
- vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
- if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
- vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+ if (vc4_state->is_unity)
+ vc4_state->x_scaling[0] = VC4_SCALING_PPF;
} else {
vc4_state->x_scaling[1] = VC4_SCALING_NONE;
vc4_state->y_scaling[1] = VC4_SCALING_NONE;
}
- vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
- vc4_state->y_scaling[0] == VC4_SCALING_NONE &&
- vc4_state->x_scaling[1] == VC4_SCALING_NONE &&
- vc4_state->y_scaling[1] == VC4_SCALING_NONE);
-
/* No configuring scaling on the cursor plane, since it gets
non-vblank-synced updates, and scaling requires
LBM changes which have to be vblank-synced.
@@ -614,7 +610,10 @@
vc4_dlist_write(vc4_state, SCALER_CSC2_ITR_R_601_5);
}
- if (!vc4_state->is_unity) {
+ if (vc4_state->x_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->x_scaling[1] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
+ vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
/* LBM Base Address. */
if (vc4_state->y_scaling[0] != VC4_SCALING_NONE ||
vc4_state->y_scaling[1] != VC4_SCALING_NONE) {
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 2a1d352..aef802c 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -137,8 +137,11 @@
break;
}
case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
- seq_printf(s, "sync: [%pK] %s", sync_event->handle,
- sync_event->fence_name);
+ int i;
+
+ for (i = 0; i < sync_event->info.num_fences; i++)
+ seq_printf(s, "sync: %s",
+ sync_event->info.fences[i].name);
break;
}
default:
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index eb4c29d..aba9ed0 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2018,7 +2018,7 @@
}
handle = kgsl_sync_fence_async_wait(event.fd,
- gpuobj_free_fence_func, entry, NULL, 0);
+ gpuobj_free_fence_func, entry, NULL);
if (IS_ERR(handle)) {
kgsl_mem_entry_unset_pend(entry);
diff --git a/drivers/gpu/msm/kgsl_drawobj.c b/drivers/gpu/msm/kgsl_drawobj.c
index 3dbaea4..a1e0f4c 100644
--- a/drivers/gpu/msm/kgsl_drawobj.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -43,6 +43,17 @@
static struct kmem_cache *memobjs_cache;
static struct kmem_cache *sparseobjs_cache;
+static void free_fence_names(struct kgsl_drawobj_sync *syncobj)
+{
+ unsigned int i;
+
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];
+
+ if (event->type == KGSL_CMD_SYNCPOINT_TYPE_FENCE)
+ kfree(event->info.fences);
+ }
+}
void kgsl_drawobj_destroy_object(struct kref *kref)
{
@@ -55,6 +66,7 @@
switch (drawobj->type) {
case SYNCOBJ_TYPE:
syncobj = SYNCOBJ(drawobj);
+ free_fence_names(syncobj);
kfree(syncobj->synclist);
kfree(syncobj);
break;
@@ -94,11 +106,16 @@
retired);
break;
}
- case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
- dev_err(device->dev, " fence: %s\n",
- event->fence_name);
+ case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
+ int j;
+ struct event_fence_info *info = &event->info;
+
+ for (j = 0; j < info->num_fences; j++)
+ dev_err(device->dev, "[%d] fence: %s\n",
+ i, info->fences[j].name);
break;
}
+ }
}
}
@@ -146,11 +163,16 @@
dev_err(device->dev, " [%d] TIMESTAMP %d:%d\n",
i, event->context->id, event->timestamp);
break;
- case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
- dev_err(device->dev, " [%d] FENCE %s\n",
- i, event->fence_name);
+ case KGSL_CMD_SYNCPOINT_TYPE_FENCE: {
+ int j;
+ struct event_fence_info *info = &event->info;
+
+ for (j = 0; j < info->num_fences; j++)
+ dev_err(device->dev, " [%d] FENCE %s\n",
+ i, info->fences[j].name);
break;
}
+ }
}
kgsl_drawobj_put(drawobj);
@@ -332,8 +354,11 @@
static bool drawobj_sync_fence_func(void *priv)
{
struct kgsl_drawobj_sync_event *event = priv;
+ int i;
- trace_syncpoint_fence_expire(event->syncobj, event->fence_name);
+ for (i = 0; i < event->info.num_fences; i++)
+ trace_syncpoint_fence_expire(event->syncobj,
+ event->info.fences[i].name);
/*
* Only call kgsl_drawobj_put() if it's not marked for cancellation
@@ -359,7 +384,7 @@
struct kgsl_cmd_syncpoint_fence *sync = priv;
struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
struct kgsl_drawobj_sync_event *event;
- unsigned int id;
+ unsigned int id, i;
kref_get(&drawobj->refcount);
@@ -377,7 +402,7 @@
event->handle = kgsl_sync_fence_async_wait(sync->fd,
drawobj_sync_fence_func, event,
- event->fence_name, sizeof(event->fence_name));
+ &event->info);
if (IS_ERR_OR_NULL(event->handle)) {
int ret = PTR_ERR(event->handle);
@@ -397,7 +422,8 @@
return ret;
}
- trace_syncpoint_fence(syncobj, event->fence_name);
+ for (i = 0; i < event->info.num_fences; i++)
+ trace_syncpoint_fence(syncobj, event->info.fences[i].name);
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
index 06eef7f..bd32f5e 100644
--- a/drivers/gpu/msm/kgsl_drawobj.h
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -107,6 +107,15 @@
#define KGSL_FENCE_NAME_LEN 74
+struct fence_info {
+ char name[KGSL_FENCE_NAME_LEN];
+};
+
+struct event_fence_info {
+ struct fence_info *fences;
+ int num_fences;
+};
+
/**
* struct kgsl_drawobj_sync_event
* @id: identifier (position within the pending bitmap)
@@ -116,8 +125,8 @@
* register this event
* @timestamp: Pending timestamp for the event
* @handle: Pointer to a sync fence handle
- * @fence_name: A fence name string to describe the fence
* @device: Pointer to the KGSL device
+ * @info: structure to hold info about the fence
*/
struct kgsl_drawobj_sync_event {
unsigned int id;
@@ -126,8 +135,8 @@
struct kgsl_context *context;
unsigned int timestamp;
struct kgsl_sync_fence_cb *handle;
- char fence_name[KGSL_FENCE_NAME_LEN];
struct kgsl_device *device;
+ struct event_fence_info info;
};
/**
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index d484894..98c117d 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -438,27 +438,54 @@
}
}
-static void kgsl_get_fence_name(struct fence *fence,
- char *fence_name, int name_len)
+static void kgsl_get_fence_names(struct fence *fence,
+ struct event_fence_info *info_ptr)
{
- char *ptr = fence_name;
- char *last = fence_name + name_len;
+ unsigned int num_fences;
+ struct fence **fences;
+ struct fence_array *array;
+ int i;
- ptr += snprintf(ptr, last - ptr, "%s %s",
- fence->ops->get_driver_name(fence),
- fence->ops->get_timeline_name(fence));
-
- if ((ptr + 2) >= last)
+ if (!info_ptr)
return;
- if (fence->ops->fence_value_str) {
- ptr += snprintf(ptr, last - ptr, ": ");
- fence->ops->fence_value_str(fence, ptr, last - ptr);
+ array = to_fence_array(fence);
+
+ if (array != NULL) {
+ num_fences = array->num_fences;
+ fences = array->fences;
+ } else {
+ num_fences = 1;
+ fences = &fence;
+ }
+
+ info_ptr->fences = kcalloc(num_fences, sizeof(struct fence_info),
+ GFP_ATOMIC);
+ if (info_ptr->fences == NULL)
+ return;
+
+ info_ptr->num_fences = num_fences;
+
+ for (i = 0; i < num_fences; i++) {
+ struct fence *f = fences[i];
+ struct fence_info *fi = &info_ptr->fences[i];
+ int len;
+
+ len = scnprintf(fi->name, sizeof(fi->name), "%s %s",
+ f->ops->get_driver_name(f),
+ f->ops->get_timeline_name(f));
+
+ if (f->ops->fence_value_str) {
+ len += scnprintf(fi->name + len, sizeof(fi->name) - len,
+ ": ");
+ f->ops->fence_value_str(f, fi->name + len,
+ sizeof(fi->name) - len);
+ }
}
}
struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
- bool (*func)(void *priv), void *priv, char *fence_name, int name_len)
+ bool (*func)(void *priv), void *priv, struct event_fence_info *info_ptr)
{
struct kgsl_sync_fence_cb *kcb;
struct fence *fence;
@@ -479,8 +506,7 @@
kcb->priv = priv;
kcb->func = func;
- if (fence_name)
- kgsl_get_fence_name(fence, fence_name, name_len);
+ kgsl_get_fence_names(fence, info_ptr);
/* if status then error or signaled */
status = fence_add_callback(fence, &kcb->fence_cb,
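kgsl_get_fence_names() above pivots on to_fence_array(), which returns NULL when the fence is not an array; a single fence can then be treated as a one-element array and each component named the same way. A minimal sketch of that decomposition, using the pre-rename struct fence types this tree still carries:

#include <linux/fence.h>
#include <linux/fence-array.h>

/* Sketch: apply fn to every component of a possibly-composite fence. */
static void for_each_component_fence(struct fence *fence,
				     void (*fn)(struct fence *))
{
	struct fence_array *array = to_fence_array(fence);
	struct fence **fences = array ? array->fences : &fence;
	unsigned int num = array ? array->num_fences : 1;
	unsigned int i;

	for (i = 0; i < num; i++)
		fn(fences[i]);
}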
diff --git a/drivers/gpu/msm/kgsl_sync.h b/drivers/gpu/msm/kgsl_sync.h
index 6998b40..a53cd51 100644
--- a/drivers/gpu/msm/kgsl_sync.h
+++ b/drivers/gpu/msm/kgsl_sync.h
@@ -1,3 +1,4 @@
+
/* Copyright (c) 2012-2014,2017-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -93,7 +94,7 @@
struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
bool (*func)(void *priv), void *priv,
- char *fence_name, int name_len);
+ struct event_fence_info *info_ptr);
void kgsl_sync_fence_async_cancel(struct kgsl_sync_fence_cb *kcb);
@@ -139,7 +140,7 @@
static inline struct kgsl_sync_fence_cb *kgsl_sync_fence_async_wait(int fd,
bool (*func)(void *priv), void *priv,
- char *fence_name, int name_len)
+ struct event_fence_info *info_ptr)
{
return NULL;
}
diff --git a/drivers/hid/hid-apple.c b/drivers/hid/hid-apple.c
index cb2e85c..a8b8058 100644
--- a/drivers/hid/hid-apple.c
+++ b/drivers/hid/hid-apple.c
@@ -333,7 +333,8 @@
struct hid_field *field, struct hid_usage *usage,
unsigned long **bit, int *max)
{
- if (usage->hid == (HID_UP_CUSTOM | 0x0003)) {
+ if (usage->hid == (HID_UP_CUSTOM | 0x0003) ||
+ usage->hid == (HID_UP_MSVENDOR | 0x0003)) {
/* The fn key on Apple USB keyboards */
set_bit(EV_REP, hi->input->evbit);
hid_map_usage_clear(hi, usage, bit, max, EV_KEY, KEY_FN);
@@ -479,6 +480,12 @@
.driver_data = APPLE_NUMLOCK_EMULATION | APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
.driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+ .driver_data = APPLE_HAS_FN },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI),
+ .driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ANSI),
.driver_data = APPLE_HAS_FN },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_WELLSPRING_ISO),
diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
index 2059d9d..81efba5 100644
--- a/drivers/hid/hid-core.c
+++ b/drivers/hid/hid-core.c
@@ -1855,6 +1855,9 @@
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
+ { HID_BLUETOOTH_DEVICE(BT_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_FOUNTAIN_TP_ONLY) },
{ HID_USB_DEVICE(USB_VENDOR_ID_APPLE, USB_DEVICE_ID_APPLE_GEYSER1_TP_ONLY) },
{ HID_I2C_DEVICE(USB_VENDOR_ID_ASUSTEK, USB_DEVICE_ID_ASUSTEK_NOTEBOOK_KEYBOARD) },
@@ -2064,6 +2067,10 @@
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS3_CONTROLLER) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2) },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGX_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_VAIO_VGP_MOUSE) },
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER) },
diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h
index 0280e28..a784464 100644
--- a/drivers/hid/hid-ids.h
+++ b/drivers/hid/hid-ids.h
@@ -83,6 +83,7 @@
#define USB_DEVICE_ID_ANTON_TOUCH_PAD 0x3101
#define USB_VENDOR_ID_APPLE 0x05ac
+#define BT_VENDOR_ID_APPLE 0x004c
#define USB_DEVICE_ID_APPLE_MIGHTYMOUSE 0x0304
#define USB_DEVICE_ID_APPLE_MAGICMOUSE 0x030d
#define USB_DEVICE_ID_APPLE_MAGICTRACKPAD 0x030e
@@ -152,6 +153,7 @@
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_ISO 0x0256
#define USB_DEVICE_ID_APPLE_ALU_WIRELESS_2011_JIS 0x0257
#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_ANSI 0x0267
+#define USB_DEVICE_ID_APPLE_MAGIC_KEYBOARD_NUMPAD_ANSI 0x026c
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ANSI 0x0290
#define USB_DEVICE_ID_APPLE_WELLSPRING8_ISO 0x0291
#define USB_DEVICE_ID_APPLE_WELLSPRING8_JIS 0x0292
@@ -888,6 +890,7 @@
#define USB_DEVICE_ID_SAITEK_RUMBLEPAD 0xff17
#define USB_DEVICE_ID_SAITEK_PS1000 0x0621
#define USB_DEVICE_ID_SAITEK_RAT7_OLD 0x0ccb
+#define USB_DEVICE_ID_SAITEK_RAT7_CONTAGION 0x0ccd
#define USB_DEVICE_ID_SAITEK_RAT7 0x0cd7
#define USB_DEVICE_ID_SAITEK_RAT9 0x0cfa
#define USB_DEVICE_ID_SAITEK_MMO7 0x0cd0
@@ -927,6 +930,8 @@
#define USB_DEVICE_ID_SONY_PS3_BDREMOTE 0x0306
#define USB_DEVICE_ID_SONY_PS3_CONTROLLER 0x0268
#define USB_DEVICE_ID_SONY_PS4_CONTROLLER 0x05c4
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_2 0x09cc
+#define USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE 0x0ba0
#define USB_DEVICE_ID_SONY_MOTION_CONTROLLER 0x03d5
#define USB_DEVICE_ID_SONY_NAVIGATION_CONTROLLER 0x042f
#define USB_DEVICE_ID_SONY_BUZZ_CONTROLLER 0x0002
diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c
index 1b0084d..28373da 100644
--- a/drivers/hid/hid-ntrig.c
+++ b/drivers/hid/hid-ntrig.c
@@ -955,6 +955,8 @@
ret = sysfs_create_group(&hdev->dev.kobj,
&ntrig_attribute_group);
+ if (ret)
+ hid_err(hdev, "cannot create sysfs group\n");
return 0;
err_free:
diff --git a/drivers/hid/hid-saitek.c b/drivers/hid/hid-saitek.c
index 39e6426..683861f 100644
--- a/drivers/hid/hid-saitek.c
+++ b/drivers/hid/hid-saitek.c
@@ -183,6 +183,8 @@
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT7_CONTAGION),
+ .driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_SAITEK, USB_DEVICE_ID_SAITEK_RAT9),
.driver_data = SAITEK_RELEASE_MODE_RAT7 },
{ HID_USB_DEVICE(USB_VENDOR_ID_MADCATZ, USB_DEVICE_ID_MADCATZ_RAT9),
diff --git a/drivers/hid/hid-sony.c b/drivers/hid/hid-sony.c
index 1b1dccd..eee58d1 100644
--- a/drivers/hid/hid-sony.c
+++ b/drivers/hid/hid-sony.c
@@ -2581,6 +2581,12 @@
.driver_data = DUALSHOCK4_CONTROLLER_USB },
{ HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER),
.driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
+ { HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_2),
+ .driver_data = DUALSHOCK4_CONTROLLER_BT },
+ { HID_USB_DEVICE(USB_VENDOR_ID_SONY, USB_DEVICE_ID_SONY_PS4_CONTROLLER_DONGLE),
+ .driver_data = DUALSHOCK4_CONTROLLER_USB },
/* Nyko Core Controller for PS3 */
{ HID_USB_DEVICE(USB_VENDOR_ID_SINO_LITE, USB_DEVICE_ID_SINO_LITE_CONTROLLER),
.driver_data = SIXAXIS_CONTROLLER_USB | SINO_LITE_CONTROLLER },
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 5e1fdc8..2fd0f119 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -616,21 +616,22 @@
NEGO_IN_PROGRESS,
NEGO_FINISHED} host_negotiatied = NEGO_NOT_STARTED;
- if (host_negotiatied == NEGO_NOT_STARTED &&
- kvp_transaction.state < HVUTIL_READY) {
+ if (kvp_transaction.state < HVUTIL_READY) {
/*
* If userspace daemon is not connected and host is asking
* us to negotiate we need to delay to not lose messages.
* This is important for Failover IP setting.
*/
- host_negotiatied = NEGO_IN_PROGRESS;
- schedule_delayed_work(&kvp_host_handshake_work,
+ if (host_negotiatied == NEGO_NOT_STARTED) {
+ host_negotiatied = NEGO_IN_PROGRESS;
+ schedule_delayed_work(&kvp_host_handshake_work,
HV_UTIL_NEGO_TIMEOUT * HZ);
+ }
return;
}
if (kvp_transaction.state > HVUTIL_READY)
return;
-
+recheck:
vmbus_recvpacket(channel, recv_buffer, PAGE_SIZE * 4, &recvlen,
&requestid);
@@ -707,6 +708,8 @@
VM_PKT_DATA_INBAND, 0);
host_negotiatied = NEGO_FINISHED;
+
+ goto recheck;
}
}
diff --git a/drivers/hwmon/adt7475.c b/drivers/hwmon/adt7475.c
index 3cefd1a..9c262d9 100644
--- a/drivers/hwmon/adt7475.c
+++ b/drivers/hwmon/adt7475.c
@@ -274,14 +274,18 @@
return clamp_val(reg, 0, 1023) & (0xff << 2);
}
-static u16 adt7475_read_word(struct i2c_client *client, int reg)
+static int adt7475_read_word(struct i2c_client *client, int reg)
{
- u16 val;
+ int val1, val2;
- val = i2c_smbus_read_byte_data(client, reg);
- val |= (i2c_smbus_read_byte_data(client, reg + 1) << 8);
+ val1 = i2c_smbus_read_byte_data(client, reg);
+ if (val1 < 0)
+ return val1;
+ val2 = i2c_smbus_read_byte_data(client, reg + 1);
+ if (val2 < 0)
+ return val2;
- return val;
+ return val1 | (val2 << 8);
}
static void adt7475_write_word(struct i2c_client *client, int reg, u16 val)
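The adt7475 fix works by widening the return type: a negative value is an errno from the SMBus layer, anything else is the assembled 16-bit register pair. Callers must therefore test the sign before using the data; a short usage sketch (the register offset is illustrative):

static int example_read(struct i2c_client *client)
{
	int val = adt7475_read_word(client, 0x2a);	/* offset illustrative */

	if (val < 0)
		return val;		/* propagate the SMBus errno */

	return val & 0xffff;		/* low 16 bits hold the data */
}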
diff --git a/drivers/hwmon/ina2xx.c b/drivers/hwmon/ina2xx.c
index ac63e56..9ac6e16 100644
--- a/drivers/hwmon/ina2xx.c
+++ b/drivers/hwmon/ina2xx.c
@@ -17,7 +17,7 @@
* Bi-directional Current/Power Monitor with I2C Interface
* Datasheet: http://www.ti.com/product/ina230
*
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
* Thanks to Jan Volkering
*
* This program is free software; you can redistribute it and/or modify
@@ -328,6 +328,15 @@
return 0;
}
+static ssize_t ina2xx_show_shunt(struct device *dev,
+ struct device_attribute *da,
+ char *buf)
+{
+ struct ina2xx_data *data = dev_get_drvdata(dev);
+
+ return snprintf(buf, PAGE_SIZE, "%li\n", data->rshunt);
+}
+
static ssize_t ina2xx_store_shunt(struct device *dev,
struct device_attribute *da,
const char *buf, size_t count)
@@ -402,7 +411,7 @@
/* shunt resistance */
static SENSOR_DEVICE_ATTR(shunt_resistor, S_IRUGO | S_IWUSR,
- ina2xx_show_value, ina2xx_store_shunt,
+ ina2xx_show_shunt, ina2xx_store_shunt,
INA2XX_CALIBRATION);
/* update interval (ina226 only) */
diff --git a/drivers/hwmon/qpnp-adc-common.c b/drivers/hwmon/qpnp-adc-common.c
index c5653fe..555e95c 100644
--- a/drivers/hwmon/qpnp-adc-common.c
+++ b/drivers/hwmon/qpnp-adc-common.c
@@ -752,6 +752,61 @@
};
/* Voltage to temperature */
+static const struct qpnp_vadc_map_pt adcmap_batt_therm_qrd_215[] = {
+ {1575, -200},
+ {1549, -180},
+ {1522, -160},
+ {1493, -140},
+ {1463, -120},
+ {1431, -100},
+ {1398, -80},
+ {1364, -60},
+ {1329, -40},
+ {1294, -20},
+ {1258, 0},
+ {1222, 20},
+ {1187, 40},
+ {1151, 60},
+ {1116, 80},
+ {1082, 100},
+ {1049, 120},
+ {1016, 140},
+ {985, 160},
+ {955, 180},
+ {926, 200},
+ {899, 220},
+ {873, 240},
+ {849, 260},
+ {825, 280},
+ {804, 300},
+ {783, 320},
+ {764, 340},
+ {746, 360},
+ {729, 380},
+ {714, 400},
+ {699, 420},
+ {686, 440},
+ {673, 460},
+ {662, 480},
+ {651, 500},
+ {641, 520},
+ {632, 540},
+ {623, 560},
+ {615, 580},
+ {608, 600},
+ {601, 620},
+ {595, 640},
+ {589, 660},
+ {583, 680},
+ {578, 700},
+ {574, 720},
+ {569, 740},
+ {565, 760},
+ {562, 780},
+ {558, 800}
+};
+
+/* Voltage to temperature */
static const struct qpnp_vadc_map_pt adcmap_batt_therm_pu30[] = {
{1842, -400},
{1838, -380},
@@ -1263,19 +1318,6 @@
qpnp_adc_map_voltage_temp(adcmap_batt_therm_qrd,
ARRAY_SIZE(adcmap_batt_therm_qrd),
batt_thm_voltage, &adc_chan_result->physical);
- } else {
-
- qpnp_adc_scale_with_calib_param(adc_code,
- adc_properties, chan_properties, &batt_thm_voltage);
-
- adc_chan_result->measurement = batt_thm_voltage;
-
- return qpnp_adc_map_voltage_temp(
- adcmap_batt_therm_qrd,
- ARRAY_SIZE(adcmap_batt_therm_qrd),
- batt_thm_voltage,
- &adc_chan_result->physical);
-
}
return 0;
}
@@ -1337,6 +1379,34 @@
}
EXPORT_SYMBOL(qpnp_adc_batt_therm_pu400);
+int32_t qpnp_adc_batt_therm_qrd_215(struct qpnp_vadc_chip *chip,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_properties,
+ const struct qpnp_vadc_chan_properties *chan_properties,
+ struct qpnp_vadc_result *adc_chan_result)
+{
+ int64_t batt_thm_voltage = 0;
+
+ if (!chan_properties || !chan_properties->offset_gain_numerator ||
+ !chan_properties->offset_gain_denominator || !adc_properties
+ || !adc_chan_result)
+ return -EINVAL;
+
+ qpnp_adc_scale_with_calib_param(adc_code,
+ adc_properties, chan_properties, &batt_thm_voltage);
+
+ adc_chan_result->measurement = batt_thm_voltage;
+
+ return qpnp_adc_map_voltage_temp(
+ adcmap_batt_therm_qrd_215,
+ ARRAY_SIZE(adcmap_batt_therm_qrd_215),
+ batt_thm_voltage,
+ &adc_chan_result->physical);
+}
+EXPORT_SYMBOL(qpnp_adc_batt_therm_qrd_215);
+
int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *chip,
int32_t adc_code,
const struct qpnp_adc_properties *adc_properties,
@@ -2182,9 +2252,9 @@
pr_debug("warm_temp:%d and cool_temp:%d\n", param->high_temp,
param->low_temp);
- rc = qpnp_adc_map_voltage_temp(
- adcmap_batt_therm_qrd,
- ARRAY_SIZE(adcmap_batt_therm_qrd),
+ rc = qpnp_adc_map_temp_voltage(
+ adcmap_batt_therm_qrd_215,
+ ARRAY_SIZE(adcmap_batt_therm_qrd_215),
(param->low_temp),
&low_output);
if (rc) {
@@ -2197,9 +2267,9 @@
low_output = div64_s64(low_output, btm_param.adc_vref);
low_output += btm_param.adc_gnd;
- rc = qpnp_adc_map_voltage_temp(
- adcmap_batt_therm_qrd,
- ARRAY_SIZE(adcmap_batt_therm_qrd),
+ rc = qpnp_adc_map_temp_voltage(
+ adcmap_batt_therm_qrd_215,
+ ARRAY_SIZE(adcmap_batt_therm_qrd_215),
(param->high_temp),
&high_output);
if (rc) {
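The new adcmap_batt_therm_qrd_215[] table is walked by qpnp_adc_map_voltage_temp(), which brackets the measured voltage between two table points and interpolates linearly. A standalone sketch of that lookup for a table sorted by falling voltage, as this one is (names are illustrative, not the driver's):

struct map_pt {
	int x;	/* voltage, mV */
	int y;	/* temperature, deci-degC */
};

/* Sketch: piecewise-linear lookup over a descending-x table. */
static int map_voltage_temp(const struct map_pt *pts, int n, int x)
{
	int i = 0;

	while (i < n && pts[i].x > x)
		i++;

	if (i == 0)
		return pts[0].y;	/* clamp above the table */
	if (i == n)
		return pts[n - 1].y;	/* clamp below the table */

	/* interpolate between pts[i - 1] and pts[i] */
	return pts[i - 1].y + (pts[i].y - pts[i - 1].y) *
	       (x - pts[i - 1].x) / (pts[i].x - pts[i - 1].x);
}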
diff --git a/drivers/hwmon/qpnp-adc-voltage.c b/drivers/hwmon/qpnp-adc-voltage.c
index 5399670..b4f0dbf 100644
--- a/drivers/hwmon/qpnp-adc-voltage.c
+++ b/drivers/hwmon/qpnp-adc-voltage.c
@@ -34,6 +34,7 @@
#include <linux/platform_device.h>
#include <linux/power_supply.h>
#include <linux/thermal.h>
+#include <linux/qpnp/qpnp-revid.h>
/* QPNP VADC register definition */
#define QPNP_VADC_REVISION1 0x0
@@ -203,6 +204,7 @@
struct power_supply *vadc_chg_vote;
bool vadc_hc;
int vadc_debug_count;
+ struct pmic_revid_data *pmic_rev_id;
struct sensor_device_attribute sens_attr[0];
};
@@ -232,6 +234,7 @@
[SCALE_SMB1390_DIE_TEMP] = {qpnp_adc_scale_die_temp_1390},
[SCALE_BATT_THERM_TEMP_PU30] = {qpnp_adc_batt_therm_pu30},
[SCALE_BATT_THERM_TEMP_PU400] = {qpnp_adc_batt_therm_pu400},
+ [SCALE_BATT_THERM_TEMP_QRD_215] = {qpnp_adc_batt_therm_qrd_215}
};
static struct qpnp_vadc_rscale_fn adc_vadc_rscale_fn[] = {
@@ -465,6 +468,44 @@
pr_debug("VADC_DIG_PARAM value:0x%x\n", *data);
}
+static int qpnp_vadc_channel_check(struct qpnp_vadc_chip *vadc, u8 buf)
+{
+ int rc = 0;
+ u8 chno = 0;
+
+ rc = qpnp_vadc_read_reg(vadc,
+ QPNP_VADC_HC1_ADC_CH_SEL_CTL, &chno, 1);
+ if (rc < 0) {
+ pr_err("Channel reread failed\n");
+ return rc;
+ }
+
+ if (buf != chno) {
+ pr_debug("channel write fails once: written:0x%x actual:0x%x\n",
+ buf, chno);
+
+ rc = qpnp_vadc_write_reg(vadc,
+ QPNP_VADC_HC1_ADC_CH_SEL_CTL, &buf, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc register configure failed\n");
+ return rc;
+ }
+
+ rc = qpnp_vadc_read_reg(vadc,
+ QPNP_VADC_HC1_ADC_CH_SEL_CTL, &chno, 1);
+ if (rc < 0) {
+ pr_err("qpnp adc configure read failed\n");
+ return rc;
+ }
+
+ if (chno != buf) {
+ pr_err("Write fails twice: written: 0x%x\n", chno);
+ return -EINVAL;
+ }
+ }
+ return 0;
+}
+
static int qpnp_vadc_hc_pre_configure_usb_in(struct qpnp_vadc_chip *vadc,
int dt_index)
{
@@ -472,6 +513,11 @@
u8 buf;
u8 dig_param = 0;
struct qpnp_adc_amux_properties conv;
+ bool channel_check = false;
+
+ if (vadc->pmic_rev_id)
+ if (vadc->pmic_rev_id->pmic_subtype == PMI632_SUBTYPE)
+ channel_check = true;
/* Setup dig params for USB_IN_V */
conv.decimation = DECIMATION_TYPE2;
@@ -496,6 +542,12 @@
if (rc < 0)
return rc;
+ if (channel_check) {
+ rc = qpnp_vadc_channel_check(vadc, buf);
+ if (rc)
+ return rc;
+ }
+
buf = QPNP_VADC_HC1_ADC_EN;
rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_EN_CTL1, &buf, 1);
if (rc < 0)
@@ -520,6 +572,12 @@
if (rc < 0)
return rc;
+ if (channel_check) {
+ rc = qpnp_vadc_channel_check(vadc, buf);
+ if (rc)
+ return rc;
+ }
+
/* Wait for GND read to complete */
rc = qpnp_vadc_wait_for_eoc(vadc);
if (rc < 0)
@@ -541,10 +599,16 @@
struct qpnp_adc_amux_properties *amux_prop)
{
int rc = 0;
- u8 buf[6];
+ u8 buf[5];
+ u8 conv_req = 0;
+ bool channel_check = false;
+
+ if (vadc->pmic_rev_id)
+ if (vadc->pmic_rev_id->pmic_subtype == PMI632_SUBTYPE)
+ channel_check = true;
/* Read registers 0x42 through 0x46 */
- rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 6);
+ rc = qpnp_vadc_read_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 5);
if (rc < 0) {
pr_err("qpnp adc configure block read failed\n");
return rc;
@@ -568,7 +632,7 @@
buf[4] |= QPNP_VADC_HC1_ADC_EN;
/* Select CONV request */
- buf[5] |= QPNP_VADC_HC1_CONV_REQ_START;
+ conv_req = QPNP_VADC_HC1_CONV_REQ_START;
if (!vadc->vadc_poll_eoc)
reinit_completion(&vadc->adc->adc_rslt_completion);
@@ -577,7 +641,20 @@
buf[0], buf[1], buf[2], buf[3]);
/* Block register write from 0x42 through 0x46 */
- rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 6);
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_ADC_DIG_PARAM, buf, 5);
+ if (rc < 0) {
+ pr_err("qpnp adc block register configure failed\n");
+ return rc;
+ }
+
+ if (channel_check) {
+ rc = qpnp_vadc_channel_check(vadc, buf[2]);
+ if (rc)
+ return rc;
+ }
+
+ rc = qpnp_vadc_write_reg(vadc, QPNP_VADC_HC1_CONV_REQ,
+ &conv_req, 1);
if (rc < 0) {
pr_err("qpnp adc block register configure failed\n");
return rc;
@@ -2711,7 +2788,7 @@
struct qpnp_vadc_chip *vadc;
struct qpnp_adc_drv *adc_qpnp;
struct qpnp_vadc_thermal_data *adc_thermal;
- struct device_node *node = pdev->dev.of_node;
+ struct device_node *node = pdev->dev.of_node, *revid_dev_node;
struct device_node *child;
const struct of_device_id *id;
int rc, count_adc_channel_list = 0, i = 0;
@@ -2765,6 +2842,16 @@
return -ENOMEM;
}
+ revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+ if (revid_dev_node) {
+ vadc->pmic_rev_id = get_revid_data(revid_dev_node);
+ if (IS_ERR(vadc->pmic_rev_id)) {
+ pr_err("Unable to get revid\n");
+ vadc->pmic_rev_id = NULL;
+ }
+ of_node_put(revid_dev_node);
+ }
+
vadc->vadc_therm_chan = adc_thermal;
if (!strcmp(id->compatible, "qcom,qpnp-vadc-hc")) {
vadc->vadc_hc = true;
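qpnp_vadc_channel_check() above is a write/read-back/retry guard for a channel-select register that can silently drop a write on PMI632 (hence the pmic_rev_id gate). The general shape, with the VADC specifics replaced by hypothetical my_read_reg()/my_write_reg() accessors:

/* Sketch: verify a register write and retry once before failing. */
static int write_verified(void *dev, u16 reg, u8 val)
{
	int tries, rc;
	u8 readback;

	for (tries = 0; tries < 2; tries++) {
		rc = my_write_reg(dev, reg, val);	/* hypothetical */
		if (rc < 0)
			return rc;
		rc = my_read_reg(dev, reg, &readback);	/* hypothetical */
		if (rc < 0)
			return rc;
		if (readback == val)
			return 0;
	}

	return -EINVAL;		/* failed twice, as the driver does */
}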
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 663017f..26f1691 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -1408,6 +1408,13 @@
}
#ifdef CONFIG_ACPI
+static bool i801_acpi_is_smbus_ioport(const struct i801_priv *priv,
+ acpi_physical_address address)
+{
+ return address >= priv->smba &&
+ address <= pci_resource_end(priv->pci_dev, SMBBAR);
+}
+
static acpi_status
i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
u64 *value, void *handler_context, void *region_context)
@@ -1423,7 +1430,7 @@
*/
mutex_lock(&priv->acpi_lock);
- if (!priv->acpi_reserved) {
+ if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
priv->acpi_reserved = true;
dev_warn(&pdev->dev, "BIOS is accessing SMBus registers\n");
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 7aa7b9c..efefcfa 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -152,6 +152,7 @@
mt_params[3].type = ACPI_TYPE_INTEGER;
mt_params[3].integer.value = len;
mt_params[4].type = ACPI_TYPE_BUFFER;
+ mt_params[4].buffer.length = len;
mt_params[4].buffer.pointer = data->block + 1;
}
break;
diff --git a/drivers/i2c/busses/i2c-uniphier-f.c b/drivers/i2c/busses/i2c-uniphier-f.c
index db9105e..0da4991 100644
--- a/drivers/i2c/busses/i2c-uniphier-f.c
+++ b/drivers/i2c/busses/i2c-uniphier-f.c
@@ -400,11 +400,8 @@
return ret;
for (msg = msgs; msg < emsg; msg++) {
- /* If next message is read, skip the stop condition */
- bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
- /* but, force it if I2C_M_STOP is set */
- if (msg->flags & I2C_M_STOP)
- stop = true;
+ /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
ret = uniphier_fi2c_master_xfer_one(adap, msg, stop);
if (ret)
diff --git a/drivers/i2c/busses/i2c-uniphier.c b/drivers/i2c/busses/i2c-uniphier.c
index 56e92af..fdfcee92 100644
--- a/drivers/i2c/busses/i2c-uniphier.c
+++ b/drivers/i2c/busses/i2c-uniphier.c
@@ -247,11 +247,8 @@
return ret;
for (msg = msgs; msg < emsg; msg++) {
- /* If next message is read, skip the stop condition */
- bool stop = !(msg + 1 < emsg && msg[1].flags & I2C_M_RD);
- /* but, force it if I2C_M_STOP is set */
- if (msg->flags & I2C_M_STOP)
- stop = true;
+ /* Emit STOP if it is the last message or I2C_M_STOP is set. */
+ bool stop = (msg + 1 == emsg) || (msg->flags & I2C_M_STOP);
ret = uniphier_i2c_master_xfer_one(adap, msg, stop);
if (ret)
diff --git a/drivers/iio/imu/Kconfig b/drivers/iio/imu/Kconfig
index 0a8b763..770af2f 100644
--- a/drivers/iio/imu/Kconfig
+++ b/drivers/iio/imu/Kconfig
@@ -40,6 +40,8 @@
source "drivers/iio/imu/inv_mpu6050/Kconfig"
source "drivers/iio/imu/inv_icm20602/Kconfig"
+source "drivers/iio/imu/inv_mpu/Kconfig"
+source "drivers/iio/imu/st_asm330lhh/Kconfig"
endmenu
diff --git a/drivers/iio/imu/Makefile b/drivers/iio/imu/Makefile
index fab6a5e..c70e984 100644
--- a/drivers/iio/imu/Makefile
+++ b/drivers/iio/imu/Makefile
@@ -14,6 +14,8 @@
obj-$(CONFIG_IIO_ADIS_LIB) += adis_lib.o
obj-y += bmi160/
+obj-y += inv_mpu/
+obj-y += st_asm330lhh/
obj-y += inv_mpu6050/
obj-y += inv_icm20602/
diff --git a/drivers/infiniband/core/rw.c b/drivers/infiniband/core/rw.c
index dbfd854..1d90a12 100644
--- a/drivers/infiniband/core/rw.c
+++ b/drivers/infiniband/core/rw.c
@@ -87,7 +87,7 @@
}
ret = ib_map_mr_sg(reg->mr, sg, nents, &offset, PAGE_SIZE);
- if (ret < nents) {
+ if (ret < 0 || ret < nents) {
ib_mr_pool_put(qp, &qp->rdma_mrs, reg->mr);
return -EINVAL;
}
diff --git a/drivers/infiniband/core/ucma.c b/drivers/infiniband/core/ucma.c
index 3bef6d4..fa9ef8e 100644
--- a/drivers/infiniband/core/ucma.c
+++ b/drivers/infiniband/core/ucma.c
@@ -124,6 +124,8 @@
static DEFINE_IDR(ctx_idr);
static DEFINE_IDR(multicast_idr);
+static const struct file_operations ucma_fops;
+
static inline struct ucma_context *_ucma_find_context(int id,
struct ucma_file *file)
{
@@ -1545,6 +1547,10 @@
f = fdget(cmd.fd);
if (!f.file)
return -ENOENT;
+ if (f.file->f_op != &ucma_fops) {
+ ret = -EINVAL;
+ goto file_put;
+ }
/* Validate current fd and prevent destruction of id. */
ctx = ucma_get_ctx(f.file->private_data, cmd.id);
@@ -1714,6 +1720,8 @@
mutex_lock(&mut);
if (!ctx->closing) {
mutex_unlock(&mut);
+ ucma_put_ctx(ctx);
+ wait_for_completion(&ctx->comp);
/* rdma_destroy_id ensures that no event handlers are
* inflight for that id before releasing it.
*/
diff --git a/drivers/infiniband/hw/cxgb4/qp.c b/drivers/infiniband/hw/cxgb4/qp.c
index cc2243f..bb45eb2 100644
--- a/drivers/infiniband/hw/cxgb4/qp.c
+++ b/drivers/infiniband/hw/cxgb4/qp.c
@@ -1258,6 +1258,12 @@
t4_set_wq_in_error(&qhp->wq);
if (qhp->ibqp.uobject) {
+
+ /* for user qps, qhp->wq.flushed is protected by qhp->mutex */
+ if (qhp->wq.flushed)
+ return;
+
+ qhp->wq.flushed = 1;
t4_set_cq_in_error(&rchp->cq);
spin_lock_irqsave(&rchp->comp_handler_lock, flag);
(*rchp->ibcq.comp_handler)(&rchp->ibcq, rchp->ibcq.cq_context);
diff --git a/drivers/infiniband/hw/hfi1/pio.c b/drivers/infiniband/hw/hfi1/pio.c
index d89b874..c2982bb 100644
--- a/drivers/infiniband/hw/hfi1/pio.c
+++ b/drivers/infiniband/hw/hfi1/pio.c
@@ -88,6 +88,7 @@
unsigned long flags;
int write = 1; /* write sendctrl back */
int flush = 0; /* re-read sendctrl to make sure it is flushed */
+ int i;
spin_lock_irqsave(&dd->sendctrl_lock, flags);
@@ -97,9 +98,13 @@
reg |= SEND_CTRL_SEND_ENABLE_SMASK;
/* Fall through */
case PSC_DATA_VL_ENABLE:
+ mask = 0;
+ for (i = 0; i < ARRAY_SIZE(dd->vld); i++)
+ if (!dd->vld[i].mtu)
+ mask |= BIT_ULL(i);
/* Disallow sending on VLs not enabled */
- mask = (((~0ull) << num_vls) & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
- SEND_CTRL_UNSUPPORTED_VL_SHIFT;
+ mask = (mask & SEND_CTRL_UNSUPPORTED_VL_MASK) <<
+ SEND_CTRL_UNSUPPORTED_VL_SHIFT;
reg = (reg & ~SEND_CTRL_UNSUPPORTED_VL_SMASK) | mask;
break;
case PSC_GLOBAL_DISABLE:
diff --git a/drivers/infiniband/hw/hfi1/user_sdma.c b/drivers/infiniband/hw/hfi1/user_sdma.c
index 77697d6..018a415 100644
--- a/drivers/infiniband/hw/hfi1/user_sdma.c
+++ b/drivers/infiniband/hw/hfi1/user_sdma.c
@@ -956,7 +956,7 @@
if (ACCESS_ONCE(iovec->offset) == iovec->iov.iov_len) {
if (++req->iov_idx == req->data_iovs) {
ret = -EFAULT;
- goto free_txreq;
+ goto free_tx;
}
iovec = &req->iovs[req->iov_idx];
WARN_ON(iovec->offset);
diff --git a/drivers/infiniband/hw/hfi1/verbs.c b/drivers/infiniband/hw/hfi1/verbs.c
index 01a380e..14ddb75 100644
--- a/drivers/infiniband/hw/hfi1/verbs.c
+++ b/drivers/infiniband/hw/hfi1/verbs.c
@@ -1511,12 +1511,18 @@
struct hfi1_pportdata *ppd;
struct hfi1_devdata *dd;
u8 sc5;
+ u8 sl;
/* test the mapping for validity */
ibp = to_iport(ibdev, ah_attr->port_num);
ppd = ppd_from_ibp(ibp);
- sc5 = ibp->sl_to_sc[ah_attr->sl];
dd = dd_from_ppd(ppd);
+
+ sl = ah_attr->sl;
+ if (sl >= ARRAY_SIZE(ibp->sl_to_sc))
+ return -EINVAL;
+
+ sc5 = ibp->sl_to_sc[sl];
if (sc_to_vlt(dd, sc5) > num_vls && sc_to_vlt(dd, sc5) != 0xf)
return -EINVAL;
return 0;
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 463ea59..6463590 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -2639,7 +2639,7 @@
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_rdma_ch *ch;
- int i;
+ int i, j;
u8 status;
shost_printk(KERN_ERR, target->scsi_host, "SRP reset_device called\n");
@@ -2653,8 +2653,8 @@
for (i = 0; i < target->ch_count; i++) {
ch = &target->ch[i];
- for (i = 0; i < target->req_ring_size; ++i) {
- struct srp_request *req = &ch->req_ring[i];
+ for (j = 0; j < target->req_ring_size; ++j) {
+ struct srp_request *req = &ch->req_ring[j];
srp_finish_req(ch, req, scmnd->device, DID_RESET << 16);
}
diff --git a/drivers/input/Kconfig b/drivers/input/Kconfig
index 34ffa02..de41b16 100644
--- a/drivers/input/Kconfig
+++ b/drivers/input/Kconfig
@@ -214,6 +214,8 @@
source "drivers/input/misc/Kconfig"
+source "drivers/input/sensors/smi130/Kconfig"
+
source "drivers/input/rmi4/Kconfig"
endif
diff --git a/drivers/input/Makefile b/drivers/input/Makefile
index d01a5b1..388ab27 100644
--- a/drivers/input/Makefile
+++ b/drivers/input/Makefile
@@ -33,4 +33,4 @@
obj-$(CONFIG_INPUT_KEYRESET) += keyreset.o
obj-$(CONFIG_INPUT_KEYCOMBO) += keycombo.o
-
+obj-y += sensors/smi130/
diff --git a/drivers/input/keyboard/atakbd.c b/drivers/input/keyboard/atakbd.c
index f123583..fdeda0b 100644
--- a/drivers/input/keyboard/atakbd.c
+++ b/drivers/input/keyboard/atakbd.c
@@ -79,8 +79,7 @@
*/
-static unsigned char atakbd_keycode[0x72] = { /* American layout */
- [0] = KEY_GRAVE,
+static unsigned char atakbd_keycode[0x73] = { /* American layout */
[1] = KEY_ESC,
[2] = KEY_1,
[3] = KEY_2,
@@ -121,9 +120,9 @@
[38] = KEY_L,
[39] = KEY_SEMICOLON,
[40] = KEY_APOSTROPHE,
- [41] = KEY_BACKSLASH, /* FIXME, '#' */
+ [41] = KEY_GRAVE,
[42] = KEY_LEFTSHIFT,
- [43] = KEY_GRAVE, /* FIXME: '~' */
+ [43] = KEY_BACKSLASH,
[44] = KEY_Z,
[45] = KEY_X,
[46] = KEY_C,
@@ -149,45 +148,34 @@
[66] = KEY_F8,
[67] = KEY_F9,
[68] = KEY_F10,
- [69] = KEY_ESC,
- [70] = KEY_DELETE,
- [71] = KEY_KP7,
- [72] = KEY_KP8,
- [73] = KEY_KP9,
+ [71] = KEY_HOME,
+ [72] = KEY_UP,
[74] = KEY_KPMINUS,
- [75] = KEY_KP4,
- [76] = KEY_KP5,
- [77] = KEY_KP6,
+ [75] = KEY_LEFT,
+ [77] = KEY_RIGHT,
[78] = KEY_KPPLUS,
- [79] = KEY_KP1,
- [80] = KEY_KP2,
- [81] = KEY_KP3,
- [82] = KEY_KP0,
- [83] = KEY_KPDOT,
- [90] = KEY_KPLEFTPAREN,
- [91] = KEY_KPRIGHTPAREN,
- [92] = KEY_KPASTERISK, /* FIXME */
- [93] = KEY_KPASTERISK,
- [94] = KEY_KPPLUS,
- [95] = KEY_HELP,
+ [80] = KEY_DOWN,
+ [82] = KEY_INSERT,
+ [83] = KEY_DELETE,
[96] = KEY_102ND,
- [97] = KEY_KPASTERISK, /* FIXME */
- [98] = KEY_KPSLASH,
+ [97] = KEY_UNDO,
+ [98] = KEY_HELP,
[99] = KEY_KPLEFTPAREN,
[100] = KEY_KPRIGHTPAREN,
[101] = KEY_KPSLASH,
[102] = KEY_KPASTERISK,
- [103] = KEY_UP,
- [104] = KEY_KPASTERISK, /* FIXME */
- [105] = KEY_LEFT,
- [106] = KEY_RIGHT,
- [107] = KEY_KPASTERISK, /* FIXME */
- [108] = KEY_DOWN,
- [109] = KEY_KPASTERISK, /* FIXME */
- [110] = KEY_KPASTERISK, /* FIXME */
- [111] = KEY_KPASTERISK, /* FIXME */
- [112] = KEY_KPASTERISK, /* FIXME */
- [113] = KEY_KPASTERISK /* FIXME */
+ [103] = KEY_KP7,
+ [104] = KEY_KP8,
+ [105] = KEY_KP9,
+ [106] = KEY_KP4,
+ [107] = KEY_KP5,
+ [108] = KEY_KP6,
+ [109] = KEY_KP1,
+ [110] = KEY_KP2,
+ [111] = KEY_KP3,
+ [112] = KEY_KP0,
+ [113] = KEY_KPDOT,
+ [114] = KEY_KPENTER,
};
static struct input_dev *atakbd_dev;
@@ -195,21 +183,15 @@
static void atakbd_interrupt(unsigned char scancode, char down)
{
- if (scancode < 0x72) { /* scancodes < 0xf2 are keys */
+ if (scancode < 0x73) { /* scancodes < 0xf3 are keys */
// report raw events here?
scancode = atakbd_keycode[scancode];
- if (scancode == KEY_CAPSLOCK) { /* CapsLock is a toggle switch key on Amiga */
- input_report_key(atakbd_dev, scancode, 1);
- input_report_key(atakbd_dev, scancode, 0);
- input_sync(atakbd_dev);
- } else {
- input_report_key(atakbd_dev, scancode, down);
- input_sync(atakbd_dev);
- }
- } else /* scancodes >= 0xf2 are mouse data, most likely */
+ input_report_key(atakbd_dev, scancode, down);
+ input_sync(atakbd_dev);
+ } else /* scancodes >= 0xf3 are mouse data, most likely */
printk(KERN_INFO "atakbd: unhandled scancode %x\n", scancode);
return;
diff --git a/drivers/input/mouse/elantech.c b/drivers/input/mouse/elantech.c
index 4e77adb..c120afd 100644
--- a/drivers/input/mouse/elantech.c
+++ b/drivers/input/mouse/elantech.c
@@ -1176,6 +1176,8 @@
static const char * const middle_button_pnp_ids[] = {
"LEN2131", /* ThinkPad P52 w/ NFC */
"LEN2132", /* ThinkPad P52 */
+ "LEN2133", /* ThinkPad P72 w/ NFC */
+ "LEN2134", /* ThinkPad P72 */
NULL
};
diff --git a/drivers/input/sensors/smi130/smi130_acc.c b/drivers/input/sensors/smi130/smi130_acc.c
index 4828b39..3979b65 100644
--- a/drivers/input/sensors/smi130/smi130_acc.c
+++ b/drivers/input/sensors/smi130/smi130_acc.c
@@ -154,7 +154,7 @@
#include "bs_log.h"
#define DRIVER_VERSION "0.0.53.0"
#define ACC_NAME "ACC"
-#define SMI_ACC2X2_ENABLE_INT1 1
+#define SMI_ACC2X2_ENABLE_INT2 1
#define CONFIG_SMI_ACC_ENABLE_NEWDATA_INT 1
#define SENSOR_NAME "smi130_acc"
@@ -6802,7 +6802,7 @@
#endif
smi130_acc_get_interruptstatus1(smi130_acc->smi130_acc_client, &status);
- PINFO("smi130_acc_irq_work_func, status = 0x%x\n", status);
+ PDEBUG("smi130_acc_irq_work_func, status = 0x%x\n", status);
#ifdef CONFIG_SIG_MOTION
if (status & 0x04) {
@@ -7404,64 +7404,6 @@
mutex_unlock(&data->enable_mutex);
}
-#ifdef CONFIG_PM
-static int smi130_acc_suspend(struct i2c_client *client, pm_message_t mesg)
-{
- struct smi130_acc_data *data = i2c_get_clientdata(client);
-
- mutex_lock(&data->enable_mutex);
- if (atomic_read(&data->enable) == 1) {
- smi130_acc_set_mode(data->smi130_acc_client,
- SMI_ACC2X2_MODE_SUSPEND, SMI_ACC_ENABLED_INPUT);
-#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
- cancel_delayed_work_sync(&data->work);
-#endif
- }
- if (data->is_timer_running) {
- hrtimer_cancel(&data->timer);
- data->base_time = 0;
- data->timestamp = 0;
- data->fifo_time = 0;
- data->acc_count = 0;
- }
- mutex_unlock(&data->enable_mutex);
-
- return 0;
-}
-
-static int smi130_acc_resume(struct i2c_client *client)
-{
- struct smi130_acc_data *data = i2c_get_clientdata(client);
-
- mutex_lock(&data->enable_mutex);
- if (atomic_read(&data->enable) == 1) {
- smi130_acc_set_mode(data->smi130_acc_client,
- SMI_ACC2X2_MODE_NORMAL, SMI_ACC_ENABLED_INPUT);
-#ifndef CONFIG_SMI_ACC_ENABLE_NEWDATA_INT
- schedule_delayed_work(&data->work,
- msecs_to_jiffies(atomic_read(&data->delay)));
-#endif
- }
- if (data->is_timer_running) {
- hrtimer_start(&data->timer,
- ns_to_ktime(data->time_odr),
- HRTIMER_MODE_REL);
- data->base_time = 0;
- data->timestamp = 0;
- data->is_timer_running = 1;
- }
- mutex_unlock(&data->enable_mutex);
-
- return 0;
-}
-
-#else
-
-#define smi130_acc_suspend NULL
-#define smi130_acc_resume NULL
-
-#endif /* CONFIG_PM */
-
static const struct i2c_device_id smi130_acc_id[] = {
{ SENSOR_NAME, 0 },
{ }
@@ -7480,8 +7422,6 @@
.name = SENSOR_NAME,
.of_match_table = smi130_acc_of_match,
},
- //.suspend = smi130_acc_suspend,
- //.resume = smi130_acc_resume,
.id_table = smi130_acc_id,
.probe = smi130_acc_probe,
.remove = smi130_acc_remove,
diff --git a/drivers/input/sensors/smi130/smi130_gyro_driver.c b/drivers/input/sensors/smi130/smi130_gyro_driver.c
index 65e303c..5ed9513 100644
--- a/drivers/input/sensors/smi130/smi130_gyro_driver.c
+++ b/drivers/input/sensors/smi130/smi130_gyro_driver.c
@@ -293,10 +293,9 @@
static void smi_gyro_dump_reg(struct i2c_client *client);
static int smi_gyro_check_chip_id(struct i2c_client *client);
-static int smi_gyro_pre_suspend(struct i2c_client *client);
-static int smi_gyro_post_resume(struct i2c_client *client);
-
#ifdef CONFIG_HAS_EARLYSUSPEND
+static int smi_gyro_post_resume(struct i2c_client *client);
+static int smi_gyro_pre_suspend(struct i2c_client *client);
static void smi_gyro_early_suspend(struct early_suspend *handler);
static void smi_gyro_late_resume(struct early_suspend *handler);
#endif
@@ -1812,6 +1811,7 @@
return err;
}
+#ifdef CONFIG_HAS_EARLYSUSPEND
static int smi_gyro_pre_suspend(struct i2c_client *client)
{
int err = 0;
@@ -1861,7 +1861,6 @@
return err;
}
-#ifdef CONFIG_HAS_EARLYSUSPEND
static void smi_gyro_early_suspend(struct early_suspend *handler)
{
int err = 0;
@@ -1902,45 +1901,6 @@
mutex_unlock(&client_data->mutex_op_mode);
}
-#else
-static int smi_gyro_suspend(struct i2c_client *client, pm_message_t mesg)
-{
- int err = 0;
- struct smi_gyro_client_data *client_data =
- (struct smi_gyro_client_data *)i2c_get_clientdata(client);
-
- PINFO("function entrance");
-
- mutex_lock(&client_data->mutex_op_mode);
- if (client_data->enable) {
- err = smi_gyro_pre_suspend(client);
- err = SMI_GYRO_CALL_API(set_mode)(
- SMI_GYRO_VAL_NAME(MODE_SUSPEND));
- }
- mutex_unlock(&client_data->mutex_op_mode);
- return err;
-}
-
-static int smi_gyro_resume(struct i2c_client *client)
-{
-
- int err = 0;
- struct smi_gyro_client_data *client_data =
- (struct smi_gyro_client_data *)i2c_get_clientdata(client);
-
- PINFO("function entrance");
-
- mutex_lock(&client_data->mutex_op_mode);
-
- if (client_data->enable)
- err = SMI_GYRO_CALL_API(set_mode)(SMI_GYRO_VAL_NAME(MODE_NORMAL));
-
- /* post resume operation */
- smi_gyro_post_resume(client);
-
- mutex_unlock(&client_data->mutex_op_mode);
- return err;
-}
#endif
void smi_gyro_shutdown(struct i2c_client *client)
@@ -2012,10 +1972,6 @@
.probe = smi_gyro_probe,
.remove = smi_gyro_remove,
.shutdown = smi_gyro_shutdown,
-#ifndef CONFIG_HAS_EARLYSUSPEND
- //.suspend = smi_gyro_suspend,
- //.resume = smi_gyro_resume,
-#endif
};
static int __init SMI_GYRO_init(void)
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index 0c910a8..bba1b9f 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -288,7 +288,13 @@
/* The callers make sure that get_device_id() does not fail here */
devid = get_device_id(dev);
+
+ /* For ACPI HID devices, we simply return the devid as such */
+ if (!dev_is_pci(dev))
+ return devid;
+
ivrs_alias = amd_iommu_alias_table[devid];
+
pci_for_each_dma_alias(pdev, __last_alias, &pci_alias);
if (ivrs_alias == pci_alias)
@@ -2452,9 +2458,9 @@
}
if (amd_iommu_unmap_flush) {
- dma_ops_free_iova(dma_dom, dma_addr, pages);
domain_flush_tlb(&dma_dom->domain);
domain_flush_complete(&dma_dom->domain);
+ dma_ops_free_iova(dma_dom, dma_addr, pages);
} else {
queue_add(dma_dom, dma_addr, pages);
}
diff --git a/drivers/irqchip/qcom/mpm.c b/drivers/irqchip/qcom/mpm.c
index 5e352f4..813b6b3 100644
--- a/drivers/irqchip/qcom/mpm.c
+++ b/drivers/irqchip/qcom/mpm.c
@@ -417,7 +417,7 @@
}
-static void system_pm_exit_sleep(void)
+static void system_pm_exit_sleep(bool success)
{
msm_rpm_exit_sleep();
}
diff --git a/drivers/leds/leds-qpnp-vibrator.c b/drivers/leds/leds-qpnp-vibrator.c
index cc2615d..81f54f0 100644
--- a/drivers/leds/leds-qpnp-vibrator.c
+++ b/drivers/leds/leds-qpnp-vibrator.c
@@ -391,7 +391,6 @@
(vib->vib_play_ms % 1000) * 1000000),
HRTIMER_MODE_REL);
}
- vib->vib_play_ms = 0;
mutex_unlock(&vib->lock);
schedule_work(&vib->work);
diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c
index 25852e3..c5aba26 100644
--- a/drivers/macintosh/rack-meter.c
+++ b/drivers/macintosh/rack-meter.c
@@ -52,8 +52,8 @@
struct rackmeter_cpu {
struct delayed_work sniffer;
struct rackmeter *rm;
- cputime64_t prev_wall;
- cputime64_t prev_idle;
+ u64 prev_wall;
+ u64 prev_idle;
int zero;
} ____cacheline_aligned;
@@ -81,7 +81,7 @@
/* This is copied from cpufreq_ondemand, maybe we should put it in
* a common header somewhere
*/
-static inline cputime64_t get_cpu_idle_time(unsigned int cpu)
+static inline u64 get_cpu_idle_time(unsigned int cpu)
{
u64 retval;
@@ -217,23 +217,23 @@
container_of(work, struct rackmeter_cpu, sniffer.work);
struct rackmeter *rm = rcpu->rm;
unsigned int cpu = smp_processor_id();
- cputime64_t cur_jiffies, total_idle_ticks;
- unsigned int total_ticks, idle_ticks;
+ u64 cur_nsecs, total_idle_nsecs;
+ u64 total_nsecs, idle_nsecs;
int i, offset, load, cumm, pause;
- cur_jiffies = jiffies64_to_cputime64(get_jiffies_64());
- total_ticks = (unsigned int) (cur_jiffies - rcpu->prev_wall);
- rcpu->prev_wall = cur_jiffies;
+ cur_nsecs = jiffies64_to_nsecs(get_jiffies_64());
+ total_nsecs = cur_nsecs - rcpu->prev_wall;
+ rcpu->prev_wall = cur_nsecs;
- total_idle_ticks = get_cpu_idle_time(cpu);
- idle_ticks = (unsigned int) (total_idle_ticks - rcpu->prev_idle);
- idle_ticks = min(idle_ticks, total_ticks);
- rcpu->prev_idle = total_idle_ticks;
+ total_idle_nsecs = get_cpu_idle_time(cpu);
+ idle_nsecs = total_idle_nsecs - rcpu->prev_idle;
+ idle_nsecs = min(idle_nsecs, total_nsecs);
+ rcpu->prev_idle = total_idle_nsecs;
/* We do a very dumb calculation to update the LEDs for now,
* we'll do better once we have actual PWM implemented
*/
- load = (9 * (total_ticks - idle_ticks)) / total_ticks;
+ load = div64_u64(9 * (total_nsecs - idle_nsecs), total_nsecs);
offset = cpu << 3;
cumm = 0;
@@ -278,7 +278,7 @@
continue;
rcpu = &rm->cpu[cpu];
rcpu->prev_idle = get_cpu_idle_time(cpu);
- rcpu->prev_wall = jiffies64_to_cputime64(get_jiffies_64());
+ rcpu->prev_wall = jiffies64_to_nsecs(get_jiffies_64());
schedule_delayed_work_on(cpu, &rm->cpu[cpu].sniffer,
msecs_to_jiffies(CPU_SAMPLING_RATE));
}
diff --git a/drivers/md/dm-cache-metadata.c b/drivers/md/dm-cache-metadata.c
index a184c98..62eb4b7 100644
--- a/drivers/md/dm-cache-metadata.c
+++ b/drivers/md/dm-cache-metadata.c
@@ -1262,8 +1262,8 @@
if (hints_valid) {
r = dm_array_cursor_next(&cmd->hint_cursor);
if (r) {
- DMERR("dm_array_cursor_next for hint failed");
- goto out;
+ dm_array_cursor_end(&cmd->hint_cursor);
+ hints_valid = false;
}
}
}
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index bed056c..f3993a4 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -3390,8 +3390,13 @@
static bool can_resize(struct cache *cache, dm_cblock_t new_size)
{
- if (from_cblock(new_size) > from_cblock(cache->cache_size))
- return true;
+ if (from_cblock(new_size) > from_cblock(cache->cache_size)) {
+ if (cache->sized) {
+ DMERR("%s: unable to extend cache due to missing cache table reload",
+ cache_device_name(cache));
+ return false;
+ }
+ }
/*
* We can't drop a dirty block when shrinking the cache.
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 7a5b75f..a4ac5f6 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -113,7 +113,8 @@
* and encrypts / decrypts at the same time.
*/
enum flags { DM_CRYPT_SUSPENDED, DM_CRYPT_KEY_VALID,
- DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD };
+ DM_CRYPT_SAME_CPU, DM_CRYPT_NO_OFFLOAD,
+ DM_CRYPT_ENCRYPT_OVERRIDE };
/*
* The fields in here must be read only after initialization.
@@ -1858,6 +1859,9 @@
else if (!strcasecmp(opt_string, "submit_from_crypt_cpus"))
set_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+ else if (!strcasecmp(opt_string, "allow_encrypt_override"))
+ set_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags);
+
else {
ti->error = "Invalid feature arguments";
goto bad;
@@ -1918,12 +1922,15 @@
struct crypt_config *cc = ti->private;
/*
- * If bio is REQ_PREFLUSH or REQ_OP_DISCARD, just bypass crypt queues.
+ * If bio is REQ_PREFLUSH, REQ_NOENCRYPT, or REQ_OP_DISCARD,
+ * just bypass crypt queues.
* - for REQ_PREFLUSH device-mapper core ensures that no IO is in-flight
* - for REQ_OP_DISCARD caller must use flush if IO ordering matters
*/
- if (unlikely(bio->bi_opf & REQ_PREFLUSH ||
- bio_op(bio) == REQ_OP_DISCARD)) {
+ if (unlikely(bio->bi_opf & REQ_PREFLUSH) ||
+ (unlikely(bio->bi_opf & REQ_NOENCRYPT) &&
+ test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags)) ||
+ bio_op(bio) == REQ_OP_DISCARD) {
bio->bi_bdev = cc->dev->bdev;
if (bio_sectors(bio))
bio->bi_iter.bi_sector = cc->start +
@@ -1978,6 +1985,7 @@
num_feature_args += !!ti->num_discard_bios;
num_feature_args += test_bit(DM_CRYPT_SAME_CPU, &cc->flags);
num_feature_args += test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags);
+ num_feature_args += test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags);
if (num_feature_args) {
DMEMIT(" %d", num_feature_args);
if (ti->num_discard_bios)
@@ -1986,6 +1994,8 @@
DMEMIT(" same_cpu_crypt");
if (test_bit(DM_CRYPT_NO_OFFLOAD, &cc->flags))
DMEMIT(" submit_from_crypt_cpus");
+ if (test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags))
+ DMEMIT(" allow_encrypt_override");
}
break;
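With the new allow_encrypt_override feature armed, the map path treats a REQ_NOENCRYPT bio like the existing flush/discard cases: remap straight to the underlying device and skip the crypt queues. The whole decision reduces to one predicate; a sketch under the assumption that REQ_NOENCRYPT exists in this tree (it is not an upstream flag):

/* Sketch: should this bio bypass the crypt queues entirely? */
static bool crypt_should_bypass(struct crypt_config *cc, struct bio *bio)
{
	if (bio->bi_opf & REQ_PREFLUSH)
		return true;
	if (bio_op(bio) == REQ_OP_DISCARD)
		return true;
	/* the new case: issuer opted out and this target allows it */
	return (bio->bi_opf & REQ_NOENCRYPT) &&
	       test_bit(DM_CRYPT_ENCRYPT_OVERRIDE, &cc->flags);
}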
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index ee75e35..3f389b2 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -2880,6 +2880,11 @@
set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
rs_set_new(rs);
} else if (rs_is_recovering(rs)) {
+ /* Rebuild particular devices */
+ if (test_bit(__CTR_FLAG_REBUILD, &rs->ctr_flags)) {
+ set_bit(RT_FLAG_UPDATE_SBS, &rs->runtime_flags);
+ rs_setup_recovery(rs, MaxSector);
+ }
/* A recovering raid set may be resized */
; /* skip setup rs */
} else if (rs_is_reshaping(rs)) {
diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
index e976f4f..149fbac 100644
--- a/drivers/md/dm-thin-metadata.c
+++ b/drivers/md/dm-thin-metadata.c
@@ -190,6 +190,12 @@
sector_t data_block_size;
/*
+ * We reserve a section of the metadata for commit overhead.
+ * All reported space does *not* include this.
+ */
+ dm_block_t metadata_reserve;
+
+ /*
* Set if a transaction has to be aborted but the attempt to roll back
* to the previous (good) transaction failed. The only pool metadata
* operation possible in this state is the closing of the device.
@@ -827,6 +833,20 @@
return dm_tm_commit(pmd->tm, sblock);
}
+static void __set_metadata_reserve(struct dm_pool_metadata *pmd)
+{
+ int r;
+ dm_block_t total;
+ dm_block_t max_blocks = 4096; /* 16M */
+
+ r = dm_sm_get_nr_blocks(pmd->metadata_sm, &total);
+ if (r) {
+ DMERR("could not get size of metadata device");
+ pmd->metadata_reserve = max_blocks;
+ } else
+ pmd->metadata_reserve = min(max_blocks, div_u64(total, 10));
+}
+
struct dm_pool_metadata *dm_pool_metadata_open(struct block_device *bdev,
sector_t data_block_size,
bool format_device)
@@ -860,6 +880,8 @@
return ERR_PTR(r);
}
+ __set_metadata_reserve(pmd);
+
return pmd;
}
@@ -1831,6 +1853,13 @@
down_read(&pmd->root_lock);
if (!pmd->fail_io)
r = dm_sm_get_nr_free(pmd->metadata_sm, result);
+
+ if (!r) {
+ if (*result < pmd->metadata_reserve)
+ *result = 0;
+ else
+ *result -= pmd->metadata_reserve;
+ }
up_read(&pmd->root_lock);
return r;
@@ -1943,8 +1972,11 @@
int r = -EINVAL;
down_write(&pmd->root_lock);
- if (!pmd->fail_io)
+ if (!pmd->fail_io) {
r = __resize_space_map(pmd->metadata_sm, new_count);
+ if (!r)
+ __set_metadata_reserve(pmd);
+ }
up_write(&pmd->root_lock);
return r;
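The reserve added here is sized to min(4096 blocks, one tenth of the metadata device) and silently deducted from every free-block report, so userspace sees zero free while the pool still keeps room to commit. The arithmetic in isolation:

#include <linux/kernel.h>
#include <linux/math64.h>

/* Sketch: reserve = min(16M worth of 4K blocks, 10% of the device). */
static u64 metadata_reserve(u64 total_blocks)
{
	u64 max_blocks = 4096;	/* 16M at 4K per metadata block */

	return min(max_blocks, div_u64(total_blocks, 10));
}

/* Sketch: report free space net of the reserve, clamped at zero. */
static u64 reported_free(u64 free_blocks, u64 reserve)
{
	return free_blocks > reserve ? free_blocks - reserve : 0;
}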
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 5a03e15..e697283 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -200,7 +200,13 @@
enum pool_mode {
PM_WRITE, /* metadata may be changed */
PM_OUT_OF_DATA_SPACE, /* metadata may be changed, though data may not be allocated */
+
+ /*
+ * Like READ_ONLY, except may switch back to WRITE on metadata resize. Reported as READ_ONLY.
+ */
+ PM_OUT_OF_METADATA_SPACE,
PM_READ_ONLY, /* metadata may not be changed */
+
PM_FAIL, /* all I/O fails */
};
@@ -1386,7 +1392,35 @@
static void requeue_bios(struct pool *pool);
-static void check_for_space(struct pool *pool)
+static bool is_read_only_pool_mode(enum pool_mode mode)
+{
+ return (mode == PM_OUT_OF_METADATA_SPACE || mode == PM_READ_ONLY);
+}
+
+static bool is_read_only(struct pool *pool)
+{
+ return is_read_only_pool_mode(get_pool_mode(pool));
+}
+
+static void check_for_metadata_space(struct pool *pool)
+{
+ int r;
+ const char *ooms_reason = NULL;
+ dm_block_t nr_free;
+
+ r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free);
+ if (r)
+ ooms_reason = "Could not get free metadata blocks";
+ else if (!nr_free)
+ ooms_reason = "No free metadata blocks";
+
+ if (ooms_reason && !is_read_only(pool)) {
+ DMERR("%s", ooms_reason);
+ set_pool_mode(pool, PM_OUT_OF_METADATA_SPACE);
+ }
+}
+
+static void check_for_data_space(struct pool *pool)
{
int r;
dm_block_t nr_free;
@@ -1412,14 +1446,16 @@
{
int r;
- if (get_pool_mode(pool) >= PM_READ_ONLY)
+ if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE)
return -EINVAL;
r = dm_pool_commit_metadata(pool->pmd);
if (r)
metadata_operation_failed(pool, "dm_pool_commit_metadata", r);
- else
- check_for_space(pool);
+ else {
+ check_for_metadata_space(pool);
+ check_for_data_space(pool);
+ }
return r;
}
@@ -1485,6 +1521,19 @@
return r;
}
+ r = dm_pool_get_free_metadata_block_count(pool->pmd, &free_blocks);
+ if (r) {
+ metadata_operation_failed(pool, "dm_pool_get_free_metadata_block_count", r);
+ return r;
+ }
+
+ if (!free_blocks) {
+ /* Let's commit before we use up the metadata reserve. */
+ r = commit(pool);
+ if (r)
+ return r;
+ }
+
return 0;
}
@@ -1516,6 +1565,7 @@
case PM_OUT_OF_DATA_SPACE:
return pool->pf.error_if_no_space ? -ENOSPC : 0;
+ case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
case PM_FAIL:
return -EIO;
@@ -2479,8 +2529,9 @@
error_retry_list(pool);
break;
+ case PM_OUT_OF_METADATA_SPACE:
case PM_READ_ONLY:
- if (old_mode != new_mode)
+ if (!is_read_only_pool_mode(old_mode))
notify_of_pool_mode_change(pool, "read-only");
dm_pool_metadata_read_only(pool->pmd);
pool->process_bio = process_bio_read_only;
@@ -3418,6 +3469,10 @@
DMINFO("%s: growing the metadata device from %llu to %llu blocks",
dm_device_name(pool->pool_md),
sb_metadata_dev_size, metadata_dev_size);
+
+ if (get_pool_mode(pool) == PM_OUT_OF_METADATA_SPACE)
+ set_pool_mode(pool, PM_WRITE);
+
r = dm_pool_resize_metadata_dev(pool->pmd, metadata_dev_size);
if (r) {
metadata_operation_failed(pool, "dm_pool_resize_metadata_dev", r);
@@ -3721,7 +3776,7 @@
struct pool_c *pt = ti->private;
struct pool *pool = pt->pool;
- if (get_pool_mode(pool) >= PM_READ_ONLY) {
+ if (get_pool_mode(pool) >= PM_OUT_OF_METADATA_SPACE) {
DMERR("%s: unable to service pool target messages in READ_ONLY or FAIL mode",
dm_device_name(pool->pool_md));
return -EOPNOTSUPP;
@@ -3795,6 +3850,7 @@
dm_block_t nr_blocks_data;
dm_block_t nr_blocks_metadata;
dm_block_t held_root;
+ enum pool_mode mode;
char buf[BDEVNAME_SIZE];
char buf2[BDEVNAME_SIZE];
struct pool_c *pt = ti->private;
@@ -3865,9 +3921,10 @@
else
DMEMIT("- ");
- if (pool->pf.mode == PM_OUT_OF_DATA_SPACE)
+ mode = get_pool_mode(pool);
+ if (mode == PM_OUT_OF_DATA_SPACE)
DMEMIT("out_of_data_space ");
- else if (pool->pf.mode == PM_READ_ONLY)
+ else if (is_read_only_pool_mode(mode))
DMEMIT("ro ");
else
DMEMIT("rw ");
diff --git a/drivers/md/md-cluster.c b/drivers/md/md-cluster.c
index fcc2b57..e870b09 100644
--- a/drivers/md/md-cluster.c
+++ b/drivers/md/md-cluster.c
@@ -302,15 +302,6 @@
while (cinfo->recovery_map) {
slot = fls64((u64)cinfo->recovery_map) - 1;
- /* Clear suspend_area associated with the bitmap */
- spin_lock_irq(&cinfo->suspend_lock);
- list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
- if (slot == s->slot) {
- list_del(&s->list);
- kfree(s);
- }
- spin_unlock_irq(&cinfo->suspend_lock);
-
snprintf(str, 64, "bitmap%04d", slot);
bm_lockres = lockres_init(mddev, str, NULL, 1);
if (!bm_lockres) {
@@ -329,6 +320,16 @@
pr_err("md-cluster: Could not copy data from bitmap %d\n", slot);
goto clear_bit;
}
+
+ /* Clear suspend_area associated with the bitmap */
+ spin_lock_irq(&cinfo->suspend_lock);
+ list_for_each_entry_safe(s, tmp, &cinfo->suspend_list, list)
+ if (slot == s->slot) {
+ list_del(&s->list);
+ kfree(s);
+ }
+ spin_unlock_irq(&cinfo->suspend_lock);
+
if (hi > 0) {
if (lo < mddev->recovery_cp)
mddev->recovery_cp = lo;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8fd2ac3..0e52852 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -4381,11 +4381,12 @@
allow_barrier(conf);
}
+ raise_barrier(conf, 0);
read_more:
/* Now schedule reads for blocks from sector_nr to last */
r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
r10_bio->state = 0;
- raise_barrier(conf, sectors_done != 0);
+ raise_barrier(conf, 1);
atomic_set(&r10_bio->remaining, 0);
r10_bio->mddev = mddev;
r10_bio->sector = sector_nr;
@@ -4492,6 +4493,8 @@
if (sector_nr <= last)
goto read_more;
+ lower_barrier(conf);
+
/* Now that we have done the whole section we can
* update reshape_progress
*/
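
The raid10 reshape hunks change the barrier discipline: one barrier is now held across the whole read_more section, with a nested force-raise per r10_bio, so the count never drops to zero mid-section. A toy, compile-ready analog of the counting only (assert-based, not the real locking):

#include <assert.h>

struct conf { int barrier; };

static void raise_barrier(struct conf *c, int force)
{
        /* In raid10, a non-forced raise waits for the count to reach 0. */
        assert(force || c->barrier == 0);
        c->barrier++;
}

static void lower_barrier(struct conf *c)
{
        assert(c->barrier > 0);
        c->barrier--;
}

static void reshape_section(struct conf *c, int nr_bios)
{
        int i;

        raise_barrier(c, 0);            /* held across the whole section */
        for (i = 0; i < nr_bios; i++) {
                raise_barrier(c, 1);    /* nested, one per r10_bio */
                /* ... issue I/O; completion lowers the nested barrier ... */
                lower_barrier(c);       /* stands in for bio completion */
        }
        lower_barrier(c);               /* matches the initial raise */
}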
diff --git a/drivers/media/i2c/soc_camera/ov772x.c b/drivers/media/i2c/soc_camera/ov772x.c
index 7e68762..fa1cb24 100644
--- a/drivers/media/i2c/soc_camera/ov772x.c
+++ b/drivers/media/i2c/soc_camera/ov772x.c
@@ -834,7 +834,7 @@
* set COM8
*/
if (priv->band_filter) {
- ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, 1);
+ ret = ov772x_mask_set(client, COM8, BNDF_ON_OFF, BNDF_ON_OFF);
if (!ret)
ret = ov772x_mask_set(client, BDBASE,
0xff, 256 - priv->band_filter);
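
The ov772x fix works because a mask-set helper only applies value bits that fall inside the mask; passing 1 against a mask like BNDF_ON_OFF sets nothing. A sketch of the read-modify-write semantics (the 0x20 bit value is assumed here for illustration):

#include <stdint.h>

#define BNDF_ON_OFF 0x20        /* assumed bit value, for illustration */

/* Mirrors the usual ov772x_mask_set() read-modify-write semantics. */
static uint8_t mask_set(uint8_t old, uint8_t mask, uint8_t set)
{
        return (uint8_t)((old & ~mask) | (set & mask));
}

/* Before: mask_set(v, BNDF_ON_OFF, 1) leaves the bit clear, since
 * (1 & 0x20) == 0. After: mask_set(v, BNDF_ON_OFF, BNDF_ON_OFF)
 * actually enables the band filter. */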
diff --git a/drivers/media/platform/exynos4-is/fimc-isp-video.c b/drivers/media/platform/exynos4-is/fimc-isp-video.c
index 400ce0c..e00fa03 100644
--- a/drivers/media/platform/exynos4-is/fimc-isp-video.c
+++ b/drivers/media/platform/exynos4-is/fimc-isp-video.c
@@ -384,12 +384,17 @@
struct v4l2_pix_format_mplane *pixm,
const struct fimc_fmt **fmt)
{
- *fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+ const struct fimc_fmt *__fmt;
+
+ __fmt = fimc_isp_find_format(&pixm->pixelformat, NULL, 2);
+
+ if (fmt)
+ *fmt = __fmt;
pixm->colorspace = V4L2_COLORSPACE_SRGB;
pixm->field = V4L2_FIELD_NONE;
- pixm->num_planes = (*fmt)->memplanes;
- pixm->pixelformat = (*fmt)->fourcc;
+ pixm->num_planes = __fmt->memplanes;
+ pixm->pixelformat = __fmt->fourcc;
/*
* TODO: double check with the documentation these width/height
* constraints are correct.
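
The fimc-isp change is the standard optional-out-parameter pattern: keep the lookup result in a local and store through the caller's pointer only when one was supplied, instead of dereferencing a possibly NULL *fmt. In isolation (hypothetical helper, mirroring the __fmt/fmt split):

struct fimc_fmt { unsigned int fourcc; unsigned int memplanes; };

static void report_format(const struct fimc_fmt *found,
                          const struct fimc_fmt **out)
{
        if (out)
                *out = found;
        /* all further work uses 'found', never (*out) */
}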
diff --git a/drivers/media/platform/fsl-viu.c b/drivers/media/platform/fsl-viu.c
index ae8c6b3..7f0ed5a 100644
--- a/drivers/media/platform/fsl-viu.c
+++ b/drivers/media/platform/fsl-viu.c
@@ -1417,7 +1417,7 @@
sizeof(struct viu_reg), DRV_NAME)) {
dev_err(&op->dev, "Error while requesting mem region\n");
ret = -EBUSY;
- goto err;
+ goto err_irq;
}
/* remap registers */
@@ -1425,7 +1425,7 @@
if (!viu_regs) {
dev_err(&op->dev, "Can't map register set\n");
ret = -ENOMEM;
- goto err;
+ goto err_irq;
}
/* Prepare our private structure */
@@ -1433,7 +1433,7 @@
if (!viu_dev) {
dev_err(&op->dev, "Can't allocate private structure\n");
ret = -ENOMEM;
- goto err;
+ goto err_irq;
}
viu_dev->vr = viu_regs;
@@ -1449,16 +1449,21 @@
ret = v4l2_device_register(viu_dev->dev, &viu_dev->v4l2_dev);
if (ret < 0) {
dev_err(&op->dev, "v4l2_device_register() failed: %d\n", ret);
- goto err;
+ goto err_irq;
}
ad = i2c_get_adapter(0);
+ if (!ad) {
+ ret = -EFAULT;
+ dev_err(&op->dev, "couldn't get i2c adapter\n");
+ goto err_v4l2;
+ }
v4l2_ctrl_handler_init(&viu_dev->hdl, 5);
if (viu_dev->hdl.error) {
ret = viu_dev->hdl.error;
dev_err(&op->dev, "couldn't register control\n");
- goto err_vdev;
+ goto err_i2c;
}
/* This control handler will inherit the control(s) from the
sub-device(s). */
@@ -1476,7 +1481,7 @@
vdev = video_device_alloc();
if (vdev == NULL) {
ret = -ENOMEM;
- goto err_vdev;
+ goto err_hdl;
}
*vdev = viu_template;
@@ -1497,7 +1502,7 @@
ret = video_register_device(viu_dev->vdev, VFL_TYPE_GRABBER, -1);
if (ret < 0) {
video_device_release(viu_dev->vdev);
- goto err_vdev;
+ goto err_unlock;
}
/* enable VIU clock */
@@ -1505,12 +1510,12 @@
if (IS_ERR(clk)) {
dev_err(&op->dev, "failed to lookup the clock!\n");
ret = PTR_ERR(clk);
- goto err_clk;
+ goto err_vdev;
}
ret = clk_prepare_enable(clk);
if (ret) {
dev_err(&op->dev, "failed to enable the clock!\n");
- goto err_clk;
+ goto err_vdev;
}
viu_dev->clk = clk;
@@ -1521,7 +1526,7 @@
if (request_irq(viu_dev->irq, viu_intr, 0, "viu", (void *)viu_dev)) {
dev_err(&op->dev, "Request VIU IRQ failed.\n");
ret = -ENODEV;
- goto err_irq;
+ goto err_clk;
}
mutex_unlock(&viu_dev->lock);
@@ -1529,16 +1534,19 @@
dev_info(&op->dev, "Freescale VIU Video Capture Board\n");
return ret;
-err_irq:
- clk_disable_unprepare(viu_dev->clk);
err_clk:
- video_unregister_device(viu_dev->vdev);
+ clk_disable_unprepare(viu_dev->clk);
err_vdev:
- v4l2_ctrl_handler_free(&viu_dev->hdl);
+ video_unregister_device(viu_dev->vdev);
+err_unlock:
mutex_unlock(&viu_dev->lock);
+err_hdl:
+ v4l2_ctrl_handler_free(&viu_dev->hdl);
+err_i2c:
i2c_put_adapter(ad);
+err_v4l2:
v4l2_device_unregister(&viu_dev->v4l2_dev);
-err:
+err_irq:
irq_dispose_mapping(viu_irq);
return ret;
}
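
The fsl-viu relabelling restores the usual kernel unwind convention: each label names the oldest resource it releases, every failure jumps to the label that undoes exactly what has been acquired so far, and the labels run in reverse acquisition order. A minimal compile-ready sketch with hypothetical resources:

static int acquire_a(void) { return 0; }
static int acquire_b(void) { return 0; }
static int acquire_c(void) { return 0; }
static void release_a(void) { }
static void release_b(void) { }

static int probe_example(void)
{
        int ret;

        ret = acquire_a();
        if (ret)
                return ret;

        ret = acquire_b();
        if (ret)
                goto err_a;     /* undo A only */

        ret = acquire_c();
        if (ret)
                goto err_b;     /* undo B, then fall through to A */

        return 0;

err_b:
        release_b();
err_a:
        release_a();
        return ret;
}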
diff --git a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
index aaa172d..e6e9ace 100644
--- a/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
+++ b/drivers/media/platform/msm/camera/cam_isp/cam_isp_context.c
@@ -1219,7 +1219,7 @@
ctx_isp->substate_activated);
req_isp = (struct cam_isp_ctx_req *) req->req_priv;
- if (ctx_isp->active_req_cnt >= 2) {
+ if (ctx_isp->active_req_cnt >= 4) {
CAM_ERR_RATE_LIMIT(CAM_ISP,
"Reject apply request (id %lld) due to congestion(cnt = %d)",
req->request_id,
diff --git a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
index ff385ca..8c075f5 100644
--- a/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
+++ b/drivers/media/platform/msm/camera/cam_sensor_module/cam_flash/cam_flash_core.c
@@ -1565,9 +1565,7 @@
if ((fctrl->flash_state == CAM_FLASH_STATE_CONFIG) ||
(fctrl->flash_state == CAM_FLASH_STATE_START)) {
- mutex_lock(&(fctrl->flash_mutex));
fctrl->func_tbl.flush_req(fctrl, FLUSH_ALL, 0);
- mutex_unlock(&(fctrl->flash_mutex));
rc = fctrl->func_tbl.power_ops(fctrl, false);
if (rc)
CAM_ERR(CAM_FLASH, "Power Down Failed rc: %d",
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index 559ad4d..bcd9274 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -37,6 +37,7 @@
static struct mutex ordered_sd_mtx;
static struct mutex v4l2_event_mtx;
+static atomic_t qos_add_request_done = ATOMIC_INIT(0);
static struct pm_qos_request msm_v4l2_pm_qos_request;
static struct msm_queue_head *msm_session_q;
@@ -223,9 +224,11 @@
return (ack->stream_id == *(unsigned int *)d2) ? 1 : 0;
}
-static void msm_pm_qos_add_request(void)
+static inline void msm_pm_qos_add_request(void)
{
pr_info("%s: add request", __func__);
+ if (atomic_cmpxchg(&qos_add_request_done, 0, 1))
+ return;
pm_qos_add_request(&msm_v4l2_pm_qos_request, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
}
@@ -243,6 +246,7 @@
*/
if (msm_session_q && msm_session_q->len == 0) {
pr_info("%s: update request %d", __func__, val);
+ msm_pm_qos_add_request();
pm_qos_update_request(&msm_v4l2_pm_qos_request, val);
}
}
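
The qos_add_request_done guard is a lock-free run-once: atomic_cmpxchg() returns the old value, so exactly one caller sees 0 and performs the registration. A user-space analog using C11 atomics:

#include <stdatomic.h>

static atomic_int init_done;

static void do_init_once(void)
{
        int expected = 0;

        /* Exactly one thread wins the 0 -> 1 transition. */
        if (!atomic_compare_exchange_strong(&init_done, &expected, 1))
                return;

        /* ... one-time work, e.g. pm_qos_add_request() ... */
}

Note this guard alone does not make losing callers wait for the winner to finish the one-time work; the patch relies on these paths not racing in practice.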
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h
index 4c5ce02..bcd8a9e 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_10_0_0_hwreg.h
@@ -28,6 +28,9 @@
#define mask_enable_clk_B 0x2
#define mask_ctrl_1_A 0x5
#define mask_ctrl_1_B 0xA
+#define mask_reset_A 0x1
+#define mask_reset_B 0x7
+#define mask_shutdown_A 0x3
#define mask_hs_freq_range 0x7F
#define mask_osc_freq_2 0xFF
#define mask_osc_freq_3 0xF00
@@ -56,8 +59,6 @@
{0x58C, 0xFF}, /* mipi_csiphy_irq_mask_ctrl_lane_0 */
{0x5C8, 0xFF}, /* mipi_csiphy_irq_mask_ctrl_lane_clk_0 */
{0x20, 0x0}, /* mipi_csiphy_rx_sys_7_00 */
- {0x28, 0x43}, /* mipi_csiphy_rx_sys_9_00 */
- {0x380, 0x0}, /* mipi_csiphy_rx_startup_ovr_0_00 */
{0x384, 0x0}, /* mipi_csiphy_rx_startup_ovr_1_00 */
{0x388, 0xCC}, /* mipi_csiphy_rx_startup_ovr_2_00 */
{0x38C, 0x1}, /* mipi_csiphy_rx_startup_ovr_3_00 */
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index 1f359bb..f98e23f 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -252,10 +252,13 @@
csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
mipi_csiphy_rx_sys_7_00.addr + offset);
- msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg.
- mipi_csiphy_rx_sys_9_00.data,
+ value = msm_camera_io_r(csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_snps_reg.
+ mipi_csiphy_rx_clk_lane_6_00.addr + offset);
+ value |= SET_THE_BIT(7);
+ msm_camera_io_w(value,
csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
- mipi_csiphy_rx_sys_9_00.addr + offset);
+ mipi_csiphy_rx_clk_lane_6_00.addr + offset);
msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg.
mipi_csiphy_rx_startup_ovr_4_00.data,
@@ -317,7 +320,7 @@
uint16_t lane_mask = 0;
void __iomem *csiphybase;
enum snps_csiphy_mode mode = INVALID_MODE;
- uint32_t value, num_tries, num_lanes, offset;
+ uint32_t value, num_tries, num_lanes, offset = SNPS_INTERPHY_OFFSET;
uint32_t clk_mux_reg = 0;
csiphybase = csiphy_dev->base;
@@ -497,17 +500,6 @@
value = msm_camera_io_r(csiphybase +
csiphy_dev->ctrl_reg->csiphy_snps_reg.
- mipi_csiphy_rx_startup_ovr_0_00.addr +
- SNPS_INTERPHY_OFFSET);
- value |= SET_THE_BIT(0);
- value |= SET_THE_BIT(1);
- msm_camera_io_w(value,
- csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
- mipi_csiphy_rx_startup_ovr_0_00.addr +
- SNPS_INTERPHY_OFFSET);
-
- value = msm_camera_io_r(csiphybase +
- csiphy_dev->ctrl_reg->csiphy_snps_reg.
mipi_csiphy_rx_startup_ovr_1_00.addr +
SNPS_INTERPHY_OFFSET);
value &= ~(SET_THE_BIT(0));
@@ -521,6 +513,7 @@
csiphy_dev->ctrl_reg->csiphy_snps_reg.
mipi_csiphy_rx_clk_lane_6_00.addr);
value |= SET_THE_BIT(2);
+ value &= ~(SET_THE_BIT(7));
msm_camera_io_w(value,
csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
mipi_csiphy_rx_clk_lane_6_00.addr);
@@ -530,7 +523,7 @@
mipi_csiphy_rx_clk_lane_6_00.addr +
SNPS_INTERPHY_OFFSET);
value |= SET_THE_BIT(3);
- value |= SET_THE_BIT(7);
+ value &= ~(SET_THE_BIT(7));
value &= ~(SET_THE_BIT(2));
msm_camera_io_w(value,
csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
@@ -592,36 +585,109 @@
csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
mipi_csiphy_enable_clk.addr);
- value = 0x0;
- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A)
- value |= mask_ctrl_1_A;
- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_B)
- value |= mask_ctrl_1_B;
- msm_camera_io_w(value,
+ if (mode == TWO_LANE_PHY_A) {
+ msm_camera_io_w(mask_reset_A,
csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
mipi_csiphy_ctrl_1.addr);
- if (mode == AGGREGATE_MODE || mode == TWO_LANE_PHY_A)
- offset = 0x0;
- else
- offset = SNPS_INTERPHY_OFFSET;
+ msm_camera_io_w(mask_ctrl_1_A,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+ mipi_csiphy_ctrl_1.addr);
- value = 0x0;
- num_tries = 0;
+ value = 0x0;
+ num_tries = 0;
- do {
- num_tries++;
- value = msm_camera_io_r(csiphybase +
- csiphy_dev->ctrl_reg->csiphy_snps_reg.
- mipi_csiphy_rx_startup_obs_2_00.addr + offset);
- if ((value | SET_THE_BIT(4)) == value)
- break;
- usleep_range(100, 150);
- } while (num_tries < 6);
+ do {
+ num_tries++;
+ value = msm_camera_io_r(csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_snps_reg.
+ mipi_csiphy_rx_startup_obs_2_00.addr);
+ if ((value | SET_THE_BIT(4)) == value)
+ break;
+ usleep_range(100, 150);
+ } while (num_tries < 6);
+ if ((value | SET_THE_BIT(4)) != value) {
+ pr_err("%s: SNPS phy config failed\n", __func__);
+ return -EINVAL;
+ }
+ }
- if ((value | SET_THE_BIT(4)) != value) {
- pr_err("%s: SNPS phy config failed\n", __func__);
- return -EINVAL;
+ if (mode == TWO_LANE_PHY_B) {
+ msm_camera_io_w(mask_reset_B,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+ mipi_csiphy_ctrl_1.addr);
+
+ msm_camera_io_w(mask_ctrl_1_A|mask_ctrl_1_B,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+ mipi_csiphy_ctrl_1.addr);
+
+ value = 0x0;
+ num_tries = 0;
+
+ do {
+ num_tries++;
+ value = msm_camera_io_r(csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_snps_reg.
+ mipi_csiphy_rx_startup_obs_2_00.addr + offset);
+ if ((value | SET_THE_BIT(4)) == value)
+ break;
+ usleep_range(100, 150);
+ } while (num_tries < 6);
+
+ if ((value | SET_THE_BIT(4)) != value) {
+ pr_err("%s: SNPS phy config failed\n", __func__);
+ return -EINVAL;
+ }
+ }
+
+ if (mode == AGGREGATE_MODE) {
+ msm_camera_io_w(mask_shutdown_A,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+ mipi_csiphy_ctrl_1.addr);
+
+ msm_camera_io_w(mask_reset_B,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+ mipi_csiphy_ctrl_1.addr);
+
+ value = 0x0;
+ num_tries = 0;
+
+ do {
+ num_tries++;
+ value = msm_camera_io_r(csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_snps_reg.
+ mipi_csiphy_rx_startup_obs_2_00.addr);
+ if ((value | SET_THE_BIT(4)) == value)
+ break;
+ usleep_range(100, 150);
+ } while (num_tries < 6);
+
+ if ((value | SET_THE_BIT(4)) != value) {
+ pr_err("%s: SNPS phy config failed\n", __func__);
+ return -EINVAL;
+ }
+
+ msm_camera_io_w(mask_ctrl_1_A|mask_ctrl_1_B,
+ csiphybase + csiphy_dev->ctrl_reg->csiphy_snps_reg.
+ mipi_csiphy_ctrl_1.addr);
+
+ value = 0x0;
+ num_tries = 0;
+
+ do {
+ num_tries++;
+ value = msm_camera_io_r(csiphybase +
+ csiphy_dev->ctrl_reg->csiphy_snps_reg.
+ mipi_csiphy_rx_startup_obs_2_00.addr + offset);
+ if ((value | SET_THE_BIT(4)) == value)
+ break;
+ usleep_range(100, 150);
+ } while (num_tries < 6);
+
+ if ((value | SET_THE_BIT(4)) != value) {
+ pr_err("%s: SNPS phy config failed\n", __func__);
+ return -EINVAL;
+ }
}
msm_camera_io_w(csiphy_dev->ctrl_reg->csiphy_snps_reg.
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index 6fc084c..dade2e3 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -83,8 +83,6 @@
struct csiphy_reg_t mipi_csiphy_irq_mask_ctrl_lane_0;
struct csiphy_reg_t mipi_csiphy_irq_mask_ctrl_lane_clk_0;
struct csiphy_reg_t mipi_csiphy_rx_sys_7_00;
- struct csiphy_reg_t mipi_csiphy_rx_sys_9_00;
- struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_0_00;
struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_1_00;
struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_2_00;
struct csiphy_reg_t mipi_csiphy_rx_startup_ovr_3_00;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.c b/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.c
index ee695d8..da407a7 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/laser_led/msm_laser_led.c
@@ -70,18 +70,15 @@
.i2c_util = msm_sensor_cci_i2c_util,
.i2c_poll = msm_camera_cci_i2c_poll,
};
-#ifdef CONFIG_COMPAT
+
static int32_t msm_laser_led_init(
struct msm_laser_led_ctrl_t *laser_led_ctrl,
- struct msm_laser_led_cfg_data_t32 __user *laser_led_data)
-#else
-static int32_t msm_laser_led_init(
- struct msm_laser_led_ctrl_t *laser_led_ctrl,
- struct msm_laser_led_cfg_data_t __user *laser_led_data)
-#endif
+ void __user *argp)
{
int32_t rc = -EFAULT;
struct msm_camera_cci_client *cci_client = NULL;
+ struct msm_laser_led_cfg_data_t __user *laser_led_data =
+ (struct msm_laser_led_cfg_data_t __user *) argp;
CDBG("Enter\n");
@@ -263,6 +260,53 @@
return rc;
}
+
+static int32_t msm_laser_led_init32(
+ struct msm_laser_led_ctrl_t *laser_led_ctrl,
+ void __user *argp)
+{
+ int32_t rc = -EFAULT;
+ struct msm_laser_led_cfg_data_t32 __user *laser_led_data =
+ (struct msm_laser_led_cfg_data_t32 __user *) argp;
+ struct msm_camera_cci_client *cci_client = NULL;
+
+ CDBG("Enter\n");
+
+ if (laser_led_ctrl->laser_led_state == MSM_CAMERA_LASER_LED_INIT) {
+ pr_err("Invalid laser_led state = %d\n",
+ laser_led_ctrl->laser_led_state);
+ return 0;
+ }
+
+ rc = laser_led_ctrl->i2c_client.i2c_func_tbl->i2c_util(
+ &laser_led_ctrl->i2c_client, MSM_CCI_INIT);
+ if (rc < 0)
+ pr_err("cci_init failed\n");
+
+ cci_client = laser_led_ctrl->i2c_client.cci_client;
+
+ if (copy_from_user(&(cci_client->sid),
+ &(laser_led_data->i2c_addr),
+ sizeof(uint16_t))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+ cci_client->sid = cci_client->sid >> 1;
+ cci_client->retries = 3;
+ cci_client->id_map = 0;
+
+ if (copy_from_user(&(cci_client->i2c_freq_mode),
+ &(laser_led_data->i2c_freq_mode),
+ sizeof(enum i2c_freq_mode_t))) {
+ pr_err("%s:%d failed\n", __func__, __LINE__);
+ return -EFAULT;
+ }
+
+ laser_led_ctrl->laser_led_state = MSM_CAMERA_LASER_LED_INIT;
+
+ CDBG("Exit\n");
+ return 0;
+}
#endif
static int32_t msm_laser_led_control(
@@ -381,7 +425,12 @@
switch (cfg_type) {
case CFG_LASER_LED_INIT:
- rc = msm_laser_led_init(laser_led_ctrl, laser_led_data);
+#ifdef CONFIG_COMPAT
+ if (is_compat_task())
+ rc = msm_laser_led_init32(laser_led_ctrl, argp);
+ else
+#endif
+ rc = msm_laser_led_init(laser_led_ctrl, argp);
break;
case CFG_LASER_LED_CONTROL:
#ifdef CONFIG_COMPAT
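
The laser_led split replaces an #ifdef-swapped prototype with two handlers and a runtime check, so both 32-bit and 64-bit userlands work on a CONFIG_COMPAT kernel. Schematically (kernel context assumed, wrapper name hypothetical):

static long cfg_laser_led_init(struct msm_laser_led_ctrl_t *ctrl,
                               void __user *argp)
{
#ifdef CONFIG_COMPAT
        if (is_compat_task())                   /* 32-bit caller, packed layout */
                return msm_laser_led_init32(ctrl, argp);
#endif
        return msm_laser_led_init(ctrl, argp);  /* native layout */
}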
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index e6a4ed30..148df7d 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1393,7 +1393,6 @@
struct hal_h264_entropy_control h264_entropy_control;
struct hal_intra_period intra_period;
struct hal_idr_period idr_period;
- struct hal_vpe_rotation vpe_rotation;
struct hal_intra_refresh intra_refresh;
struct hal_multi_slice_control multi_slice_control;
struct hal_h264_db_control h264_db_control;
@@ -1692,28 +1691,12 @@
break;
case V4L2_CID_MPEG_VIDC_VIDEO_ROTATION:
{
- temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_FLIP);
- property_id = HAL_PARAM_VPE_ROTATION;
- vpe_rotation.rotate = msm_comm_v4l2_to_hal(
- V4L2_CID_MPEG_VIDC_VIDEO_ROTATION,
- ctrl->val);
- vpe_rotation.flip = msm_comm_v4l2_to_hal(
- V4L2_CID_MPEG_VIDC_VIDEO_FLIP,
- temp_ctrl->val);
- pdata = &vpe_rotation;
+ dprintk(VIDC_DBG, "Rotation %d\n", ctrl->val);
break;
}
case V4L2_CID_MPEG_VIDC_VIDEO_FLIP:
{
- temp_ctrl = TRY_GET_CTRL(V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
- property_id = HAL_PARAM_VPE_ROTATION;
- vpe_rotation.rotate = msm_comm_v4l2_to_hal(
- V4L2_CID_MPEG_VIDC_VIDEO_ROTATION,
- temp_ctrl->val);
- vpe_rotation.flip = msm_comm_v4l2_to_hal(
- V4L2_CID_MPEG_VIDC_VIDEO_FLIP,
- ctrl->val);
- pdata = &vpe_rotation;
+ dprintk(VIDC_DBG, "Flip %d\n", ctrl->val);
break;
}
case V4L2_CID_MPEG_VIDEO_MULTI_SLICE_MODE: {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 815d137..18bd77f 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -946,6 +946,59 @@
return 0;
}
+static int msm_vidc_set_rotation(struct msm_vidc_inst *inst)
+{
+ int rc = 0;
+ int value = 0;
+ struct hfi_device *hdev;
+ struct hal_vpe_rotation vpe_rotation;
+ struct hal_frame_size frame_sz;
+
+ hdev = inst->core->device;
+
+ /* Set rotation and flip first */
+ value = msm_comm_g_ctrl_for_id(inst, V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
+ if (value < 0) {
+ dprintk(VIDC_ERR, "Get control for rotation failed\n");
+ return value;
+ }
+ vpe_rotation.rotate = value;
+ value = msm_comm_g_ctrl_for_id(inst, V4L2_CID_MPEG_VIDC_VIDEO_FLIP);
+ if (value < 0) {
+ dprintk(VIDC_ERR, "Get control for flip failed\n");
+ return value;
+ }
+ vpe_rotation.flip = value;
+ dprintk(VIDC_DBG, "Set rotation = %d, flip = %d for capture port.\n",
+ vpe_rotation.rotate, vpe_rotation.flip);
+ rc = call_hfi_op(hdev, session_set_property,
+ (void *)inst->session,
+ HAL_PARAM_VPE_ROTATION, &vpe_rotation);
+ if (rc) {
+ dprintk(VIDC_ERR, "Set rotation/flip at start stream failed\n");
+ return rc;
+ }
+
+ /* flip the output resolution if required */
+ value = vpe_rotation.rotate;
+ if (value == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90 ||
+ value == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270) {
+ frame_sz.buffer_type = HAL_BUFFER_OUTPUT;
+ frame_sz.width = inst->prop.height[CAPTURE_PORT];
+ frame_sz.height = inst->prop.width[CAPTURE_PORT];
+ dprintk(VIDC_DBG, "CAPTURE port width = %d, height = %d\n",
+ frame_sz.width, frame_sz.height);
+ rc = call_hfi_op(hdev, session_set_property, (void *)
+ inst->session, HAL_PARAM_FRAME_SIZE, &frame_sz);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Failed to set framesize for CAPTURE port\n");
+ return rc;
+ }
+ }
+ return rc;
+}
+
static inline int start_streaming(struct msm_vidc_inst *inst)
{
int rc = 0;
@@ -954,6 +1007,15 @@
hdev = inst->core->device;
+ if (inst->session_type == MSM_VIDC_ENCODER) {
+ rc = msm_vidc_set_rotation(inst);
+ if (rc) {
+ dprintk(VIDC_ERR,
+ "Set rotation for encoder failed\n");
+ goto fail_start;
+ }
+ }
+
/* Create tile info table */
rc = msm_vidc_create_tile_info_table(inst);
if (rc) {
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index 176b9c6..f17601b 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -1645,12 +1645,12 @@
planes[0] = event_notify->packet_buffer;
planes[1] = event_notify->extra_data_buffer;
mbuf = msm_comm_get_buffer_using_device_planes(inst, planes);
- mbuf->output_tag = event_notify->output_tag;
if (!mbuf || !kref_get_mbuf(inst, mbuf)) {
dprintk(VIDC_ERR,
"%s: data_addr %x, extradata_addr %x not found\n",
__func__, planes[0], planes[1]);
} else {
+ mbuf->output_tag = event_notify->output_tag;
handle_release_buffer_reference(inst, mbuf);
kref_put_mbuf(mbuf);
}
@@ -5500,7 +5500,8 @@
LOAD_CALC_IGNORE_THUMBNAIL_LOAD |
LOAD_CALC_IGNORE_NON_REALTIME_LOAD;
- if (inst->state == MSM_VIDC_OPEN_DONE) {
+ if (inst->state >= MSM_VIDC_OPEN_DONE &&
+ inst->state <= MSM_VIDC_STOP_DONE) {
max_load_adj = inst->core->resources.max_load;
num_mbs_per_sec = msm_comm_get_load(inst->core,
MSM_VIDC_DECODER, quirks);
@@ -5522,7 +5523,6 @@
{
u32 x_min, x_max, y_min, y_max;
u32 input_height, input_width, output_height, output_width;
- u32 rotation;
if (is_heic_encode_session(inst)) {
dprintk(VIDC_DBG, "Skip downscale check for HEIC\n");
@@ -5563,20 +5563,6 @@
return 0;
}
- rotation = msm_comm_g_ctrl_for_id(inst,
- V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
-
- if ((output_width != output_height) &&
- (rotation == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90 ||
- rotation == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270)) {
-
- output_width = inst->prop.height[CAPTURE_PORT];
- output_height = inst->prop.width[CAPTURE_PORT];
- dprintk(VIDC_DBG,
- "Rotation=%u Swapped Output W=%u H=%u to check scaling",
- rotation, output_width, output_height);
- }
-
x_min = (1<<16)/inst->capability.scale_x.min;
y_min = (1<<16)/inst->capability.scale_y.min;
x_max = inst->capability.scale_x.max >> 16;
@@ -5622,7 +5608,6 @@
struct hfi_device *hdev;
struct msm_vidc_core *core;
u32 output_height, output_width, input_height, input_width;
- u32 rotation;
if (!inst || !inst->core || !inst->core->device) {
dprintk(VIDC_WARN, "%s: Invalid parameter\n", __func__);
@@ -5661,23 +5646,9 @@
rc = -ENOTSUPP;
}
- rotation = msm_comm_g_ctrl_for_id(inst,
- V4L2_CID_MPEG_VIDC_VIDEO_ROTATION);
-
output_height = ALIGN(inst->prop.height[CAPTURE_PORT], 16);
output_width = ALIGN(inst->prop.width[CAPTURE_PORT], 16);
- if ((output_width != output_height) &&
- (rotation == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_90 ||
- rotation == V4L2_CID_MPEG_VIDC_VIDEO_ROTATION_270)) {
-
- output_width = ALIGN(inst->prop.height[CAPTURE_PORT], 16);
- output_height = ALIGN(inst->prop.width[CAPTURE_PORT], 16);
- dprintk(VIDC_DBG,
- "Rotation=%u Swapped Output W=%u H=%u to check capability",
- rotation, output_width, output_height);
- }
-
if (!rc) {
if (output_width < capability->width.min ||
output_height < capability->height.min) {
@@ -6474,7 +6445,7 @@
mutex_lock(&inst->registeredbufs.lock);
found = false;
list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
- if (msm_comm_compare_device_planes(mbuf, planes)) {
+ if (msm_comm_compare_device_plane(mbuf, planes, 0)) {
found = true;
break;
}
@@ -6549,8 +6520,14 @@
mutex_lock(&inst->registeredbufs.lock);
if (inst->session_type == MSM_VIDC_DECODER) {
list_for_each_entry(mbuf, &inst->registeredbufs.list, list) {
- if (msm_comm_compare_dma_planes(inst, mbuf,
- dma_planes)) {
+ /*
+ * The client might have queued the same plane[0] with a different
+ * plane[1]; search on plane[0] and, if found, don't queue the
+ * buffer. It will be queued when the RBR event arrives.
+ */
+ if (msm_comm_compare_dma_plane(inst, mbuf,
+ dma_planes, 0)) {
found = true;
break;
}
@@ -6607,37 +6584,15 @@
}
}
- /* special handling for decoder */
+ /*
+ * Special handling for the decoder:
+ * if an RBR is pending on this buffer, set the RBR_PENDING flag
+ * and clear the DEFERRED flag so this buffer is not queued to the
+ * video hardware by msm_comm_qbuf(), which queues all DEFERRED
+ * buffers.
+ */
if (inst->session_type == MSM_VIDC_DECODER) {
if (found) {
rc = -EEXIST;
- } else {
- bool found_plane0 = false;
- struct msm_vidc_buffer *temp;
- /*
- * client might have queued same plane[0] but different
- * plane[1] search plane[0] and if found don't queue the
- * buffer, the buffer will be queued when rbr event
- * arrived.
- */
- list_for_each_entry(temp, &inst->registeredbufs.list,
- list) {
- if (msm_comm_compare_dma_plane(inst, temp,
- dma_planes, 0)) {
- found_plane0 = true;
- break;
- }
- }
- if (found_plane0)
- rc = -EEXIST;
- }
- /*
- * If RBR pending on this buffer then enable RBR_PENDING flag
- * and clear the DEFERRED flag to avoid this buffer getting
- * queued to video hardware in msm_comm_qbuf() which tries to
- * queue all the DEFERRED buffers.
- */
- if (rc == -EEXIST) {
mbuf->flags |= MSM_VIDC_FLAG_RBR_PENDING;
mbuf->flags &= ~MSM_VIDC_FLAG_DEFERRED;
}
@@ -6692,22 +6647,23 @@
goto unlock;
}
- print_vidc_buffer(VIDC_DBG, "dqbuf", inst, mbuf);
for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
print_vidc_buffer(VIDC_ERR,
- "dqbuf: unmap failed.", inst, mbuf);
+ "dqbuf: unmap failed.", inst, mbuf);
- if (!(mbuf->vvb.flags & V4L2_QCOM_BUF_FLAG_READONLY)) {
- /* rbr won't come for this buffer */
- if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
- print_vidc_buffer(VIDC_ERR,
- "dqbuf: unmap failed..", inst, mbuf);
- } else {
- /* RBR event expected */
+ if (i == 0 && mbuf->vvb.flags & V4L2_QCOM_BUF_FLAG_READONLY) {
+ /* RBR event expected only for plane[0] */
mbuf->flags |= MSM_VIDC_FLAG_RBR_PENDING;
+ continue;
+ }
+
+ if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i])) {
+ print_vidc_buffer(VIDC_ERR,
+ "dqbuf: unmap failed..", inst, mbuf);
}
}
+ print_vidc_buffer(VIDC_DBG, "dqbuf", inst, mbuf);
/*
* remove the entry if plane[0].refcount is zero else
* don't remove as client queued same buffer that's why
@@ -6755,39 +6711,27 @@
/* clear RBR_PENDING flag */
mbuf->flags &= ~MSM_VIDC_FLAG_RBR_PENDING;
- for (i = 0; i < mbuf->vvb.vb2_buf.num_planes; i++) {
- if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[i]))
- print_vidc_buffer(VIDC_ERR,
+ /*
+ * Extradata plane is not accounted for in the registered list.
+ * Hence unref only the image plane to check if it has
+ * outstanding refs (the pending-RBR case).
+ */
+ if (msm_smem_unmap_dma_buf(inst, &mbuf->smem[0]))
+ print_vidc_buffer(VIDC_ERR,
"rbr unmap failed.", inst, mbuf);
- }
+
/* refcount is not zero if client queued the same buffer */
if (!mbuf->smem[0].refcount) {
list_del(&mbuf->list);
kref_put_mbuf(mbuf);
mbuf = NULL;
+ found = false;
}
} else {
print_vidc_buffer(VIDC_ERR, "mbuf not found", inst, mbuf);
goto unlock;
}
- /*
- * 1. client might have pushed same planes in which case mbuf will be
- * same and refcounts are positive and buffer wouldn't have been
- * removed from the registeredbufs list.
- * 2. client might have pushed same planes[0] but different planes[1]
- * in which case mbuf will be different.
- * 3. in either case we can search mbuf->smem[0].device_addr in the list
- * and if found queue it to video hw (if not flushing).
- */
- found = false;
- list_for_each_entry(temp, &inst->registeredbufs.list, list) {
- if (msm_comm_compare_device_plane(temp, planes, 0)) {
- mbuf = temp;
- found = true;
- break;
- }
- }
if (!found)
goto unlock;
diff --git a/drivers/media/platform/omap3isp/isp.c b/drivers/media/platform/omap3isp/isp.c
index 15a86bb..1e98b48 100644
--- a/drivers/media/platform/omap3isp/isp.c
+++ b/drivers/media/platform/omap3isp/isp.c
@@ -304,7 +304,7 @@
static int isp_xclk_init(struct isp_device *isp)
{
struct device_node *np = isp->dev->of_node;
- struct clk_init_data init;
+ struct clk_init_data init = { 0 };
unsigned int i;
for (i = 0; i < ARRAY_SIZE(isp->xclks); ++i)
diff --git a/drivers/media/platform/s3c-camif/camif-capture.c b/drivers/media/platform/s3c-camif/camif-capture.c
index 5c9db09..d9710b5 100644
--- a/drivers/media/platform/s3c-camif/camif-capture.c
+++ b/drivers/media/platform/s3c-camif/camif-capture.c
@@ -117,6 +117,8 @@
if (camif->sensor.power_count == !on)
err = v4l2_subdev_call(sensor->sd, core, s_power, on);
+ if (err == -ENOIOCTLCMD)
+ err = 0;
if (!err)
sensor->power_count += on ? 1 : -1;
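
For optional v4l2 subdev operations, -ENOIOCTLCMD means "not implemented", not failure; without the fix-up, a sensor lacking s_power never advances power_count, so the first-open/last-close gating sticks. A toy refcount illustrating the fix:

#define ENOIOCTLCMD 515         /* kernel-internal "no ioctl command" */

static int power_count;

static int sensor_power(int on, int has_s_power_op)
{
        int err = has_s_power_op ? 0 : -ENOIOCTLCMD;

        if (err == -ENOIOCTLCMD)
                err = 0;        /* the fix: missing op is not an error */
        if (!err)
                power_count += on ? 1 : -1;
        return err;
}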
diff --git a/drivers/media/usb/dvb-usb-v2/af9035.c b/drivers/media/usb/dvb-usb-v2/af9035.c
index 8961dd7..64be30d 100644
--- a/drivers/media/usb/dvb-usb-v2/af9035.c
+++ b/drivers/media/usb/dvb-usb-v2/af9035.c
@@ -406,8 +406,10 @@
msg[0].addr == (state->af9033_i2c_addr[1] >> 1))
reg |= 0x100000;
- ret = af9035_wr_regs(d, reg, &msg[0].buf[3],
- msg[0].len - 3);
+ ret = (msg[0].len >= 3) ? af9035_wr_regs(d, reg,
+ &msg[0].buf[3],
+ msg[0].len - 3)
+ : -EOPNOTSUPP;
} else {
/* I2C write */
u8 buf[MAX_XFER_SIZE];
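
The af9035 guard matters because msg[0].len is unsigned: for a malformed write shorter than the 3-byte register header, buf[3] reads out of bounds and len - 3 goes negative after integer promotion (huge when treated as a size). A small demonstration of the arithmetic:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        uint16_t len = 2;       /* malformed 2-byte I2C write */

        /* len - 3 promotes to int (-1); as a size it becomes huge. */
        printf("%d %zu\n", len - 3, (size_t)(len - 3));
        return 0;
}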
diff --git a/drivers/media/usb/tm6000/tm6000-dvb.c b/drivers/media/usb/tm6000/tm6000-dvb.c
index 0426b21..ee88ae8 100644
--- a/drivers/media/usb/tm6000/tm6000-dvb.c
+++ b/drivers/media/usb/tm6000/tm6000-dvb.c
@@ -273,6 +273,11 @@
ret = dvb_register_adapter(&dvb->adapter, "Trident TVMaster 6000 DVB-T",
THIS_MODULE, &dev->udev->dev, adapter_nr);
+ if (ret < 0) {
+ pr_err("tm6000: couldn't register the adapter!\n");
+ goto err;
+ }
+
dvb->adapter.priv = dev;
if (dvb->frontend) {
diff --git a/drivers/media/usb/uvc/uvc_video.c b/drivers/media/usb/uvc/uvc_video.c
index b5589d5..48503f30 100644
--- a/drivers/media/usb/uvc/uvc_video.c
+++ b/drivers/media/usb/uvc/uvc_video.c
@@ -163,14 +163,27 @@
}
}
+static size_t uvc_video_ctrl_size(struct uvc_streaming *stream)
+{
+ /*
+ * Return the size of the video probe and commit controls, which depends
+ * on the protocol version.
+ */
+ if (stream->dev->uvc_version < 0x0110)
+ return 26;
+ else if (stream->dev->uvc_version < 0x0150)
+ return 34;
+ else
+ return 48;
+}
+
static int uvc_get_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl, int probe, __u8 query)
{
+ __u16 size = uvc_video_ctrl_size(stream);
__u8 *data;
- __u16 size;
int ret;
- size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
if ((stream->dev->quirks & UVC_QUIRK_PROBE_DEF) &&
query == UVC_GET_DEF)
return -EIO;
@@ -225,7 +238,7 @@
ctrl->dwMaxVideoFrameSize = get_unaligned_le32(&data[18]);
ctrl->dwMaxPayloadTransferSize = get_unaligned_le32(&data[22]);
- if (size == 34) {
+ if (size >= 34) {
ctrl->dwClockFrequency = get_unaligned_le32(&data[26]);
ctrl->bmFramingInfo = data[30];
ctrl->bPreferedVersion = data[31];
@@ -254,11 +267,10 @@
static int uvc_set_video_ctrl(struct uvc_streaming *stream,
struct uvc_streaming_control *ctrl, int probe)
{
+ __u16 size = uvc_video_ctrl_size(stream);
__u8 *data;
- __u16 size;
int ret;
- size = stream->dev->uvc_version >= 0x0110 ? 34 : 26;
data = kzalloc(size, GFP_KERNEL);
if (data == NULL)
return -ENOMEM;
@@ -275,7 +287,7 @@
put_unaligned_le32(ctrl->dwMaxVideoFrameSize, &data[18]);
put_unaligned_le32(ctrl->dwMaxPayloadTransferSize, &data[22]);
- if (size == 34) {
+ if (size >= 34) {
put_unaligned_le32(ctrl->dwClockFrequency, &data[26]);
data[30] = ctrl->bmFramingInfo;
data[31] = ctrl->bPreferedVersion;
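
With three possible layouts, the parser's equality tests had to become range tests: a UVC 1.5 device uses 48-byte controls but still carries the 1.1 fields at offsets 26..33. Sketch of the size selection (bcd-coded version, as in the patch):

/* Probe/commit control size by bcd protocol version. */
static unsigned int ctrl_size(unsigned int uvc_version)
{
        if (uvc_version < 0x0110)
                return 26;      /* UVC 1.0 */
        if (uvc_version < 0x0150)
                return 34;      /* UVC 1.1 */
        return 48;              /* UVC 1.5 */
}

/* Hence "size >= 34" above: 48-byte (1.5) buffers still contain
 * dwClockFrequency, bmFramingInfo, bPreferedVersion, ... */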
diff --git a/drivers/media/v4l2-core/v4l2-event.c b/drivers/media/v4l2-core/v4l2-event.c
index 8d3171c..567d868 100644
--- a/drivers/media/v4l2-core/v4l2-event.c
+++ b/drivers/media/v4l2-core/v4l2-event.c
@@ -119,14 +119,6 @@
if (sev == NULL)
return;
- /*
- * If the event has been added to the fh->subscribed list, but its
- * add op has not completed yet elems will be 0, treat this as
- * not being subscribed.
- */
- if (!sev->elems)
- return;
-
/* Increase event sequence number on fh. */
fh->sequence++;
@@ -212,6 +204,7 @@
struct v4l2_subscribed_event *sev, *found_ev;
unsigned long flags;
unsigned i;
+ int ret = 0;
if (sub->type == V4L2_EVENT_ALL)
return -EINVAL;
@@ -229,31 +222,36 @@
sev->flags = sub->flags;
sev->fh = fh;
sev->ops = ops;
+ sev->elems = elems;
+
+ mutex_lock(&fh->subscribe_lock);
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found_ev = v4l2_event_subscribed(fh, sub->type, sub->id);
- if (!found_ev)
- list_add(&sev->list, &fh->subscribed);
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
if (found_ev) {
+ /* Already listening */
kfree(sev);
- return 0; /* Already listening */
+ goto out_unlock;
}
if (sev->ops && sev->ops->add) {
- int ret = sev->ops->add(sev, elems);
+ ret = sev->ops->add(sev, elems);
if (ret) {
- sev->ops = NULL;
- v4l2_event_unsubscribe(fh, sub);
- return ret;
+ kfree(sev);
+ goto out_unlock;
}
}
- /* Mark as ready for use */
- sev->elems = elems;
+ spin_lock_irqsave(&fh->vdev->fh_lock, flags);
+ list_add(&sev->list, &fh->subscribed);
+ spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
- return 0;
+out_unlock:
+ mutex_unlock(&fh->subscribe_lock);
+
+ return ret;
}
EXPORT_SYMBOL_GPL(v4l2_event_subscribe);
@@ -292,6 +290,8 @@
return 0;
}
+ mutex_lock(&fh->subscribe_lock);
+
spin_lock_irqsave(&fh->vdev->fh_lock, flags);
sev = v4l2_event_subscribed(fh, sub->type, sub->id);
@@ -309,6 +309,8 @@
if (sev && sev->ops && sev->ops->del)
sev->ops->del(sev);
+ mutex_unlock(&fh->subscribe_lock);
+
kfree(sev);
return 0;
diff --git a/drivers/media/v4l2-core/v4l2-fh.c b/drivers/media/v4l2-core/v4l2-fh.c
index c183f09..0c5e690 100644
--- a/drivers/media/v4l2-core/v4l2-fh.c
+++ b/drivers/media/v4l2-core/v4l2-fh.c
@@ -50,6 +50,7 @@
INIT_LIST_HEAD(&fh->available);
INIT_LIST_HEAD(&fh->subscribed);
fh->sequence = -1;
+ mutex_init(&fh->subscribe_lock);
}
EXPORT_SYMBOL_GPL(v4l2_fh_init);
@@ -95,6 +96,7 @@
return;
v4l_disable_media_source(fh->vdev);
v4l2_event_unsubscribe_all(fh);
+ mutex_destroy(&fh->subscribe_lock);
fh->vdev = NULL;
}
EXPORT_SYMBOL_GPL(v4l2_fh_exit);
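
The new fh->subscribe_lock serializes subscribe/unsubscribe against each other so that sev->ops->add(), which may sleep, runs with no spinlock held, and the event is published to fh->subscribed only once fully constructed. The resulting locking shape (schematic, kernel context assumed):

mutex_lock(&fh->subscribe_lock);                /* serialize (un)subscribe */

spin_lock_irqsave(&fh->vdev->fh_lock, flags);
found = v4l2_event_subscribed(fh, type, id);    /* list peek under spinlock */
spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);

if (!found && sev->ops && sev->ops->add)
        ret = sev->ops->add(sev, elems);        /* may sleep: spinlock dropped */

if (!found && !ret) {
        spin_lock_irqsave(&fh->vdev->fh_lock, flags);
        list_add(&sev->list, &fh->subscribed);  /* publish fully-built event */
        spin_unlock_irqrestore(&fh->vdev->fh_lock, flags);
}

mutex_unlock(&fh->subscribe_lock);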
diff --git a/drivers/mfd/omap-usb-host.c b/drivers/mfd/omap-usb-host.c
index 7aab376..3785c63 100644
--- a/drivers/mfd/omap-usb-host.c
+++ b/drivers/mfd/omap-usb-host.c
@@ -548,8 +548,8 @@
}
static const struct of_device_id usbhs_child_match_table[] = {
- { .compatible = "ti,omap-ehci", },
- { .compatible = "ti,omap-ohci", },
+ { .compatible = "ti,ehci-omap", },
+ { .compatible = "ti,ohci-omap3", },
{ }
};
@@ -875,6 +875,7 @@
.pm = &usbhsomap_dev_pm_ops,
.of_match_table = usbhs_omap_dt_ids,
},
+ .probe = usbhs_omap_probe,
.remove = usbhs_omap_remove,
};
@@ -884,9 +885,9 @@
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("usb host common core driver for omap EHCI and OHCI");
-static int __init omap_usbhs_drvinit(void)
+static int omap_usbhs_drvinit(void)
{
- return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
+ return platform_driver_register(&usbhs_omap_driver);
}
/*
@@ -898,7 +899,7 @@
*/
fs_initcall_sync(omap_usbhs_drvinit);
-static void __exit omap_usbhs_drvexit(void)
+static void omap_usbhs_drvexit(void)
{
platform_driver_unregister(&usbhs_omap_driver);
}
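
platform_driver_probe() is one-shot: it unregisters the driver if nothing bound during the call, so children that probe-defer (the renamed ti,ehci-omap and ti,ohci-omap3 nodes) could never bind later. The patch switches to plain registration; the essential difference:

/* Before: probe callback passed separately, driver discarded on defer.
 *   return platform_driver_probe(&usbhs_omap_driver, usbhs_omap_probe);
 *
 * After: probe lives in the driver struct and the driver stays
 * registered, so deferred probes can retry later. The __init/__exit
 * annotations are dropped since the code must persist past init. */
static int omap_usbhs_drvinit(void)
{
        return platform_driver_register(&usbhs_omap_driver);
}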
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index a233173..cf5764e 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -827,3 +827,13 @@
source "drivers/misc/cxl/Kconfig"
source "drivers/misc/fpr_FingerprintCard/Kconfig"
endmenu
+
+config OKL4_LINK_SHBUF
+ tristate "OKL4 link with shared buffer transport"
+ default y
+ depends on OKL4_GUEST
+ help
+ Enable driver for OKL4 inter-cell links using the "shared-buffer"
+ transport. This driver presents the link to Linux as a character device
+ which can be written to or read from to access the shared memory. An ioctl
+ on the device is used to send a virtual interrupt to the partner cell.
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index 8e5d0f6..6494a66 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -80,3 +80,5 @@
targets += lkdtm_rodata.o lkdtm_rodata_objcopy.o
$(obj)/lkdtm_rodata_objcopy.o: $(obj)/lkdtm_rodata.o FORCE
$(call if_changed,objcopy)
+
+obj-$(CONFIG_OKL4_LINK_SHBUF) += okl4-link-shbuf.o
diff --git a/drivers/misc/okl4-link-shbuf.c b/drivers/misc/okl4-link-shbuf.c
new file mode 100644
index 0000000..de65ea0
--- /dev/null
+++ b/drivers/misc/okl4-link-shbuf.c
@@ -0,0 +1,667 @@
+/*
+ * Driver for inter-cell links using the shared-buffer transport.
+ *
+ * Copyright (c) 2016 Cog Systems Pty Ltd.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ */
+#include <linux/atomic.h>
+#include <linux/cdev.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/platform_device.h>
+#include <linux/poll.h>
+#include <linux/rwsem.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/version.h>
+#include <microvisor/microvisor.h>
+#include <uapi/linux/okl4-link-shbuf.h>
+
+static const char DEVICE_NAME[] = "okl4_link_shbuf";
+
+/* Created devices will appear as /dev/<DEV_PREFIX><name> */
+static const char DEV_PREFIX[] = "okl4-";
+
+static const struct of_device_id okl4_link_shbuf_match[] = {
+ {
+ .compatible = "okl,microvisor-link-shbuf",
+ },
+ {},
+};
+MODULE_DEVICE_TABLE(of, okl4_link_shbuf_match);
+
+static struct class *link_shbuf_class;
+static dev_t link_shbuf_dev;
+
+/* A lock used to protect access to link_shbuf_dev */
+static spinlock_t device_number_allocate;
+
+/* Sentinel values for indicating missing communication channels */
+static const u32 NO_OUTGOING_IRQ = 0;
+static const int NO_INCOMING_IRQ = -1;
+
+/* Private data for this driver */
+struct link_shbuf_data {
+
+ /* Outgoing vIRQ */
+ u32 virqline;
+
+ /* Incoming vIRQ */
+ int virq;
+ atomic64_t virq_payload;
+ bool virq_pending;
+ wait_queue_head_t virq_wq;
+
+ /* Shared memory region */
+ void *base;
+ fmode_t permissions;
+ struct resource buffer;
+
+ /* Device data */
+ dev_t devt;
+ struct device *dev;
+ struct cdev cdev;
+
+};
+
+static bool link_shbuf_data_invariant(const struct link_shbuf_data *priv)
+{
+ if (!priv)
+ return false;
+
+ if (!priv->base || (uintptr_t)priv->base % PAGE_SIZE != 0)
+ return false;
+
+ if (resource_size(&priv->buffer) == 0)
+ return false;
+
+ if (!priv->dev)
+ return false;
+
+ return true;
+}
+
+static bool link_shbuf_valid_access(size_t size, loff_t pos, size_t count)
+{
+ return pos < size && count <= size && size - count >= pos;
+}
+
+static ssize_t link_shbuf_read(struct file *file, char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ long remaining;
+ const struct link_shbuf_data *priv;
+
+ /* The file should have been opened with read access to reach here */
+ if (WARN_ON(!(file->f_mode & FMODE_READ)))
+ return -EINVAL;
+
+ priv = file->private_data;
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return -EINVAL;
+
+ if (!link_shbuf_valid_access(resource_size(&priv->buffer), *ppos, count))
+ return -EINVAL;
+
+ remaining = copy_to_user(buffer, priv->base + *ppos, count);
+ *ppos += count - remaining;
+ return count - remaining;
+}
+
+static ssize_t link_shbuf_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ long remaining;
+ const struct link_shbuf_data *priv;
+
+ /* The file should have been opened with write access to reach here */
+ if (WARN_ON(!(file->f_mode & FMODE_WRITE)))
+ return -EINVAL;
+
+ priv = file->private_data;
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return -EINVAL;
+
+ if (!link_shbuf_valid_access(resource_size(&priv->buffer), *ppos, count))
+ return -EINVAL;
+
+ remaining = copy_from_user(priv->base + *ppos, buffer, count);
+ *ppos += count - remaining;
+ return count - remaining;
+}
+
+static unsigned int link_shbuf_poll(struct file *file, poll_table *table)
+{
+ struct link_shbuf_data *priv;
+ unsigned int mask;
+
+ priv = file->private_data;
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return POLLERR;
+
+ poll_wait(file, &priv->virq_wq, table);
+
+ /* The shared memory is always considered ready for reading and writing. */
+ mask = POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM;
+
+ if (priv->virq_pending)
+ mask |= POLLPRI;
+
+ return mask;
+}
+
+static long link_shbuf_ioctl_irq_tx(const struct link_shbuf_data *priv,
+ unsigned long arg)
+{
+ okl4_error_t err;
+ u64 payload;
+ const u64 __user *user_arg = (const u64 __user*)arg;
+
+ if (priv->virqline == NO_OUTGOING_IRQ)
+ return -EINVAL;
+
+#if defined(CONFIG_ARM) && (LINUX_VERSION_CODE < KERNEL_VERSION(3, 17, 0))
+ if (copy_from_user(&payload, user_arg, sizeof(payload)))
+ return -EFAULT;
+#else
+ if (get_user(payload, user_arg))
+ return -EFAULT;
+#endif
+
+ err = _okl4_sys_vinterrupt_raise(priv->virqline, payload);
+ if (WARN_ON(err != OKL4_OK))
+ return -EINVAL;
+
+ return 0;
+}
+
+static long link_shbuf_ioctl_irq_clr(struct link_shbuf_data *priv,
+ unsigned long arg)
+{
+ u64 payload;
+ u64 __user *user_arg = (u64 __user*)arg;
+
+ /*
+ * Validate the user pointer before clearing the interrupt, so we never
+ * have to undo the clear (which would race with the IRQ handler).
+ */
+ if (!access_ok(VERIFY_WRITE, user_arg, sizeof(*user_arg)))
+ return -EFAULT;
+
+ /*
+ * Note that the clearing of the pending flag can race with the setting of
+ * this flag in the IRQ handler. It is up to the user to coordinate these
+ * actions.
+ */
+ priv->virq_pending = false;
+ smp_rmb();
+ payload = atomic64_xchg(&priv->virq_payload, 0);
+
+ /* We've already checked that this access is OK, so no need for put_user. */
+ if (__put_user(payload, user_arg))
+ return -EFAULT;
+
+ return 0;
+}
+
+static long link_shbuf_ioctl(struct file *file, unsigned int request,
+ unsigned long arg)
+{
+ struct link_shbuf_data *priv;
+
+ priv = file->private_data;
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return -EINVAL;
+
+ /* We only support two ioctls */
+ switch (request) {
+
+ case OKL4_LINK_SHBUF_IOCTL_IRQ_TX:
+ return link_shbuf_ioctl_irq_tx(priv, arg);
+
+ case OKL4_LINK_SHBUF_IOCTL_IRQ_CLR:
+ return link_shbuf_ioctl_irq_clr(priv, arg);
+
+ }
+
+ /*
+ * Handy for debugging when userspace is linking against ioctl headers from
+ * a different kernel revision.
+ */
+ dev_dbg(priv->dev, "ioctl request 0x%x received which did not match either "
+ "OKL4_LINK_SHBUF_IOCTL_IRQ_TX (0x%x) or OKL4_LINK_SHBUF_IOCTL_IRQ_CLR "
+ "(0x%x)\n", request, (unsigned)OKL4_LINK_SHBUF_IOCTL_IRQ_TX,
+ (unsigned)OKL4_LINK_SHBUF_IOCTL_IRQ_CLR);
+
+ return -EINVAL;
+}
+
+static int link_shbuf_mmap(struct file *file, struct vm_area_struct *vma)
+{
+ const struct link_shbuf_data *priv;
+ unsigned long offset, pfn, flags;
+ size_t size;
+ pgprot_t prot;
+
+ /* Our caller should have taken the MM semaphore. */
+ if (WARN_ON(!rwsem_is_locked(&vma->vm_mm->mmap_sem)))
+ return -EINVAL;
+
+ /*
+ * The file should have been opened with a superset of the mmap requested
+ * permissions.
+ */
+ flags = vma->vm_flags;
+ if (WARN_ON((flags & VM_READ) && !(file->f_mode & FMODE_READ)))
+ return -EINVAL;
+ if (WARN_ON((flags & VM_WRITE) && !(file->f_mode & FMODE_WRITE)))
+ return -EINVAL;
+ if (WARN_ON((flags & VM_EXEC) && !(file->f_mode & FMODE_EXEC)))
+ return -EINVAL;
+
+ /* Retrieve our private data. */
+ priv = file->private_data;
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return -EINVAL;
+
+ /* Check the mmap request is within bounds. */
+ size = vma->vm_end - vma->vm_start;
+ offset = vma->vm_pgoff << PAGE_SHIFT;
+ if (!link_shbuf_valid_access(resource_size(&priv->buffer), offset, size))
+ return -EINVAL;
+
+ pfn = (priv->buffer.start + offset) >> PAGE_SHIFT;
+ prot = vm_get_page_prot(flags);
+
+ return remap_pfn_range(vma, vma->vm_start, pfn, size, prot);
+}
+
+static bool link_shbuf_access_ok(fmode_t allowed, fmode_t request)
+{
+ static const fmode_t ACCESS_MASK = FMODE_READ|FMODE_WRITE|FMODE_EXEC;
+ fmode_t relevant = request & ACCESS_MASK;
+ return (relevant & allowed) == relevant;
+}
+
+static int link_shbuf_open(struct inode *inode, struct file *file)
+{
+ struct cdev *cdev;
+ struct link_shbuf_data *priv;
+
+ /* Retrieve a pointer to our private data */
+ cdev = inode->i_cdev;
+ priv = container_of(cdev, struct link_shbuf_data, cdev);
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return -EINVAL;
+
+ if (!link_shbuf_access_ok(priv->permissions, file->f_mode))
+ return -EACCES;
+
+ file->private_data = priv;
+
+ return 0;
+}
+
+static const struct file_operations link_shbuf_ops = {
+ .owner = THIS_MODULE,
+ .read = link_shbuf_read,
+ .write = link_shbuf_write,
+ .poll = link_shbuf_poll,
+ .unlocked_ioctl = link_shbuf_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = link_shbuf_ioctl,
+#endif
+#ifdef CONFIG_MMU
+ .mmap = link_shbuf_mmap,
+#endif
+ .open = link_shbuf_open,
+};
+
+/*
+ * Interrupt handler.
+ *
+ * This function will be called when our link partner uses the ioctl on their
+ * shared memory device to send an outgoing interrupt.
+ */
+static irqreturn_t link_shbuf_irq_handler(int irq, void *data)
+{
+ u64 payload, old, new;
+ struct _okl4_sys_interrupt_get_payload_return _payload;
+
+ /* Retrieve a pointer to our private data. */
+ struct link_shbuf_data *priv = data;
+ if (WARN_ON(!link_shbuf_data_invariant(priv)))
+ return IRQ_NONE;
+
+ /*
+ * We should only ever be handling a single interrupt, and only if there
+ * was an incoming interrupt in the configuration.
+ */
+ if (WARN_ON(priv->virq < 0 || priv->virq != irq))
+ return IRQ_NONE;
+
+ _payload = _okl4_sys_interrupt_get_payload(irq);
+ payload = (u64)_payload.payload;
+
+ /*
+ * At this point, it is possible the pending flag is already set. It is up to
+ * the user to synchronise their transmission and acknowledgement of
+ * interrupts.
+ */
+
+ /* We open-code atomic64_or(), which is not universally available. */
+ do {
+ old = atomic64_read(&priv->virq_payload);
+ new = old | payload;
+ } while (atomic64_cmpxchg(&priv->virq_payload, old, new) != old);
+ smp_wmb();
+ priv->virq_pending = true;
+
+ wake_up_interruptible(&priv->virq_wq);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Allocate a unique device number for this device.
+ *
+ * Note that this function needs to lock its access to link_shbuf_dev as there
+ * may be multiple threads attempting to acquire a new device number.
+ */
+static int link_shbuf_allocate_device(dev_t *devt)
+{
+ int ret = 0;
+ dev_t next;
+
+ spin_lock(&device_number_allocate);
+
+ *devt = link_shbuf_dev;
+ next = MKDEV(MAJOR(link_shbuf_dev), MINOR(link_shbuf_dev) + 1);
+ /* Check for overflow */
+ if (MINOR(next) != MINOR(link_shbuf_dev) + 1)
+ ret = -ENOSPC;
+ else
+ link_shbuf_dev = next;
+
+ spin_unlock(&device_number_allocate);
+
+ return ret;
+}
+
+/*
+ * Discover and add a new shared-buffer link.
+ *
+ * In the following function, we are expecting to parse device tree entries
+ * looking like the following:
+ *
+ * hypervisor {
+ * ...
+ * interrupt-line@1d {
+ * compatible = "okl,microvisor-interrupt-line",
+ * "okl,microvisor-capability";
+ * phandle = <0x7>;
+ * reg = <0x1d>;
+ * label = "foo_virqline";
+ * };
+ * ;
+ *
+ * foo@41003000 {
+ * compatible = "okl,microvisor-link-shbuf",
+ * "okl,microvisor-shared-memory";
+ * phandle = <0xd>;
+ * reg = <0x0 0x41003000 0x2000>;
+ * label = "foo";
+ * okl,rwx = <0x6>;
+ * okl,interrupt-line = <0x7>;
+ * interrupts = <0x0 0x4 0x1>;
+ * interrupt-parent = <0x1>;
+ * };
+ */
+static int link_shbuf_probe(struct platform_device *pdev)
+{
+ int ret;
+ struct device_node *node, *virqline;
+ struct link_shbuf_data *priv;
+ const char *name;
+ u32 permissions;
+
+ node = pdev->dev.of_node;
+
+ if (!node)
+ return -ENODEV;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ /*
+ * Retrieve the outgoing vIRQ cap. Note, this is configurable and we
+ * anticipate that it may not exist.
+ */
+ virqline = of_parse_phandle(node, "okl,interrupt-line", 0);
+ if (!virqline) {
+ priv->virqline = NO_OUTGOING_IRQ;
+ } else {
+ ret = of_property_read_u32(virqline, "reg", &priv->virqline);
+ if (ret < 0 || priv->virqline == OKL4_KCAP_INVALID) {
+ of_node_put(virqline);
+ ret = -ENODEV;
+ goto err_free_dev;
+ }
+ }
+ of_node_put(virqline);
+
+ /*
+ * Retrieve the incoming vIRQ number. Again, this is configurable and
+ * we anticipate that it may not exist.
+ */
+ priv->virq = platform_get_irq(pdev, 0);
+ if (priv->virq < 0)
+ priv->virq = NO_INCOMING_IRQ;
+
+ /* If we have a valid incoming vIRQ, register to handle it. */
+ if (priv->virq >= 0) {
+ ret = devm_request_irq(&pdev->dev, priv->virq, link_shbuf_irq_handler,
+ 0, dev_name(&pdev->dev), priv);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed request for IRQ\n");
+ goto err_free_dev;
+ }
+ }
+
+ init_waitqueue_head(&priv->virq_wq);
+ priv->virq_pending = false;
+
+ /* Retrieve information about the shared memory region. */
+ ret = of_address_to_resource(node, 0, &priv->buffer);
+ if (ret < 0)
+ goto err_free_irq;
+ /*
+ * We expect the Elfweaver to have validated that we have a non-NULL,
+ * page-aligned region.
+ */
+ if (WARN_ON(priv->buffer.start == 0) ||
+ WARN_ON(resource_size(&priv->buffer) % PAGE_SIZE != 0)) {
+ ret = -EINVAL;
+ goto err_free_irq;
+ }
+ if (!devm_request_mem_region(&pdev->dev, priv->buffer.start,
+ resource_size(&priv->buffer), dev_name(&pdev->dev))) {
+ ret = -ENODEV;
+ goto err_free_irq;
+ }
+ priv->base = devm_ioremap(&pdev->dev, priv->buffer.start,
+ resource_size(&priv->buffer));
+ if (!priv->base) {
+ ret = -ENOMEM;
+ goto err_release_region;
+ }
+
+ /* Read the permissions of the shared memory region. */
+ ret = of_property_read_u32(node, "okl,rwx", &permissions);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read shared memory permissions\n");
+ goto err_unmap_dev;
+ }
+ if (permissions & ~S_IRWXO) {
+ ret = -EINVAL;
+ goto err_unmap_dev;
+ }
+ priv->permissions = ((permissions & S_IROTH) ? FMODE_READ : 0) |
+ ((permissions & S_IWOTH) ? FMODE_WRITE : 0) |
+ ((permissions & S_IXOTH) ? FMODE_EXEC : 0);
+ if (WARN_ON(priv->permissions == 0)) {
+ ret = -EINVAL;
+ goto err_unmap_dev;
+ }
+
+ /*
+ * Retrieve the label of this device. This will be the "name" attribute
+ * of the corresponding "link" tag in the system's XML specification.
+ */
+ ret = of_property_read_string(node, "label", &name);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to read label\n");
+ goto err_unmap_dev;
+ }
+
+ cdev_init(&priv->cdev, &link_shbuf_ops);
+ ret = cdev_add(&priv->cdev, link_shbuf_dev, 1);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to add char dev region\n");
+ goto err_unmap_dev;
+ }
+
+ ret = link_shbuf_allocate_device(&priv->devt);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "failed to allocate new device number\n");
+ goto err_unmap_dev;
+ }
+
+ /* We're now ready to create the device itself. */
+ BUG_ON(name == NULL);
+ priv->dev = device_create(link_shbuf_class, &pdev->dev, priv->devt,
+ priv, "%s%s", DEV_PREFIX, name);
+ if (IS_ERR(priv->dev)) {
+ dev_err(&pdev->dev, "failed to create device\n");
+ ret = PTR_ERR(priv->dev);
+ goto err_del_dev;
+ }
+
+ dev_set_drvdata(&pdev->dev, priv);
+
+ return 0;
+
+err_del_dev:
+ cdev_del(&priv->cdev);
+err_unmap_dev:
+ devm_iounmap(&pdev->dev, priv->base);
+err_release_region:
+ devm_release_mem_region(&pdev->dev, priv->buffer.start,
+ resource_size(&priv->buffer));
+err_free_irq:
+ if (priv->virq != NO_INCOMING_IRQ)
+ devm_free_irq(&pdev->dev, priv->virq, priv);
+err_free_dev:
+ devm_kfree(&pdev->dev, priv);
+ return ret;
+}
+
+static int link_shbuf_remove(struct platform_device *pdev)
+{
+ struct link_shbuf_data *priv;
+
+ priv = dev_get_drvdata(&pdev->dev);
+ WARN_ON(!link_shbuf_data_invariant(priv));
+
+ device_destroy(link_shbuf_class, priv->devt);
+
+ cdev_del(&priv->cdev);
+
+ /*
+ * None of the following is strictly required, as these are all managed
+ * resources, but we clean it up anyway for clarity.
+ */
+
+ devm_iounmap(&pdev->dev, priv->base);
+
+ devm_release_mem_region(&pdev->dev, priv->buffer.start,
+ resource_size(&priv->buffer));
+
+ if (priv->virq != NO_INCOMING_IRQ)
+ devm_free_irq(&pdev->dev, priv->virq, priv);
+
+ devm_kfree(&pdev->dev, priv);
+
+ return 0;
+}
+
+static struct platform_driver of_plat_link_shbuf_driver = {
+ .driver = {
+ .name = "okl4-shbuf",
+ .owner = THIS_MODULE,
+ .of_match_table = okl4_link_shbuf_match,
+ },
+ .probe = link_shbuf_probe,
+ .remove = link_shbuf_remove,
+};
+
+/* Maximum number of minor device numbers */
+enum {
+ MAX_MINOR = 1 << MINORBITS,
+};
+
+static int __init okl4_link_shbuf_init(void)
+{
+ int ret;
+
+ link_shbuf_class = class_create(THIS_MODULE, DEVICE_NAME);
+ if (IS_ERR(link_shbuf_class)) {
+ pr_err("failed to create class\n");
+ ret = PTR_ERR(link_shbuf_class);
+ return ret;
+ }
+
+ ret = alloc_chrdev_region(&link_shbuf_dev, 0, MAX_MINOR, DEVICE_NAME);
+ if (ret < 0) {
+ pr_err("failed to allocate char dev region\n");
+ goto err_destroy_class;
+ }
+
+ ret = platform_driver_register(&of_plat_link_shbuf_driver);
+ if (ret < 0) {
+ pr_err("failed to register driver\n");
+ goto err_unregister_dev_region;
+ }
+
+ spin_lock_init(&device_number_allocate);
+
+ return 0;
+
+err_unregister_dev_region:
+ unregister_chrdev_region(link_shbuf_dev, MAX_MINOR);
+err_destroy_class:
+ class_destroy(link_shbuf_class);
+ return ret;
+}
+module_init(okl4_link_shbuf_init);
+
+static void __exit okl4_link_shbuf_exit(void)
+{
+ platform_driver_unregister(&of_plat_link_shbuf_driver);
+ unregister_chrdev_region(link_shbuf_dev, MAX_MINOR);
+ class_destroy(link_shbuf_class);
+}
+module_exit(okl4_link_shbuf_exit);
+
+MODULE_DESCRIPTION("OKL4 shared buffer link driver");
+MODULE_AUTHOR("Cog Systems Pty Ltd");
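
For reference, a hedged sketch of driving one of these links from userspace, assuming a link labelled "foo" (as in the probe comment's device tree example) and the uapi header exporting the two ioctls:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/okl4-link-shbuf.h>      /* OKL4_LINK_SHBUF_IOCTL_* */

int main(void)
{
        uint64_t payload = 0x1;
        char buf[64];
        int fd = open("/dev/okl4-foo", O_RDWR); /* DEV_PREFIX + label */

        if (fd < 0)
                return 1;

        /* The shared buffer is plain read/write (or mmap-able). */
        if (read(fd, buf, sizeof(buf)) < 0)
                perror("read");

        /* Raise the outgoing vIRQ, passing a 64-bit payload. */
        if (ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_TX, &payload) < 0)
                perror("ioctl");

        /* Collect and clear any accumulated incoming payload. */
        if (ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_CLR, &payload) < 0)
                perror("ioctl");

        close(fd);
        return 0;
}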
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 45d3301..d9aa6e2 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -146,6 +146,8 @@
static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
static DEFINE_MUTEX(clk_access_lock);
+static DEFINE_MUTEX(listener_access_lock);
+
struct sglist_info {
uint32_t indexAndFlags;
@@ -185,15 +187,21 @@
size_t sb_length;
struct ion_handle *ihandle; /* Retrieve phy addr */
wait_queue_head_t rcv_req_wq;
- /* rcv_req_flag: -1: not ready; 0: ready and empty; 1: received req */
+ /* rcv_req_flag: 0: ready and empty; 1: received req */
int rcv_req_flag;
int send_resp_flag;
bool listener_in_use;
/* wq for thread blocked on this listener*/
wait_queue_head_t listener_block_app_wq;
- struct sglist_info sglistinfo_ptr[MAX_ION_FD];
- uint32_t sglist_cnt;
- int abort;
+ struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+ uint32_t sglist_cnt;
+ int abort;
+ bool unregister_pending;
+};
+
+struct qseecom_unregister_pending_list {
+ struct list_head list;
+ struct qseecom_dev_handle *data;
};
struct qseecom_registered_app_list {
@@ -241,7 +249,6 @@
struct qseecom_control {
struct ion_client *ion_clnt; /* Ion client */
struct list_head registered_listener_list_head;
- spinlock_t registered_listener_list_lock;
struct list_head registered_app_list_head;
spinlock_t registered_app_list_lock;
@@ -288,6 +295,9 @@
atomic_t qseecom_state;
int is_apps_region_protected;
bool smcinvoke_support;
+
+ struct list_head unregister_lsnr_pending_list_head;
+ wait_queue_head_t register_lsnr_pending_wq;
};
struct qseecom_sec_buf_fd_info {
@@ -316,6 +326,7 @@
struct qseecom_listener_handle {
u32 id;
+ bool unregister_pending;
};
static struct qseecom_control qseecom;
@@ -589,13 +600,10 @@
}
qseecom.smcinvoke_support = true;
smc_id = TZ_OS_REGISTER_LISTENER_SMCINVOKE_ID;
- __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
ret = scm_call2(smc_id, &desc);
- if (ret) {
+ if (ret && ret != -EBUSY) {
qseecom.smcinvoke_support = false;
smc_id = TZ_OS_REGISTER_LISTENER_ID;
- __qseecom_reentrancy_check_if_no_app_blocked(
- smc_id);
ret = scm_call2(smc_id, &desc);
}
break;
@@ -608,7 +616,6 @@
smc_id = TZ_OS_DEREGISTER_LISTENER_ID;
desc.arginfo = TZ_OS_DEREGISTER_LISTENER_ID_PARAM_ID;
desc.args[0] = req->listener_id;
- __qseecom_reentrancy_check_if_no_app_blocked(smc_id);
ret = scm_call2(smc_id, &desc);
break;
}
@@ -1069,42 +1076,18 @@
return qseecom_scm_call2(svc_id, tz_cmd_id, cmd_buf, resp_buf);
}
-static int __qseecom_is_svc_unique(struct qseecom_dev_handle *data,
- struct qseecom_register_listener_req *svc)
-{
- struct qseecom_registered_listener_list *ptr;
- int unique = 1;
- unsigned long flags;
-
- spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
- list_for_each_entry(ptr, &qseecom.registered_listener_list_head, list) {
- if (ptr->svc.listener_id == svc->listener_id) {
- pr_err("Service id: %u is already registered\n",
- ptr->svc.listener_id);
- unique = 0;
- break;
- }
- }
- spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
- return unique;
-}
-
static struct qseecom_registered_listener_list *__qseecom_find_svc(
int32_t listener_id)
{
struct qseecom_registered_listener_list *entry = NULL;
- unsigned long flags;
- spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
list_for_each_entry(entry,
&qseecom.registered_listener_list_head, list) {
if (entry->svc.listener_id == listener_id)
break;
}
- spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
-
if ((entry != NULL) && (entry->svc.listener_id != listener_id)) {
- pr_err("Service id: %u is not found\n", listener_id);
+ pr_debug("Service id: %u is not found\n", listener_id);
return NULL;
}
@@ -1183,9 +1166,9 @@
void __user *argp)
{
int ret = 0;
- unsigned long flags;
struct qseecom_register_listener_req rcvd_lstnr;
struct qseecom_registered_listener_list *new_entry;
+ struct qseecom_registered_listener_list *ptr_svc;
ret = copy_from_user(&rcvd_lstnr, argp, sizeof(rcvd_lstnr));
if (ret) {
@@ -1197,18 +1180,37 @@
return -EFAULT;
data->listener.id = rcvd_lstnr.listener_id;
- if (!__qseecom_is_svc_unique(data, &rcvd_lstnr)) {
- pr_err("Service %d is not unique and failed to register\n",
+
+ ptr_svc = __qseecom_find_svc(rcvd_lstnr.listener_id);
+ if (ptr_svc) {
+ if (ptr_svc->unregister_pending == false) {
+ pr_err("Service %d is not unique\n",
rcvd_lstnr.listener_id);
data->released = true;
return -EBUSY;
+ } else {
+ /* wait until listener is unregistered */
+ pr_debug("register %d has to wait\n",
+ rcvd_lstnr.listener_id);
+ mutex_unlock(&listener_access_lock);
+ ret = wait_event_freezable(
+ qseecom.register_lsnr_pending_wq,
+ list_empty(
+ &qseecom.unregister_lsnr_pending_list_head));
+ if (ret) {
+ pr_err("interrupted register_pending_wq %d\n",
+ rcvd_lstnr.listener_id);
+ mutex_lock(&listener_access_lock);
+ return -ERESTARTSYS;
+ }
+ mutex_lock(&listener_access_lock);
+ }
}
-
new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
if (!new_entry)
return -ENOMEM;
memcpy(&new_entry->svc, &rcvd_lstnr, sizeof(rcvd_lstnr));
- new_entry->rcv_req_flag = -1;
+ new_entry->rcv_req_flag = 0;
new_entry->svc.listener_id = rcvd_lstnr.listener_id;
new_entry->sb_length = rcvd_lstnr.sb_size;
@@ -1224,45 +1226,20 @@
init_waitqueue_head(&new_entry->listener_block_app_wq);
new_entry->send_resp_flag = 0;
new_entry->listener_in_use = false;
- spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
list_add_tail(&new_entry->list, &qseecom.registered_listener_list_head);
- spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
pr_warn("Service %d is registered\n", rcvd_lstnr.listener_id);
return ret;
}
-static void __qseecom_listener_abort_all(int abort)
-{
- struct qseecom_registered_listener_list *entry = NULL;
- unsigned long flags;
-
- spin_lock_irqsave(&qseecom.registered_listener_list_lock, flags);
- list_for_each_entry(entry,
- &qseecom.registered_listener_list_head, list) {
- pr_debug("set abort %d for listener %d\n",
- abort, entry->svc.listener_id);
- entry->abort = abort;
- }
- if (abort)
- wake_up_interruptible_all(&qseecom.send_resp_wq);
- spin_unlock_irqrestore(&qseecom.registered_listener_list_lock, flags);
-}
-
-static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
+static int __qseecom_unregister_listener(struct qseecom_dev_handle *data,
+ struct qseecom_registered_listener_list *ptr_svc)
{
int ret = 0;
struct qseecom_register_listener_ireq req;
- struct qseecom_registered_listener_list *ptr_svc = NULL;
struct qseecom_command_scm_resp resp;
struct ion_handle *ihandle = NULL; /* Retrieve phy addr */
- ptr_svc = __qseecom_find_svc(data->listener.id);
- if (!ptr_svc) {
- pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
- return -ENODATA;
- }
-
req.qsee_cmd_id = QSEOS_DEREGISTER_LISTENER;
req.listener_id = data->listener.id;
resp.result = QSEOS_RESULT_INCOMPLETE;
@@ -1272,6 +1249,8 @@
if (ret) {
pr_err("scm_call() failed with err: %d (lstnr id=%d)\n",
ret, data->listener.id);
+ if (ret == -EBUSY)
+ return ret;
goto exit;
}
@@ -1283,7 +1262,6 @@
}
data->abort = 1;
- ptr_svc->abort = 1;
wake_up_all(&ptr_svc->rcv_req_wq);
while (atomic_read(&data->ioctl_count) > 1) {
@@ -1310,6 +1288,74 @@
return ret;
}
+static int qseecom_unregister_listener(struct qseecom_dev_handle *data)
+{
+ struct qseecom_registered_listener_list *ptr_svc = NULL;
+ struct qseecom_unregister_pending_list *entry = NULL;
+
+ ptr_svc = __qseecom_find_svc(data->listener.id);
+ if (!ptr_svc) {
+ pr_err("Unregiser invalid listener ID %d\n", data->listener.id);
+ return -ENODATA;
+ }
+ /* stop CA thread waiting for listener response */
+ ptr_svc->abort = 1;
+ wake_up_interruptible_all(&qseecom.send_resp_wq);
+
+ /* return directly if pending */
+ if (ptr_svc->unregister_pending)
+ return 0;
+
+ /* add unregistration into pending list */
+ entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry)
+ return -ENOMEM;
+ entry->data = data;
+ list_add_tail(&entry->list,
+ &qseecom.unregister_lsnr_pending_list_head);
+ ptr_svc->unregister_pending = true;
+ pr_debug("unregister %d pending\n", data->listener.id);
+ return 0;
+}
+
+static void __qseecom_processing_pending_lsnr_unregister(void)
+{
+ struct qseecom_unregister_pending_list *entry = NULL;
+ struct qseecom_registered_listener_list *ptr_svc = NULL;
+ struct list_head *pos;
+ int ret = 0;
+
+ mutex_lock(&listener_access_lock);
+ while (!list_empty(&qseecom.unregister_lsnr_pending_list_head)) {
+ pos = qseecom.unregister_lsnr_pending_list_head.next;
+ entry = list_entry(pos,
+ struct qseecom_unregister_pending_list, list);
+ if (entry && entry->data) {
+ pr_debug("process pending unregister %d\n",
+ entry->data->listener.id);
+ ptr_svc = __qseecom_find_svc(
+ entry->data->listener.id);
+ if (ptr_svc) {
+ ret = __qseecom_unregister_listener(
+ entry->data, ptr_svc);
+ if (ret == -EBUSY) {
+ pr_debug("unregister %d pending again\n",
+ entry->data->listener.id);
+ mutex_unlock(&listener_access_lock);
+ return;
+ }
+ } else
+ pr_err("invalid listener %d\n",
+ entry->data->listener.id);
+ kzfree(entry->data);
+ }
+ list_del(pos);
+ kzfree(entry);
+ }
+ mutex_unlock(&listener_access_lock);
+ wake_up_interruptible(&qseecom.register_lsnr_pending_wq);
+}
+
static int __qseecom_set_msm_bus_request(uint32_t mode)
{
int ret = 0;
@@ -1644,19 +1690,12 @@
}
}
-static int __is_listener_rcv_wq_not_ready(
- struct qseecom_registered_listener_list *ptr_svc)
-{
- return ptr_svc->rcv_req_flag == -1;
-}
-
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
struct qseecom_command_scm_resp *resp)
{
int ret = 0;
int rc = 0;
uint32_t lstnr;
- unsigned long flags;
struct qseecom_client_listener_data_irsp send_data_rsp = {0};
struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
= {0};
@@ -1667,30 +1706,23 @@
void *cmd_buf = NULL;
size_t cmd_len;
struct sglist_info *table = NULL;
- bool not_ready = false;
+ qseecom.app_block_ref_cnt++;
while (resp->result == QSEOS_RESULT_INCOMPLETE) {
lstnr = resp->data;
/*
* Wake up blocking listener service with the lstnr id
*/
- spin_lock_irqsave(&qseecom.registered_listener_list_lock,
- flags);
+ mutex_lock(&listener_access_lock);
list_for_each_entry(ptr_svc,
&qseecom.registered_listener_list_head, list) {
if (ptr_svc->svc.listener_id == lstnr) {
- if (__is_listener_rcv_wq_not_ready(ptr_svc)) {
- not_ready = true;
- break;
- }
ptr_svc->listener_in_use = true;
ptr_svc->rcv_req_flag = 1;
wake_up_interruptible(&ptr_svc->rcv_req_wq);
break;
}
}
- spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
- flags);
if (ptr_svc == NULL) {
pr_err("Listener Svc %d does not exist\n", lstnr);
@@ -1716,22 +1748,13 @@
}
if (ptr_svc->abort == 1) {
- pr_err("Service %d abort %d\n",
+ pr_debug("Service %d abort %d\n",
lstnr, ptr_svc->abort);
rc = -ENODEV;
status = QSEOS_RESULT_FAILURE;
goto err_resp;
}
- if (not_ready) {
- pr_err("Service %d is not ready to receive request\n",
- lstnr);
- rc = -ENOENT;
- status = QSEOS_RESULT_FAILURE;
- goto err_resp;
-
- }
-
pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
/* initialize the new signal mask with all signals*/
@@ -1739,6 +1762,7 @@
/* block all signals */
sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
+ mutex_unlock(&listener_access_lock);
do {
/*
* When reentrancy is not supported, check global
@@ -1759,7 +1783,7 @@
break;
}
} while (1);
-
+ mutex_lock(&listener_access_lock);
/* restore signal mask */
sigprocmask(SIG_SETMASK, &old_sigset, NULL);
if (data->abort || ptr_svc->abort) {
@@ -1815,14 +1839,14 @@
ION_IOC_CLEAN_INV_CACHES);
if (ret) {
pr_err("cache operation failed %d\n", ret);
- return ret;
+ goto exit;
}
}
if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE)) {
ret = __qseecom_enable_clk(CLK_QSEE);
if (ret)
- return ret;
+ goto exit;
}
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
@@ -1836,7 +1860,7 @@
ret, data->client.app_id);
if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
__qseecom_disable_clk(CLK_QSEE);
- return ret;
+ goto exit;
}
pr_debug("resp status %d, res= %d, app_id = %d, lstr = %d\n",
status, resp->result, data->client.app_id, lstnr);
@@ -1845,11 +1869,15 @@
pr_err("fail:resp res= %d,app_id = %d,lstr = %d\n",
resp->result, data->client.app_id, lstnr);
ret = -EINVAL;
+ goto exit;
}
+exit:
+ mutex_unlock(&listener_access_lock);
if ((lstnr == RPMB_SERVICE) || (lstnr == SSD_SERVICE))
__qseecom_disable_clk(CLK_QSEE);
}
+ qseecom.app_block_ref_cnt--;
if (rc)
return rc;
@@ -1902,10 +1930,12 @@
do {
session_id = resp->resp_type;
+ mutex_lock(&listener_access_lock);
list_ptr = __qseecom_find_svc(resp->data);
if (!list_ptr) {
pr_err("Invalid listener ID %d\n", resp->data);
ret = -ENODATA;
+ mutex_unlock(&listener_access_lock);
goto exit;
}
ptr_app->blocked_on_listener_id = resp->data;
@@ -1921,11 +1951,13 @@
do {
qseecom.app_block_ref_cnt++;
ptr_app->app_blocked = true;
+ mutex_unlock(&listener_access_lock);
mutex_unlock(&app_access_lock);
wait_event_freezable(
list_ptr->listener_block_app_wq,
!list_ptr->listener_in_use);
mutex_lock(&app_access_lock);
+ mutex_lock(&listener_access_lock);
ptr_app->app_blocked = false;
qseecom.app_block_ref_cnt--;
} while (list_ptr->listener_in_use);
@@ -1958,9 +1990,11 @@
if (ret) {
pr_err("unblock app %d or session %d fail\n",
data->client.app_id, session_id);
+ mutex_unlock(&listener_access_lock);
goto exit;
}
}
+ mutex_unlock(&listener_access_lock);
resp->result = continue_resp.result;
resp->resp_type = continue_resp.resp_type;
resp->data = continue_resp.data;
@@ -1982,7 +2016,6 @@
int ret = 0;
int rc = 0;
uint32_t lstnr;
- unsigned long flags;
struct qseecom_client_listener_data_irsp send_data_rsp = {0};
struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit
= {0};
@@ -1993,30 +2026,22 @@
void *cmd_buf = NULL;
size_t cmd_len;
struct sglist_info *table = NULL;
- bool not_ready = false;
while (ret == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
lstnr = resp->data;
/*
* Wake up blocking listener service with the lstnr id
*/
- spin_lock_irqsave(&qseecom.registered_listener_list_lock,
- flags);
+ mutex_lock(&listener_access_lock);
list_for_each_entry(ptr_svc,
&qseecom.registered_listener_list_head, list) {
if (ptr_svc->svc.listener_id == lstnr) {
- if (__is_listener_rcv_wq_not_ready(ptr_svc)) {
- not_ready = true;
- break;
- }
ptr_svc->listener_in_use = true;
ptr_svc->rcv_req_flag = 1;
wake_up_interruptible(&ptr_svc->rcv_req_wq);
break;
}
}
- spin_unlock_irqrestore(&qseecom.registered_listener_list_lock,
- flags);
if (ptr_svc == NULL) {
pr_err("Listener Svc %d does not exist\n", lstnr);
@@ -2042,22 +2067,13 @@
}
if (ptr_svc->abort == 1) {
- pr_err("Service %d abort %d\n",
+ pr_debug("Service %d abort %d\n",
lstnr, ptr_svc->abort);
rc = -ENODEV;
status = QSEOS_RESULT_FAILURE;
goto err_resp;
}
- if (not_ready) {
- pr_err("Service %d is not ready to receive request\n",
- lstnr);
- rc = -ENOENT;
- status = QSEOS_RESULT_FAILURE;
- goto err_resp;
-
- }
-
pr_debug("waking up rcv_req_wq and waiting for send_resp_wq\n");
/* initialize the new signal mask with all signals*/
@@ -2067,6 +2083,7 @@
sigprocmask(SIG_SETMASK, &new_sigset, &old_sigset);
/* unlock mutex btw waking listener and sleep-wait */
+ mutex_unlock(&listener_access_lock);
mutex_unlock(&app_access_lock);
do {
if (!wait_event_freezable(qseecom.send_resp_wq,
@@ -2077,6 +2094,7 @@
} while (1);
/* lock mutex again after resp sent */
mutex_lock(&app_access_lock);
+ mutex_lock(&listener_access_lock);
ptr_svc->send_resp_flag = 0;
qseecom.send_resp_flag = 0;
@@ -2138,7 +2156,7 @@
if (lstnr == RPMB_SERVICE) {
ret = __qseecom_enable_clk(CLK_QSEE);
if (ret)
- return ret;
+ goto exit;
}
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
@@ -2183,6 +2201,7 @@
goto exit;
}
exit:
+ mutex_unlock(&listener_access_lock);
if (lstnr == RPMB_SERVICE)
__qseecom_disable_clk(CLK_QSEE);
@@ -3271,6 +3290,7 @@
pr_err("cache operation failed %d\n", ret2);
return ret2;
}
+ __qseecom_processing_pending_lsnr_unregister();
return ret;
}
@@ -3866,7 +3886,7 @@
int ret;
ret = (svc->rcv_req_flag == 1);
- return ret || data->abort || svc->abort;
+ return ret || data->abort;
}
static int qseecom_receive_req(struct qseecom_dev_handle *data)
@@ -3874,14 +3894,14 @@
int ret = 0;
struct qseecom_registered_listener_list *this_lstnr;
+ mutex_lock(&listener_access_lock);
this_lstnr = __qseecom_find_svc(data->listener.id);
if (!this_lstnr) {
pr_err("Invalid listener ID\n");
+ mutex_unlock(&listener_access_lock);
return -ENODATA;
}
-
- if (this_lstnr->rcv_req_flag == -1)
- this_lstnr->rcv_req_flag = 0;
+ mutex_unlock(&listener_access_lock);
while (1) {
if (wait_event_freezable(this_lstnr->rcv_req_wq,
@@ -3890,16 +3910,17 @@
pr_warn("Interrupted: exiting Listener Service = %d\n",
(uint32_t)data->listener.id);
/* woken up for different reason */
- this_lstnr->rcv_req_flag = -1;
return -ERESTARTSYS;
}
- if (data->abort || this_lstnr->abort) {
+ if (data->abort) {
pr_err("Aborting Listener Service = %d\n",
(uint32_t)data->listener.id);
return -ENODEV;
}
+ mutex_lock(&listener_access_lock);
this_lstnr->rcv_req_flag = 0;
+ mutex_unlock(&listener_access_lock);
break;
}
return ret;
@@ -4486,6 +4507,8 @@
uint32_t fw_size, app_arch;
uint32_t app_id = 0;
+ __qseecom_processing_pending_lsnr_unregister();
+
if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
pr_err("Not allowed to be called in %d state\n",
atomic_read(&qseecom.qseecom_state));
@@ -4658,6 +4681,8 @@
unsigned long flags = 0;
bool found_handle = false;
+ __qseecom_processing_pending_lsnr_unregister();
+
if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
pr_err("Not allowed to be called in %d state\n",
atomic_read(&qseecom.qseecom_state));
@@ -4706,6 +4731,8 @@
struct qseecom_dev_handle *data;
bool perf_enabled = false;
+ __qseecom_processing_pending_lsnr_unregister();
+
if (atomic_read(&qseecom.qseecom_state) != QSEECOM_STATE_READY) {
pr_err("Not allowed to be called in %d state\n",
atomic_read(&qseecom.qseecom_state));
@@ -7018,6 +7045,11 @@
pr_err("Aborting qseecom driver\n");
return -ENODEV;
}
+ if (cmd != QSEECOM_IOCTL_RECEIVE_REQ &&
+ cmd != QSEECOM_IOCTL_SEND_RESP_REQ &&
+ cmd != QSEECOM_IOCTL_SEND_MODFD_RESP &&
+ cmd != QSEECOM_IOCTL_SEND_MODFD_RESP_64)
+ __qseecom_processing_pending_lsnr_unregister();
switch (cmd) {
case QSEECOM_IOCTL_REGISTER_LISTENER_REQ: {
@@ -7028,13 +7060,13 @@
break;
}
pr_debug("ioctl register_listener_req()\n");
- mutex_lock(&app_access_lock);
+ mutex_lock(&listener_access_lock);
atomic_inc(&data->ioctl_count);
data->type = QSEECOM_LISTENER_SERVICE;
ret = qseecom_register_listener(data, argp);
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
- mutex_unlock(&app_access_lock);
+ mutex_unlock(&listener_access_lock);
if (ret)
pr_err("failed qseecom_register_listener: %d\n", ret);
break;
@@ -7060,14 +7092,12 @@
break;
}
pr_debug("ioctl unregister_listener_req()\n");
- __qseecom_listener_abort_all(1);
- mutex_lock(&app_access_lock);
+ mutex_lock(&listener_access_lock);
atomic_inc(&data->ioctl_count);
ret = qseecom_unregister_listener(data);
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
- mutex_unlock(&app_access_lock);
- __qseecom_listener_abort_all(0);
+ mutex_unlock(&listener_access_lock);
if (ret)
pr_err("failed qseecom_unregister_listener: %d\n", ret);
break;
@@ -7222,6 +7252,7 @@
ret = -EINVAL;
break;
}
+ mutex_lock(&listener_access_lock);
atomic_inc(&data->ioctl_count);
if (!qseecom.qsee_reentrancy_support)
ret = qseecom_send_resp();
@@ -7229,6 +7260,7 @@
ret = qseecom_reentrancy_send_resp(data);
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
+ mutex_unlock(&listener_access_lock);
if (ret)
pr_err("failed qseecom_send_resp: %d\n", ret);
break;
@@ -7564,6 +7596,7 @@
ret = -EINVAL;
break;
}
+ mutex_lock(&listener_access_lock);
atomic_inc(&data->ioctl_count);
if (cmd == QSEECOM_IOCTL_SEND_MODFD_RESP)
ret = qseecom_send_modfd_resp(data, argp);
@@ -7571,6 +7604,7 @@
ret = qseecom_send_modfd_resp_64(data, argp);
atomic_dec(&data->ioctl_count);
wake_up_all(&data->abort_wq);
+ mutex_unlock(&listener_access_lock);
if (ret)
pr_err("failed qseecom_send_mod_resp: %d\n", ret);
__qseecom_clean_data_sglistinfo(data);
@@ -7728,18 +7762,18 @@
{
struct qseecom_dev_handle *data = file->private_data;
int ret = 0;
+ bool free_private_data = true;
if (data->released == false) {
pr_debug("data: released=false, type=%d, mode=%d, data=0x%pK\n",
data->type, data->mode, data);
switch (data->type) {
case QSEECOM_LISTENER_SERVICE:
- pr_warn("release lsnr svc %d\n", data->listener.id);
- __qseecom_listener_abort_all(1);
- mutex_lock(&app_access_lock);
+ pr_debug("release lsnr svc %d\n", data->listener.id);
+ free_private_data = false;
+ mutex_lock(&listener_access_lock);
ret = qseecom_unregister_listener(data);
- mutex_unlock(&app_access_lock);
- __qseecom_listener_abort_all(0);
+ mutex_unlock(&listener_access_lock);
break;
case QSEECOM_CLIENT_APP:
mutex_lock(&app_access_lock);
@@ -7778,8 +7812,9 @@
if (data->perf_enabled == true)
qsee_disable_clock_vote(data, CLK_DFAB);
}
- kfree(data);
+ if (free_private_data)
+ kfree(data);
return ret;
}
@@ -8606,12 +8641,13 @@
}
INIT_LIST_HEAD(&qseecom.registered_listener_list_head);
- spin_lock_init(&qseecom.registered_listener_list_lock);
INIT_LIST_HEAD(&qseecom.registered_app_list_head);
spin_lock_init(&qseecom.registered_app_list_lock);
+ INIT_LIST_HEAD(&qseecom.unregister_lsnr_pending_list_head);
INIT_LIST_HEAD(&qseecom.registered_kclient_list_head);
spin_lock_init(&qseecom.registered_kclient_list_lock);
init_waitqueue_head(&qseecom.send_resp_wq);
+ init_waitqueue_head(&qseecom.register_lsnr_pending_wq);
qseecom.send_resp_flag = 0;
qseecom.qsee_version = QSEEE_VERSION_00;
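The qseecom changes above replace the abort-all path with a deferred-unregister scheme. A minimal sketch reduced to its three moving parts (a pending list, a drain pass, and a register path that waits for the drain); the pending_* names are illustrative only:

#include <linux/freezer.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/wait.h>

static LIST_HEAD(pending_list);
static DEFINE_MUTEX(pending_lock);
static DECLARE_WAIT_QUEUE_HEAD(pending_wq);

struct pending_item {
	struct list_head list;
	int id;
};

/* Called where teardown cannot run right away: just queue the request. */
static int queue_unregister(int id)
{
	struct pending_item *item = kzalloc(sizeof(*item), GFP_KERNEL);

	if (!item)
		return -ENOMEM;
	item->id = id;
	mutex_lock(&pending_lock);
	list_add_tail(&item->list, &pending_list);
	mutex_unlock(&pending_lock);
	return 0;
}

/* Called from a safe context: tear down everything that was queued. */
static void drain_pending(void)
{
	struct pending_item *item, *tmp;

	mutex_lock(&pending_lock);
	list_for_each_entry_safe(item, tmp, &pending_list, list) {
		/* Real teardown of item->id would go here. */
		list_del(&item->list);
		kfree(item);
	}
	mutex_unlock(&pending_lock);
	/* Let any register that raced with an unregister proceed. */
	wake_up_interruptible(&pending_wq);
}

/* Register path: block (freezably) until the pending list is drained. */
static int wait_for_drain(void)
{
	return wait_event_freezable(pending_wq, list_empty(&pending_list));
}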
diff --git a/drivers/misc/tsl2550.c b/drivers/misc/tsl2550.c
index 87a1337..eb57610 100644
--- a/drivers/misc/tsl2550.c
+++ b/drivers/misc/tsl2550.c
@@ -177,7 +177,7 @@
} else
lux = 0;
else
- return -EAGAIN;
+ return 0;
/* LUX range check */
return lux > TSL2550_MAX_LUX ? TSL2550_MAX_LUX : lux;
diff --git a/drivers/misc/vmw_balloon.c b/drivers/misc/vmw_balloon.c
index 518e2de..5e9122cd 100644
--- a/drivers/misc/vmw_balloon.c
+++ b/drivers/misc/vmw_balloon.c
@@ -45,6 +45,7 @@
#include <linux/seq_file.h>
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
+#include <linux/io.h>
#include <asm/hypervisor.h>
MODULE_AUTHOR("VMware, Inc.");
diff --git a/drivers/misc/vmw_vmci/vmci_queue_pair.c b/drivers/misc/vmw_vmci/vmci_queue_pair.c
index f735ab4..5927db04 100644
--- a/drivers/misc/vmw_vmci/vmci_queue_pair.c
+++ b/drivers/misc/vmw_vmci/vmci_queue_pair.c
@@ -755,7 +755,7 @@
retval = get_user_pages_fast((uintptr_t) produce_uva,
produce_q->kernel_if->num_pages, 1,
produce_q->kernel_if->u.h.header_page);
- if (retval < produce_q->kernel_if->num_pages) {
+ if (retval < (int)produce_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(produce) failed (retval=%d)",
retval);
qp_release_pages(produce_q->kernel_if->u.h.header_page,
@@ -767,7 +767,7 @@
retval = get_user_pages_fast((uintptr_t) consume_uva,
consume_q->kernel_if->num_pages, 1,
consume_q->kernel_if->u.h.header_page);
- if (retval < consume_q->kernel_if->num_pages) {
+ if (retval < (int)consume_q->kernel_if->num_pages) {
pr_debug("get_user_pages_fast(consume) failed (retval=%d)",
retval);
qp_release_pages(consume_q->kernel_if->u.h.header_page,
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 65998b7..0d08665 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -685,9 +685,6 @@
mmc_hostname(card->host),
card->ext_csd.barrier_support,
card->ext_csd.cache_flush_policy);
- card->ext_csd.enhanced_rpmb_supported =
- (card->ext_csd.rel_param &
- EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
} else {
card->ext_csd.cmdq_support = 0;
card->ext_csd.cmdq_depth = 0;
@@ -709,6 +706,13 @@
card->ext_csd.device_life_time_est_typ_b =
ext_csd[EXT_CSD_DEVICE_LIFE_TIME_EST_TYP_B];
}
+
+ /* eMMC v5.1 or later */
+ if (card->ext_csd.rev >= 8)
+ card->ext_csd.enhanced_rpmb_supported =
+ (card->ext_csd.rel_param &
+ EXT_CSD_WR_REL_PARAM_EN_RPMB_REL_WR);
+
out:
return err;
}
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index a4d35d9..765c1f5 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -684,7 +684,7 @@
cq_host->thist[cq_host->thist_idx].is_dcmd = is_dcmd;
memcpy(&cq_host->thist[cq_host->thist_idx++].task,
- &task, cq_host->task_desc_len);
+ &task, sizeof(task));
}
static void cmdq_prep_dcmd_desc(struct mmc_host *mmc,
diff --git a/drivers/net/appletalk/ipddp.c b/drivers/net/appletalk/ipddp.c
index 2e46496..4e98e5a 100644
--- a/drivers/net/appletalk/ipddp.c
+++ b/drivers/net/appletalk/ipddp.c
@@ -284,8 +284,12 @@
case SIOCFINDIPDDPRT:
spin_lock_bh(&ipddp_route_lock);
rp = __ipddp_find_route(&rcp);
- if (rp)
- memcpy(&rcp2, rp, sizeof(rcp2));
+ if (rp) {
+ memset(&rcp2, 0, sizeof(rcp2));
+ rcp2.ip = rp->ip;
+ rcp2.at = rp->at;
+ rcp2.flags = rp->flags;
+ }
spin_unlock_bh(&ipddp_route_lock);
if (rp) {
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 8a5e0ae..b1ea29d 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -216,6 +216,7 @@
static void bond_slave_arr_handler(struct work_struct *work);
static bool bond_time_in_interval(struct bonding *bond, unsigned long last_act,
int mod);
+static void bond_netdev_notify_work(struct work_struct *work);
/*---------------------------- General routines -----------------------------*/
@@ -1250,6 +1251,8 @@
return NULL;
}
}
+ INIT_DELAYED_WORK(&slave->notify_work, bond_netdev_notify_work);
+
return slave;
}
@@ -1257,6 +1260,7 @@
{
struct bonding *bond = bond_get_bond_by_slave(slave);
+ cancel_delayed_work_sync(&slave->notify_work);
if (BOND_MODE(bond) == BOND_MODE_8023AD)
kfree(SLAVE_AD_INFO(slave));
@@ -1278,39 +1282,26 @@
info->link_failure_count = slave->link_failure_count;
}
-static void bond_netdev_notify(struct net_device *dev,
- struct netdev_bonding_info *info)
-{
- rtnl_lock();
- netdev_bonding_info_change(dev, info);
- rtnl_unlock();
-}
-
static void bond_netdev_notify_work(struct work_struct *_work)
{
- struct netdev_notify_work *w =
- container_of(_work, struct netdev_notify_work, work.work);
+ struct slave *slave = container_of(_work, struct slave,
+ notify_work.work);
- bond_netdev_notify(w->dev, &w->bonding_info);
- dev_put(w->dev);
- kfree(w);
+ if (rtnl_trylock()) {
+ struct netdev_bonding_info binfo;
+
+ bond_fill_ifslave(slave, &binfo.slave);
+ bond_fill_ifbond(slave->bond, &binfo.master);
+ netdev_bonding_info_change(slave->dev, &binfo);
+ rtnl_unlock();
+ } else {
+ queue_delayed_work(slave->bond->wq, &slave->notify_work, 1);
+ }
}
void bond_queue_slave_event(struct slave *slave)
{
- struct bonding *bond = slave->bond;
- struct netdev_notify_work *nnw = kzalloc(sizeof(*nnw), GFP_ATOMIC);
-
- if (!nnw)
- return;
-
- dev_hold(slave->dev);
- nnw->dev = slave->dev;
- bond_fill_ifslave(slave, &nnw->bonding_info.slave);
- bond_fill_ifbond(bond, &nnw->bonding_info.master);
- INIT_DELAYED_WORK(&nnw->work, bond_netdev_notify_work);
-
- queue_delayed_work(slave->bond->wq, &nnw->work, 0);
+ queue_delayed_work(slave->bond->wq, &slave->notify_work, 0);
}
void bond_lower_state_changed(struct slave *slave)
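The reworked bond_netdev_notify_work() above relies on a trylock-and-requeue idiom so the work item never sleeps on RTNL. A minimal sketch, with hypothetical demo_* names:

#include <linux/rtnetlink.h>
#include <linux/workqueue.h>

struct demo_ctx {
	struct delayed_work work;
};

static void demo_work_fn(struct work_struct *work)
{
	struct demo_ctx *ctx = container_of(work, struct demo_ctx, work.work);

	if (!rtnl_trylock()) {
		/* RTNL is busy: retry one jiffy later instead of blocking. */
		schedule_delayed_work(&ctx->work, 1);
		return;
	}
	/* ... perform the RTNL-protected update here ... */
	rtnl_unlock();
}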
diff --git a/drivers/net/dsa/bcm_sf2.c b/drivers/net/dsa/bcm_sf2.c
index 2ce7ae9..c2cd540 100644
--- a/drivers/net/dsa/bcm_sf2.c
+++ b/drivers/net/dsa/bcm_sf2.c
@@ -744,7 +744,6 @@
static int bcm_sf2_sw_resume(struct dsa_switch *ds)
{
struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
- unsigned int port;
int ret;
ret = bcm_sf2_sw_rst(priv);
@@ -756,12 +755,7 @@
if (priv->hw_params.num_gphy == 1)
bcm_sf2_gphy_enable_set(ds, true);
- for (port = 0; port < DSA_MAX_PORTS; port++) {
- if ((1 << port) & ds->enabled_port_mask)
- bcm_sf2_port_setup(ds, port, NULL);
- else if (dsa_is_cpu_port(ds, port))
- bcm_sf2_imp_setup(ds, port);
- }
+ ds->ops->setup(ds);
return 0;
}
@@ -1135,10 +1129,10 @@
{
struct bcm_sf2_priv *priv = platform_get_drvdata(pdev);
- /* Disable all ports and interrupts */
priv->wol_ports_mask = 0;
- bcm_sf2_sw_suspend(priv->dev->ds);
dsa_unregister_switch(priv->dev->ds);
+ /* Disable all ports and interrupts */
+ bcm_sf2_sw_suspend(priv->dev->ds);
bcm_sf2_mdio_unregister(priv);
return 0;
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.c b/drivers/net/ethernet/amazon/ena/ena_netdev.c
index 0d9ce08..1d92e03 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.c
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.c
@@ -422,7 +422,7 @@
return -ENOMEM;
}
- dma = dma_map_page(rx_ring->dev, page, 0, PAGE_SIZE,
+ dma = dma_map_page(rx_ring->dev, page, 0, ENA_PAGE_SIZE,
DMA_FROM_DEVICE);
if (unlikely(dma_mapping_error(rx_ring->dev, dma))) {
u64_stats_update_begin(&rx_ring->syncp);
@@ -439,7 +439,7 @@
rx_info->page_offset = 0;
ena_buf = &rx_info->ena_buf;
ena_buf->paddr = dma;
- ena_buf->len = PAGE_SIZE;
+ ena_buf->len = ENA_PAGE_SIZE;
return 0;
}
@@ -456,7 +456,7 @@
return;
}
- dma_unmap_page(rx_ring->dev, ena_buf->paddr, PAGE_SIZE,
+ dma_unmap_page(rx_ring->dev, ena_buf->paddr, ENA_PAGE_SIZE,
DMA_FROM_DEVICE);
__free_page(page);
@@ -849,10 +849,10 @@
do {
dma_unmap_page(rx_ring->dev,
dma_unmap_addr(&rx_info->ena_buf, paddr),
- PAGE_SIZE, DMA_FROM_DEVICE);
+ ENA_PAGE_SIZE, DMA_FROM_DEVICE);
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_info->page,
- rx_info->page_offset, len, PAGE_SIZE);
+ rx_info->page_offset, len, ENA_PAGE_SIZE);
netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
"rx skb updated. len %d. data_len %d\n",
diff --git a/drivers/net/ethernet/amazon/ena/ena_netdev.h b/drivers/net/ethernet/amazon/ena/ena_netdev.h
index c5eaf76..008f2d5 100644
--- a/drivers/net/ethernet/amazon/ena/ena_netdev.h
+++ b/drivers/net/ethernet/amazon/ena/ena_netdev.h
@@ -321,4 +321,15 @@
int ena_get_sset_count(struct net_device *netdev, int sset);
+/* The ENA buffer length field is 16 bits long, so when PAGE_SIZE == 64kB the
+ * driver passes 0.
+ * Since the max packet size the ENA handles is ~9kB, limit the buffer length
+ * to 16kB.
+ */
+#if PAGE_SIZE > SZ_16K
+#define ENA_PAGE_SIZE SZ_16K
+#else
+#define ENA_PAGE_SIZE PAGE_SIZE
+#endif
+
#endif /* !(ENA_H) */
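The clamp above exists because a 16-bit descriptor field cannot hold a 64kB length; the value silently truncates to 0. A standalone userspace illustration (the DEMO_* names are assumptions, not driver code):

#include <stdint.h>
#include <stdio.h>

#define SZ_16K		(16 * 1024)
#define DEMO_PAGE_SIZE	(64 * 1024)	/* assume a 64kB page for the demo */

#if DEMO_PAGE_SIZE > SZ_16K
#define DEMO_BUF_LEN	SZ_16K
#else
#define DEMO_BUF_LEN	DEMO_PAGE_SIZE
#endif

int main(void)
{
	uint16_t raw = (uint16_t)DEMO_PAGE_SIZE;	/* 65536 truncates to 0 */
	uint16_t clamped = (uint16_t)DEMO_BUF_LEN;	/* 16384 fits */

	printf("unclamped=%u clamped=%u\n", raw, clamped);
	return 0;
}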
diff --git a/drivers/net/ethernet/broadcom/bcmsysport.c b/drivers/net/ethernet/broadcom/bcmsysport.c
index 91fbba5..16dc9ac 100644
--- a/drivers/net/ethernet/broadcom/bcmsysport.c
+++ b/drivers/net/ethernet/broadcom/bcmsysport.c
@@ -828,14 +828,22 @@
{
u32 reg;
- /* Stop monitoring MPD interrupt */
- intrl2_0_mask_set(priv, INTRL2_0_MPD);
-
/* Clear the MagicPacket detection logic */
reg = umac_readl(priv, UMAC_MPD_CTRL);
reg &= ~MPD_EN;
umac_writel(priv, reg, UMAC_MPD_CTRL);
+ reg = intrl2_0_readl(priv, INTRL2_CPU_STATUS);
+ if (reg & INTRL2_0_MPD)
+ netdev_info(priv->netdev, "Wake-on-LAN (MPD) interrupt!\n");
+
+ if (reg & INTRL2_0_BRCM_MATCH_TAG) {
+ reg = rxchk_readl(priv, RXCHK_BRCM_TAG_MATCH_STATUS) &
+ RXCHK_BRCM_TAG_MATCH_MASK;
+ netdev_info(priv->netdev,
+ "Wake-on-LAN (filters 0x%02x) interrupt!\n", reg);
+ }
+
netif_dbg(priv, wol, priv->netdev, "resumed from WOL\n");
}
@@ -868,11 +876,6 @@
if (priv->irq0_stat & INTRL2_0_TX_RING_FULL)
bcm_sysport_tx_reclaim_all(priv);
- if (priv->irq0_stat & INTRL2_0_MPD) {
- netdev_info(priv->netdev, "Wake-on-LAN interrupt!\n");
- bcm_sysport_resume_from_wol(priv);
- }
-
return IRQ_HANDLED;
}
@@ -1901,9 +1904,6 @@
/* UniMAC receive needs to be turned on */
umac_enable_set(priv, CMD_RX_EN, 1);
- /* Enable the interrupt wake-up source */
- intrl2_0_mask_clear(priv, INTRL2_0_MPD);
-
netif_dbg(priv, wol, ndev, "entered WOL mode\n");
return 0;
diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
index 72297b7..208e9da 100644
--- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c
+++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c
@@ -1666,8 +1666,11 @@
if (TX_CMP_TYPE(txcmp) == CMP_TYPE_TX_L2_CMP) {
tx_pkts++;
/* return full budget so NAPI will complete. */
- if (unlikely(tx_pkts > bp->tx_wake_thresh))
+ if (unlikely(tx_pkts > bp->tx_wake_thresh)) {
rx_pkts = budget;
+ raw_cons = NEXT_RAW_CMP(raw_cons);
+ break;
+ }
} else if ((TX_CMP_TYPE(txcmp) & 0x30) == 0x10) {
rc = bnxt_rx_pkt(bp, bnapi, &raw_cons, &agg_event);
if (likely(rc >= 0))
@@ -1685,7 +1688,7 @@
}
raw_cons = NEXT_RAW_CMP(raw_cons);
- if (rx_pkts == budget)
+ if (rx_pkts && rx_pkts == budget)
break;
}
@@ -1797,8 +1800,12 @@
while (1) {
work_done += bnxt_poll_work(bp, bnapi, budget - work_done);
- if (work_done >= budget)
+ if (work_done >= budget) {
+ if (!budget)
+ BNXT_CP_DB_REARM(cpr->cp_doorbell,
+ cpr->cp_raw_cons);
break;
+ }
if (!bnxt_has_work(bp, cpr)) {
napi_complete(napi);
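The bnxt changes above are careful about the NAPI contract: a poll handler must return the full budget to stay in polling mode and may only complete NAPI when the quota was not exhausted. A minimal sketch, with a stub in place of real RX processing:

#include <linux/netdevice.h>

/* Stub standing in for the driver's real RX work; returns packets handled. */
static int demo_do_rx(int quota)
{
	return 0;
}

static int demo_poll(struct napi_struct *napi, int budget)
{
	int work_done = demo_do_rx(budget);

	if (work_done < budget) {
		/* Quota not exhausted: leave polling and re-arm IRQs. */
		napi_complete(napi);
		/* ... re-enable device interrupts here ... */
	}
	return work_done;
}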
diff --git a/drivers/net/ethernet/cadence/macb.c b/drivers/net/ethernet/cadence/macb.c
index ec09fce..8f55c23 100644
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -517,7 +517,7 @@
if (!(status & MACB_BIT(TGO)))
return 0;
- usleep_range(10, 250);
+ udelay(250);
} while (time_before(halt_time, timeout));
return -ETIMEDOUT;
@@ -2861,6 +2861,13 @@
.init = macb_init,
};
+static const struct macb_config sama5d3macb_config = {
+ .caps = MACB_CAPS_SG_DISABLED
+ | MACB_CAPS_USRIO_HAS_CLKEN | MACB_CAPS_USRIO_DEFAULT_IS_MII_GMII,
+ .clk_init = macb_clk_init,
+ .init = macb_init,
+};
+
static const struct macb_config pc302gem_config = {
.caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE,
.dma_burst_length = 16,
@@ -2925,6 +2932,7 @@
{ .compatible = "cdns,gem", .data = &pc302gem_config },
{ .compatible = "atmel,sama5d2-gem", .data = &sama5d2_config },
{ .compatible = "atmel,sama5d3-gem", .data = &sama5d3_config },
+ { .compatible = "atmel,sama5d3-macb", .data = &sama5d3macb_config },
{ .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
{ .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
{ .compatible = "cdns,emac", .data = &emac_config },
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.c b/drivers/net/ethernet/hisilicon/hns/hnae.c
index b6ed818..06bc863 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.c
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.c
@@ -80,7 +80,7 @@
if (cb->type == DESC_TYPE_SKB)
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
- else
+ else if (cb->length)
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring));
}
diff --git a/drivers/net/ethernet/hisilicon/hns/hnae.h b/drivers/net/ethernet/hisilicon/hns/hnae.h
index e093cbf..f9d6845 100644
--- a/drivers/net/ethernet/hisilicon/hns/hnae.h
+++ b/drivers/net/ethernet/hisilicon/hns/hnae.h
@@ -213,10 +213,10 @@
/* priv data for the desc, e.g. skb when use with ip stack*/
void *priv;
- u16 page_offset;
- u16 reuse_flag;
+ u32 page_offset;
+ u32 length; /* length of the buffer */
- u16 length; /* length of the buffer */
+ u16 reuse_flag;
/* desc type, used by the ring user to mark the type of the priv data */
u16 type;
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_enet.c b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
index 111e1aa..92ed653 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_enet.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_enet.c
@@ -39,9 +39,9 @@
#define SKB_TMP_LEN(SKB) \
(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
-static void fill_v2_desc(struct hnae_ring *ring, void *priv,
- int size, dma_addr_t dma, int frag_end,
- int buf_num, enum hns_desc_type type, int mtu)
+static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
+ int send_sz, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
{
struct hnae_desc *desc = &ring->desc[ring->next_to_use];
struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
@@ -63,7 +63,7 @@
desc_cb->type = type;
desc->addr = cpu_to_le64(dma);
- desc->tx.send_size = cpu_to_le16((u16)size);
+ desc->tx.send_size = cpu_to_le16((u16)send_sz);
/* config bd buffer end */
hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
@@ -132,6 +132,14 @@
ring_ptr_move_fw(ring, next_to_use);
}
+static void fill_v2_desc(struct hnae_ring *ring, void *priv,
+ int size, dma_addr_t dma, int frag_end,
+ int buf_num, enum hns_desc_type type, int mtu)
+{
+ fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
+ buf_num, type, mtu);
+}
+
static const struct acpi_device_id hns_enet_acpi_match[] = {
{ "HISI00C1", 0 },
{ "HISI00C2", 0 },
@@ -288,15 +296,15 @@
/* when the frag size is bigger than hardware, split this frag */
for (k = 0; k < frag_buf_num; k++)
- fill_v2_desc(ring, priv,
- (k == frag_buf_num - 1) ?
+ fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
+ (k == frag_buf_num - 1) ?
sizeoflast : BD_MAX_SEND_SIZE,
- dma + BD_MAX_SEND_SIZE * k,
- frag_end && (k == frag_buf_num - 1) ? 1 : 0,
- buf_num,
- (type == DESC_TYPE_SKB && !k) ?
+ dma + BD_MAX_SEND_SIZE * k,
+ frag_end && (k == frag_buf_num - 1) ? 1 : 0,
+ buf_num,
+ (type == DESC_TYPE_SKB && !k) ?
DESC_TYPE_SKB : DESC_TYPE_PAGE,
- mtu);
+ mtu);
}
netdev_tx_t hns_nic_net_xmit_hw(struct net_device *ndev,
@@ -529,7 +537,7 @@
}
skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
- size - pull_len, truesize - pull_len);
+ size - pull_len, truesize);
/* avoid re-using remote pages,flag default unreuse */
if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
index 6be0cae..4cd1633 100644
--- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
+++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c
@@ -243,7 +243,9 @@
}
if (h->dev->ops->adjust_link) {
+ netif_carrier_off(net_dev);
h->dev->ops->adjust_link(h, (int)speed, cmd->base.duplex);
+ netif_carrier_on(net_dev);
return 0;
}
diff --git a/drivers/net/ethernet/hp/hp100.c b/drivers/net/ethernet/hp/hp100.c
index 631dbc7..0988bf1 100644
--- a/drivers/net/ethernet/hp/hp100.c
+++ b/drivers/net/ethernet/hp/hp100.c
@@ -2636,7 +2636,7 @@
/* Wait for link to drop */
time = jiffies + (HZ / 10);
do {
- if (~(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
+ if (!(hp100_inb(VG_LAN_CFG_1) & HP100_LINK_UP_ST))
break;
if (!in_interrupt())
schedule_timeout_interruptible(1);
diff --git a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
index 975eeb8..e84574b 100644
--- a/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
+++ b/drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -645,14 +645,14 @@
adapter->tx_ring = tx_old;
e1000_free_all_rx_resources(adapter);
e1000_free_all_tx_resources(adapter);
- kfree(tx_old);
- kfree(rx_old);
adapter->rx_ring = rxdr;
adapter->tx_ring = txdr;
err = e1000_up(adapter);
if (err)
goto err_setup;
}
+ kfree(tx_old);
+ kfree(rx_old);
clear_bit(__E1000_RESETTING, &adapter->flags);
return 0;
@@ -665,7 +665,8 @@
err_alloc_rx:
kfree(txdr);
err_alloc_tx:
- e1000_up(adapter);
+ if (netif_running(adapter->netdev))
+ e1000_up(adapter);
err_setup:
clear_bit(__E1000_RESETTING, &adapter->flags);
return err;
diff --git a/drivers/net/ethernet/marvell/mvpp2.c b/drivers/net/ethernet/marvell/mvpp2.c
index 7e2ebfc..ff62dc7 100644
--- a/drivers/net/ethernet/marvell/mvpp2.c
+++ b/drivers/net/ethernet/marvell/mvpp2.c
@@ -29,6 +29,7 @@
#include <linux/clk.h>
#include <linux/hrtimer.h>
#include <linux/ktime.h>
+#include <linux/if_vlan.h>
#include <uapi/linux/ppp_defs.h>
#include <net/ip.h>
#include <net/ipv6.h>
@@ -4266,7 +4267,7 @@
}
/* Set Tx descriptors fields relevant for CSUM calculation */
-static u32 mvpp2_txq_desc_csum(int l3_offs, int l3_proto,
+static u32 mvpp2_txq_desc_csum(int l3_offs, __be16 l3_proto,
int ip_hdr_len, int l4_proto)
{
u32 command;
@@ -5019,14 +5020,15 @@
if (skb->ip_summed == CHECKSUM_PARTIAL) {
int ip_hdr_len = 0;
u8 l4_proto;
+ __be16 l3_proto = vlan_get_protocol(skb);
- if (skb->protocol == htons(ETH_P_IP)) {
+ if (l3_proto == htons(ETH_P_IP)) {
struct iphdr *ip4h = ip_hdr(skb);
/* Calculate IPv4 checksum and L4 checksum */
ip_hdr_len = ip4h->ihl;
l4_proto = ip4h->protocol;
- } else if (skb->protocol == htons(ETH_P_IPV6)) {
+ } else if (l3_proto == htons(ETH_P_IPV6)) {
struct ipv6hdr *ip6h = ipv6_hdr(skb);
/* Read l4_protocol from one of IPv6 extra headers */
@@ -5038,7 +5040,7 @@
}
return mvpp2_txq_desc_csum(skb_network_offset(skb),
- skb->protocol, ip_hdr_len, l4_proto);
+ l3_proto, ip_hdr_len, l4_proto);
}
return MVPP2_TXD_L4_CSUM_NOT | MVPP2_TXD_IP_CSUM_DISABLE;
diff --git a/drivers/net/ethernet/mellanox/mlx4/eq.c b/drivers/net/ethernet/mellanox/mlx4/eq.c
index 0509996..e70d6fe 100644
--- a/drivers/net/ethernet/mellanox/mlx4/eq.c
+++ b/drivers/net/ethernet/mellanox/mlx4/eq.c
@@ -240,7 +240,8 @@
struct mlx4_dev *dev = &priv->dev;
struct mlx4_eq *eq = &priv->eq_table.eq[vec];
- if (!eq->affinity_mask || cpumask_empty(eq->affinity_mask))
+ if (!cpumask_available(eq->affinity_mask) ||
+ cpumask_empty(eq->affinity_mask))
return;
hint_err = irq_set_affinity_hint(eq->irq, eq->affinity_mask);
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/dev.c b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
index a9dbc28..524fff2 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/dev.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/dev.c
@@ -288,16 +288,17 @@
}
}
-static u16 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
+static u32 mlx5_gen_pci_id(struct mlx5_core_dev *dev)
{
- return (u16)((dev->pdev->bus->number << 8) |
+ return (u32)((pci_domain_nr(dev->pdev->bus) << 16) |
+ (dev->pdev->bus->number << 8) |
PCI_SLOT(dev->pdev->devfn));
}
/* Must be called with intf_mutex held */
struct mlx5_core_dev *mlx5_get_next_phys_dev(struct mlx5_core_dev *dev)
{
- u16 pci_id = mlx5_gen_pci_id(dev);
+ u32 pci_id = mlx5_gen_pci_id(dev);
struct mlx5_core_dev *res = NULL;
struct mlx5_core_dev *tmp_dev;
struct mlx5_priv *priv;
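The mlx5 change above widens the device ID from u16 to u32 so the PCI domain can be packed in alongside bus and slot. A standalone illustration of why the extra bits matter:

#include <stdint.h>
#include <stdio.h>

static uint32_t gen_pci_id(uint16_t domain, uint8_t bus, uint8_t slot)
{
	return ((uint32_t)domain << 16) | ((uint32_t)bus << 8) | slot;
}

int main(void)
{
	/* Same bus/slot in two domains now yield distinct IDs. */
	printf("0x%08x 0x%08x\n", gen_pci_id(0, 3, 4), gen_pci_id(1, 3, 4));
	return 0;
}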
diff --git a/drivers/net/ethernet/qlogic/qed/qed_mcp.c b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
index eaa242d..e175fcd 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_mcp.c
+++ b/drivers/net/ethernet/qlogic/qed/qed_mcp.c
@@ -97,18 +97,57 @@
return 0;
}
+/* Maximum of 1 sec to wait for the SHMEM ready indication */
+#define QED_MCP_SHMEM_RDY_MAX_RETRIES 20
+#define QED_MCP_SHMEM_RDY_ITER_MS 50
+
static int qed_load_mcp_offsets(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
struct qed_mcp_info *p_info = p_hwfn->mcp_info;
+ u8 cnt = QED_MCP_SHMEM_RDY_MAX_RETRIES;
+ u8 msec = QED_MCP_SHMEM_RDY_ITER_MS;
u32 drv_mb_offsize, mfw_mb_offsize;
u32 mcp_pf_id = MCP_PF_ID(p_hwfn);
p_info->public_base = qed_rd(p_hwfn, p_ptt, MISC_REG_SHARED_MEM_ADDR);
- if (!p_info->public_base)
- return 0;
+ if (!p_info->public_base) {
+ DP_NOTICE(p_hwfn,
+ "The address of the MCP scratch-pad is not configured\n");
+ return -EINVAL;
+ }
p_info->public_base |= GRCBASE_MCP;
+ /* Get the MFW MB address and number of supported messages */
+ mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
+ SECTION_OFFSIZE_ADDR(p_info->public_base,
+ PUBLIC_MFW_MB));
+ p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
+ p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr +
+ offsetof(struct public_mfw_mb,
+ sup_msgs));
+
+ /* The driver can notify that there was an MCP reset, and might read the
+ * SHMEM values before the MFW has completed initializing them.
+ * To avoid this, the "sup_msgs" field in the MFW mailbox is used as a
+ * data ready indication.
+ */
+ while (!p_info->mfw_mb_length && --cnt) {
+ msleep(msec);
+ p_info->mfw_mb_length =
+ (u16)qed_rd(p_hwfn, p_ptt,
+ p_info->mfw_mb_addr +
+ offsetof(struct public_mfw_mb, sup_msgs));
+ }
+
+ if (!cnt) {
+ DP_NOTICE(p_hwfn,
+ "Failed to get the SHMEM ready notification after %d msec\n",
+ QED_MCP_SHMEM_RDY_MAX_RETRIES * msec);
+ return -EBUSY;
+ }
+
/* Calculate the driver and MFW mailbox address */
drv_mb_offsize = qed_rd(p_hwfn, p_ptt,
SECTION_OFFSIZE_ADDR(p_info->public_base,
@@ -118,13 +157,6 @@
"drv_mb_offsiz = 0x%x, drv_mb_addr = 0x%x mcp_pf_id = 0x%x\n",
drv_mb_offsize, p_info->drv_mb_addr, mcp_pf_id);
- /* Set the MFW MB address */
- mfw_mb_offsize = qed_rd(p_hwfn, p_ptt,
- SECTION_OFFSIZE_ADDR(p_info->public_base,
- PUBLIC_MFW_MB));
- p_info->mfw_mb_addr = SECTION_ADDR(mfw_mb_offsize, mcp_pf_id);
- p_info->mfw_mb_length = (u16)qed_rd(p_hwfn, p_ptt, p_info->mfw_mb_addr);
-
/* Get the current driver mailbox sequence before sending
* the first command
*/
@@ -1198,31 +1230,61 @@
return rc;
}
+/* Wait up to 100 msec for the MCP to halt */
+#define QED_MCP_HALT_SLEEP_MS 10
+#define QED_MCP_HALT_MAX_RETRIES 10
+
int qed_mcp_halt(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 resp = 0, param = 0;
+ u32 resp = 0, param = 0, cpu_state, cnt = 0;
int rc;
rc = qed_mcp_cmd(p_hwfn, p_ptt, DRV_MSG_CODE_MCP_HALT, 0, &resp,
¶m);
- if (rc)
+ if (rc) {
DP_ERR(p_hwfn, "MCP response failure, aborting\n");
+ return rc;
+ }
- return rc;
+ do {
+ msleep(QED_MCP_HALT_SLEEP_MS);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED)
+ break;
+ } while (++cnt < QED_MCP_HALT_MAX_RETRIES);
+
+ if (cnt == QED_MCP_HALT_MAX_RETRIES) {
+ DP_NOTICE(p_hwfn,
+ "Failed to halt the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE), cpu_state);
+ return -EBUSY;
+ }
+
+ return 0;
}
+#define QED_MCP_RESUME_SLEEP_MS 10
+
int qed_mcp_resume(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
{
- u32 value, cpu_mode;
+ u32 cpu_mode, cpu_state;
qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_STATE, 0xffffffff);
- value = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
- value &= ~MCP_REG_CPU_MODE_SOFT_HALT;
- qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, value);
cpu_mode = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_MODE);
+ cpu_mode &= ~MCP_REG_CPU_MODE_SOFT_HALT;
+ qed_wr(p_hwfn, p_ptt, MCP_REG_CPU_MODE, cpu_mode);
+ msleep(QED_MCP_RESUME_SLEEP_MS);
+ cpu_state = qed_rd(p_hwfn, p_ptt, MCP_REG_CPU_STATE);
- return (cpu_mode & MCP_REG_CPU_MODE_SOFT_HALT) ? -EAGAIN : 0;
+ if (cpu_state & MCP_REG_CPU_STATE_SOFT_HALTED) {
+ DP_NOTICE(p_hwfn,
+ "Failed to resume the MCP [CPU_MODE = 0x%08x, CPU_STATE = 0x%08x]\n",
+ cpu_mode, cpu_state);
+ return -EBUSY;
+ }
+
+ return 0;
}
int qed_mcp_set_led(struct qed_hwfn *p_hwfn,
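qed_mcp_halt(), qed_mcp_resume() and the SHMEM-ready wait above all follow the same bounded-polling shape: sleep, re-read a status register, and give up with -EBUSY once the retry budget is spent. A minimal sketch with a stubbed register read:

#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/types.h>

#define DEMO_SLEEP_MS		10
#define DEMO_MAX_RETRIES	10

/* Stub standing in for a real register read. */
static u32 demo_read_state(void)
{
	return 0;
}

static int demo_wait_for_bit(u32 bit)
{
	u32 state, cnt = 0;

	do {
		msleep(DEMO_SLEEP_MS);
		state = demo_read_state();
		if (state & bit)
			return 0;
	} while (++cnt < DEMO_MAX_RETRIES);

	/* Retry budget exhausted without seeing the bit. */
	return -EBUSY;
}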
diff --git a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
index b414a05..56be1d6 100644
--- a/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
+++ b/drivers/net/ethernet/qlogic/qed/qed_reg_addr.h
@@ -510,6 +510,7 @@
0
#define MCP_REG_CPU_STATE \
0xe05004UL
+#define MCP_REG_CPU_STATE_SOFT_HALTED (0x1UL << 10)
#define MCP_REG_CPU_EVENT_MASK \
0xe05008UL
#define PGLUE_B_REG_PF_BAR0_SIZE \
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
index 49bad00..5ddadcd 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic.h
@@ -1800,7 +1800,8 @@
int (*config_loopback) (struct qlcnic_adapter *, u8);
int (*clear_loopback) (struct qlcnic_adapter *, u8);
int (*config_promisc_mode) (struct qlcnic_adapter *, u32);
- void (*change_l2_filter) (struct qlcnic_adapter *, u64 *, u16);
+ void (*change_l2_filter)(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *tx_ring);
int (*get_board_info) (struct qlcnic_adapter *);
void (*set_mac_filter_count) (struct qlcnic_adapter *);
void (*free_mac_list) (struct qlcnic_adapter *);
@@ -2042,9 +2043,10 @@
}
static inline void qlcnic_change_filter(struct qlcnic_adapter *adapter,
- u64 *addr, u16 id)
+ u64 *addr, u16 vlan,
+ struct qlcnic_host_tx_ring *tx_ring)
{
- adapter->ahw->hw_ops->change_l2_filter(adapter, addr, id);
+ adapter->ahw->hw_ops->change_l2_filter(adapter, addr, vlan, tx_ring);
}
static inline int qlcnic_get_board_info(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
index c3c28f0..05d32e8 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.c
@@ -2132,7 +2132,8 @@
}
void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
- u16 vlan_id)
+ u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring)
{
u8 mac[ETH_ALEN];
memcpy(&mac, addr, ETH_ALEN);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
index 331ae2c..c8e012b 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_hw.h
@@ -550,7 +550,8 @@
int qlcnic_83xx_nic_set_promisc(struct qlcnic_adapter *, u32);
int qlcnic_83xx_config_hw_lro(struct qlcnic_adapter *, int);
int qlcnic_83xx_config_rss(struct qlcnic_adapter *, int);
-void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *, u64 *, u16);
+void qlcnic_83xx_change_l2_filter(struct qlcnic_adapter *adapter, u64 *addr,
+ u16 vlan, struct qlcnic_host_tx_ring *ring);
int qlcnic_83xx_get_pci_info(struct qlcnic_adapter *, struct qlcnic_pci_info *);
int qlcnic_83xx_set_nic_info(struct qlcnic_adapter *, struct qlcnic_info *);
void qlcnic_83xx_initialize_nic(struct qlcnic_adapter *, int);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
index 4bb33af..56a3bd9 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_hw.h
@@ -173,7 +173,8 @@
struct net_device *netdev);
void qlcnic_82xx_get_beacon_state(struct qlcnic_adapter *);
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter,
- u64 *uaddr, u16 vlan_id);
+ u64 *uaddr, u16 vlan_id,
+ struct qlcnic_host_tx_ring *tx_ring);
int qlcnic_82xx_config_intr_coalesce(struct qlcnic_adapter *,
struct ethtool_coalesce *);
int qlcnic_82xx_set_rx_coalesce(struct qlcnic_adapter *);
diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
index fedd736..e361294 100644
--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c
@@ -268,13 +268,12 @@
}
void qlcnic_82xx_change_filter(struct qlcnic_adapter *adapter, u64 *uaddr,
- u16 vlan_id)
+ u16 vlan_id, struct qlcnic_host_tx_ring *tx_ring)
{
struct cmd_desc_type0 *hwdesc;
struct qlcnic_nic_req *req;
struct qlcnic_mac_req *mac_req;
struct qlcnic_vlan_req *vlan_req;
- struct qlcnic_host_tx_ring *tx_ring = adapter->tx_ring;
u32 producer;
u64 word;
@@ -301,7 +300,8 @@
static void qlcnic_send_filter(struct qlcnic_adapter *adapter,
struct cmd_desc_type0 *first_desc,
- struct sk_buff *skb)
+ struct sk_buff *skb,
+ struct qlcnic_host_tx_ring *tx_ring)
{
struct vlan_ethhdr *vh = (struct vlan_ethhdr *)(skb->data);
struct ethhdr *phdr = (struct ethhdr *)(skb->data);
@@ -335,7 +335,7 @@
tmp_fil->vlan_id == vlan_id) {
if (jiffies > (QLCNIC_READD_AGE * HZ + tmp_fil->ftime))
qlcnic_change_filter(adapter, &src_addr,
- vlan_id);
+ vlan_id, tx_ring);
tmp_fil->ftime = jiffies;
return;
}
@@ -350,7 +350,7 @@
if (!fil)
return;
- qlcnic_change_filter(adapter, &src_addr, vlan_id);
+ qlcnic_change_filter(adapter, &src_addr, vlan_id, tx_ring);
fil->ftime = jiffies;
fil->vlan_id = vlan_id;
memcpy(fil->faddr, &src_addr, ETH_ALEN);
@@ -766,7 +766,7 @@
}
if (adapter->drv_mac_learn)
- qlcnic_send_filter(adapter, first_desc, skb);
+ qlcnic_send_filter(adapter, first_desc, skb, tx_ring);
tx_ring->tx_stats.tx_bytes += skb->len;
tx_ring->tx_stats.xmit_called++;
diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
index f65e8cd..20f5c0c 100644
--- a/drivers/net/ethernet/realtek/r8169.c
+++ b/drivers/net/ethernet/realtek/r8169.c
@@ -760,7 +760,7 @@
};
enum rtl_flag {
- RTL_FLAG_TASK_ENABLED,
+ RTL_FLAG_TASK_ENABLED = 0,
RTL_FLAG_TASK_SLOW_PENDING,
RTL_FLAG_TASK_RESET_PENDING,
RTL_FLAG_TASK_PHY_PENDING,
@@ -7637,7 +7637,8 @@
rtl8169_update_counters(dev);
rtl_lock_work(tp);
- clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ /* Clear all task flags */
+ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
rtl8169_down(dev);
rtl_unlock_work(tp);
@@ -7820,7 +7821,9 @@
rtl_lock_work(tp);
napi_disable(&tp->napi);
- clear_bit(RTL_FLAG_TASK_ENABLED, tp->wk.flags);
+ /* Clear all task flags */
+ bitmap_zero(tp->wk.flags, RTL_FLAG_MAX);
+
rtl_unlock_work(tp);
rtl_pll_power_down(tp);
diff --git a/drivers/net/ethernet/renesas/ravb.h b/drivers/net/ethernet/renesas/ravb.h
index f110966..4103990 100644
--- a/drivers/net/ethernet/renesas/ravb.h
+++ b/drivers/net/ethernet/renesas/ravb.h
@@ -421,6 +421,7 @@
EIS_CULF1 = 0x00000080,
EIS_TFFF = 0x00000100,
EIS_QFS = 0x00010000,
+ EIS_RESERVED = (GENMASK(31, 17) | GENMASK(15, 11)),
};
/* RIC0 */
@@ -465,6 +466,7 @@
RIS0_FRF15 = 0x00008000,
RIS0_FRF16 = 0x00010000,
RIS0_FRF17 = 0x00020000,
+ RIS0_RESERVED = GENMASK(31, 18),
};
/* RIC1 */
@@ -521,6 +523,7 @@
RIS2_QFF16 = 0x00010000,
RIS2_QFF17 = 0x00020000,
RIS2_RFFF = 0x80000000,
+ RIS2_RESERVED = GENMASK(30, 18),
};
/* TIC */
@@ -537,6 +540,7 @@
TIS_FTF1 = 0x00000002, /* Undocumented? */
TIS_TFUF = 0x00000100,
TIS_TFWF = 0x00000200,
+ TIS_RESERVED = (GENMASK(31, 20) | GENMASK(15, 12) | GENMASK(7, 4))
};
/* ISS */
@@ -610,6 +614,7 @@
enum GIS_BIT {
GIS_PTCF = 0x00000001, /* Undocumented? */
GIS_PTMF = 0x00000004,
+ GIS_RESERVED = GENMASK(15, 10),
};
/* GIE (R-Car Gen3 only) */
diff --git a/drivers/net/ethernet/renesas/ravb_main.c b/drivers/net/ethernet/renesas/ravb_main.c
index 307ecd5..71836a7 100644
--- a/drivers/net/ethernet/renesas/ravb_main.c
+++ b/drivers/net/ethernet/renesas/ravb_main.c
@@ -717,10 +717,11 @@
u32 eis, ris2;
eis = ravb_read(ndev, EIS);
- ravb_write(ndev, ~EIS_QFS, EIS);
+ ravb_write(ndev, ~(EIS_QFS | EIS_RESERVED), EIS);
if (eis & EIS_QFS) {
ris2 = ravb_read(ndev, RIS2);
- ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF), RIS2);
+ ravb_write(ndev, ~(RIS2_QFF0 | RIS2_RFFF | RIS2_RESERVED),
+ RIS2);
/* Receive Descriptor Empty int */
if (ris2 & RIS2_QFF0)
@@ -773,7 +774,7 @@
u32 tis = ravb_read(ndev, TIS);
if (tis & TIS_TFUF) {
- ravb_write(ndev, ~TIS_TFUF, TIS);
+ ravb_write(ndev, ~(TIS_TFUF | TIS_RESERVED), TIS);
ravb_get_tx_tstamp(ndev);
return true;
}
@@ -908,7 +909,7 @@
/* Processing RX Descriptor Ring */
if (ris0 & mask) {
/* Clear RX interrupt */
- ravb_write(ndev, ~mask, RIS0);
+ ravb_write(ndev, ~(mask | RIS0_RESERVED), RIS0);
if (ravb_rx(ndev, "a, q))
goto out;
}
@@ -916,7 +917,7 @@
if (tis & mask) {
spin_lock_irqsave(&priv->lock, flags);
/* Clear TX interrupt */
- ravb_write(ndev, ~mask, TIS);
+ ravb_write(ndev, ~(mask | TIS_RESERVED), TIS);
ravb_tx_free(ndev, q, true);
netif_wake_subqueue(ndev, q);
mmiowb();
diff --git a/drivers/net/ethernet/renesas/ravb_ptp.c b/drivers/net/ethernet/renesas/ravb_ptp.c
index eede70e..9e3222f 100644
--- a/drivers/net/ethernet/renesas/ravb_ptp.c
+++ b/drivers/net/ethernet/renesas/ravb_ptp.c
@@ -319,7 +319,7 @@
}
}
- ravb_write(ndev, ~gis, GIS);
+ ravb_write(ndev, ~(gis | GIS_RESERVED), GIS);
}
void ravb_ptp_init(struct net_device *ndev, struct platform_device *pdev)
diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
index 890e4b0..2019e16 100644
--- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
+++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
@@ -71,7 +71,7 @@
* Description:
* This function validates the number of Unicast address entries supported
* by a particular Synopsys 10/100/1000 controller. The Synopsys controller
- * supports 1, 32, 64, or 128 Unicast filter entries for it's Unicast filter
+ * supports 1..32, 64, or 128 Unicast filter entries for its Unicast filter
* logic. This function validates a valid, supported configuration is
* selected, and defaults to 1 Unicast address if an unsupported
* configuration is selected.
@@ -81,8 +81,7 @@
int x = ucast_entries;
switch (x) {
- case 1:
- case 32:
+ case 1 ... 32:
case 64:
case 128:
break;
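The 'case 1 ... 32:' form above is the GCC/Clang case-range extension (widely used in the kernel), not standard C. A standalone illustration:

#include <stdio.h>

static const char *classify(int n)
{
	switch (n) {
	case 1 ... 32:		/* matches any value from 1 through 32 */
		return "1..32";
	case 64:
		return "64";
	case 128:
		return "128";
	default:
		return "unsupported";
	}
}

int main(void)
{
	printf("%s %s\n", classify(17), classify(100));
	return 0;
}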
diff --git a/drivers/net/phy/xilinx_gmii2rgmii.c b/drivers/net/phy/xilinx_gmii2rgmii.c
index 2e5150b..7a14e81 100644
--- a/drivers/net/phy/xilinx_gmii2rgmii.c
+++ b/drivers/net/phy/xilinx_gmii2rgmii.c
@@ -40,8 +40,11 @@
{
struct gmii2rgmii *priv = phydev->priv;
u16 val = 0;
+ int err;
- priv->phy_drv->read_status(phydev);
+ err = priv->phy_drv->read_status(phydev);
+ if (err < 0)
+ return err;
val = mdiobus_read(phydev->mdio.bus, priv->addr, XILINX_GMII2RGMII_REG);
val &= ~XILINX_GMII2RGMII_SPEED_MASK;
@@ -81,6 +84,11 @@
return -EPROBE_DEFER;
}
+ if (!priv->phy_dev->drv) {
+ dev_info(dev, "Attached phy not ready\n");
+ return -EPROBE_DEFER;
+ }
+
priv->addr = mdiodev->addr;
priv->phy_drv = priv->phy_dev->drv;
memcpy(&priv->conv_phy_drv, priv->phy_dev->drv,
diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
index f9ec009..9670aa2 100644
--- a/drivers/net/team/team.c
+++ b/drivers/net/team/team.c
@@ -1171,6 +1171,11 @@
return -EBUSY;
}
+ if (dev == port_dev) {
+ netdev_err(dev, "Cannot enslave team device to itself\n");
+ return -EINVAL;
+ }
+
if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
vlan_uses_dev(dev)) {
netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
diff --git a/drivers/net/usb/qmi_wwan.c b/drivers/net/usb/qmi_wwan.c
index 0d4440f..2b728cc 100644
--- a/drivers/net/usb/qmi_wwan.c
+++ b/drivers/net/usb/qmi_wwan.c
@@ -934,6 +934,7 @@
{QMI_FIXED_INTF(0x0b3c, 0xc00b, 4)}, /* Olivetti Olicard 500 */
{QMI_FIXED_INTF(0x1e2d, 0x0060, 4)}, /* Cinterion PLxx */
{QMI_FIXED_INTF(0x1e2d, 0x0053, 4)}, /* Cinterion PHxx,PXxx */
+ {QMI_FIXED_INTF(0x1e2d, 0x0063, 10)}, /* Cinterion ALASxx (1 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 4)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0082, 5)}, /* Cinterion PHxx,PXxx (2 RmNet) */
{QMI_FIXED_INTF(0x1e2d, 0x0083, 4)}, /* Cinterion PHxx,PXxx (1 RmNet + USB Audio)*/
diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c
index 03d0401..8d3f938c 100644
--- a/drivers/net/usb/smsc75xx.c
+++ b/drivers/net/usb/smsc75xx.c
@@ -1518,6 +1518,7 @@
{
struct smsc75xx_priv *pdata = (struct smsc75xx_priv *)(dev->data[0]);
if (pdata) {
+ cancel_work_sync(&pdata->set_multicast);
netif_dbg(dev, ifdown, dev->net, "free pdata\n");
kfree(pdata);
pdata = NULL;
diff --git a/drivers/net/wireless/ath/ath10k/debug.c b/drivers/net/wireless/ath/ath10k/debug.c
index 0dadc60..b106a06 100644
--- a/drivers/net/wireless/ath/ath10k/debug.c
+++ b/drivers/net/wireless/ath/ath10k/debug.c
@@ -1,6 +1,7 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
* Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -161,6 +162,8 @@
void ath10k_debug_print_board_info(struct ath10k *ar)
{
char boardinfo[100];
+ const struct firmware *board;
+ u32 crc;
if (ar->id.bmi_ids_valid)
scnprintf(boardinfo, sizeof(boardinfo), "%d:%d",
@@ -168,11 +171,16 @@
else
scnprintf(boardinfo, sizeof(boardinfo), "N/A");
+ board = ar->normal_mode_fw.board;
+ if (!IS_ERR_OR_NULL(board))
+ crc = crc32_le(0, board->data, board->size);
+ else
+ crc = 0;
+
ath10k_info(ar, "board_file api %d bmi_id %s crc32 %08x",
ar->bd_api,
boardinfo,
- crc32_le(0, ar->normal_mode_fw.board->data,
- ar->normal_mode_fw.board->size));
+ crc);
}
void ath10k_debug_print_boot_info(struct ath10k *ar)
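
The ath10k change computes the board-file CRC only when a board blob is actually present; previously a missing board file meant dereferencing a NULL or ERR_PTR value inside the ath10k_info() call. A userspace sketch of the guard, with a toy bitwise CRC standing in for the kernel's crc32_le():

    #include <stddef.h>
    #include <stdint.h>
    #include <stdio.h>

    struct firmware {
        const uint8_t *data;
        size_t size;
    };

    /* Toy bitwise CRC-32; illustrative, not the kernel's table-driven code. */
    static uint32_t crc32_le(uint32_t crc, const uint8_t *buf, size_t len)
    {
        crc = ~crc;
        while (len--) {
            crc ^= *buf++;
            for (int k = 0; k < 8; k++)
                crc = (crc >> 1) ^ (0xedb88320u & -(crc & 1u));
        }
        return ~crc;
    }

    int main(void)
    {
        const struct firmware *board = NULL;    /* no board file loaded */
        uint32_t crc = board ? crc32_le(0, board->data, board->size) : 0;

        printf("board crc32 %08x\n", crc);      /* 0 instead of a crash */
        return 0;
    }
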
diff --git a/drivers/net/wireless/ath/ath10k/htt_rx.c b/drivers/net/wireless/ath/ath10k/htt_rx.c
index ba1fe61..a3c2180 100644
--- a/drivers/net/wireless/ath/ath10k/htt_rx.c
+++ b/drivers/net/wireless/ath/ath10k/htt_rx.c
@@ -214,11 +214,12 @@
spin_lock_bh(&htt->rx_ring.lock);
ret = ath10k_htt_rx_ring_fill_n(htt, (htt->rx_ring.fill_level -
htt->rx_ring.fill_cnt));
- spin_unlock_bh(&htt->rx_ring.lock);
if (ret)
ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
+
return ret;
}
@@ -230,7 +231,9 @@
skb_queue_purge(&htt->rx_in_ord_compl_q);
skb_queue_purge(&htt->tx_fetch_ind_q);
+ spin_lock_bh(&htt->rx_ring.lock);
ath10k_htt_rx_ring_free(htt);
+ spin_unlock_bh(&htt->rx_ring.lock);
dma_free_coherent(htt->ar->dev,
(htt->rx_ring.size *
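
Both htt_rx hunks widen the rx_ring lock so ath10k_htt_rx_ring_free() always runs under rx_ring.lock, both on the refill error path and during teardown. A compact pthread sketch of the corrected scope:

    #include <pthread.h>

    static pthread_mutex_t ring_lock = PTHREAD_MUTEX_INITIALIZER;

    static int ring_fill(void)  { return -1; /* pretend the refill failed */ }
    static void ring_free(void) { /* touches the same state as ring_fill() */ }

    /*
     * The refill and the error-path free must share one critical section,
     * so the unlock sits below the cleanup instead of between the two calls.
     */
    static int ring_refill(void)
    {
        int ret;

        pthread_mutex_lock(&ring_lock);
        ret = ring_fill();
        if (ret)
            ring_free();            /* still under ring_lock, as in the fix */
        pthread_mutex_unlock(&ring_lock);

        return ret;
    }

    int main(void)
    {
        return ring_refill() ? 1 : 0;
    }
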
diff --git a/drivers/net/wireless/ath/ath10k/trace.h b/drivers/net/wireless/ath/ath10k/trace.h
index e0d00ce..5b974bb 100644
--- a/drivers/net/wireless/ath/ath10k/trace.h
+++ b/drivers/net/wireless/ath/ath10k/trace.h
@@ -152,10 +152,9 @@
);
TRACE_EVENT(ath10k_wmi_cmd,
- TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len,
- int ret),
+ TP_PROTO(struct ath10k *ar, int id, const void *buf, size_t buf_len),
- TP_ARGS(ar, id, buf, buf_len, ret),
+ TP_ARGS(ar, id, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
@@ -163,7 +162,6 @@
__field(unsigned int, id)
__field(size_t, buf_len)
__dynamic_array(u8, buf, buf_len)
- __field(int, ret)
),
TP_fast_assign(
@@ -171,17 +169,15 @@
__assign_str(driver, dev_driver_string(ar->dev));
__entry->id = id;
__entry->buf_len = buf_len;
- __entry->ret = ret;
memcpy(__get_dynamic_array(buf), buf, buf_len);
),
TP_printk(
- "%s %s id %d len %zu ret %d",
+ "%s %s id %d len %zu",
__get_str(driver),
__get_str(device),
__entry->id,
- __entry->buf_len,
- __entry->ret
+ __entry->buf_len
)
);
diff --git a/drivers/net/wireless/ath/ath10k/wmi-tlv.c b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
index f69b98f..642a441 100644
--- a/drivers/net/wireless/ath/ath10k/wmi-tlv.c
+++ b/drivers/net/wireless/ath/ath10k/wmi-tlv.c
@@ -1486,10 +1486,10 @@
bssid_len = arg->n_bssids * sizeof(struct wmi_mac_addr);
ie_len = roundup(arg->ie_len, 4);
len = (sizeof(*tlv) + sizeof(*cmd)) +
- (arg->n_channels ? sizeof(*tlv) + chan_len : 0) +
- (arg->n_ssids ? sizeof(*tlv) + ssid_len : 0) +
- (arg->n_bssids ? sizeof(*tlv) + bssid_len : 0) +
- (arg->ie_len ? sizeof(*tlv) + ie_len : 0);
+ sizeof(*tlv) + chan_len +
+ sizeof(*tlv) + ssid_len +
+ sizeof(*tlv) + bssid_len +
+ sizeof(*tlv) + ie_len;
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
diff --git a/drivers/net/wireless/ath/ath10k/wmi.c b/drivers/net/wireless/ath/ath10k/wmi.c
index e518b64..75f7a7b 100644
--- a/drivers/net/wireless/ath/ath10k/wmi.c
+++ b/drivers/net/wireless/ath/ath10k/wmi.c
@@ -1711,8 +1711,8 @@
cmd_hdr->cmd_id = __cpu_to_le32(cmd);
memset(skb_cb, 0, sizeof(*skb_cb));
+ trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len);
ret = ath10k_htc_send(&ar->htc, ar->wmi.eid, skb);
- trace_ath10k_wmi_cmd(ar, cmd_id, skb->data, skb->len, ret);
if (ret)
goto err_pull;
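
The trace.h and wmi.c hunks belong together: once ath10k_htc_send() is called, the skb belongs to the HTC layer and may already be freed, so the tracepoint has to capture skb->data before the send, and the now-unavailable return value is dropped from the event. The ownership rule in miniature:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    static void trace_cmd(int id, const void *buf, size_t len)
    {
        printf("wmi cmd id %d len %zu\n", id, len);
    }

    /* The sink takes ownership of buf; the caller must not touch it again. */
    static int send_and_consume(void *buf)
    {
        free(buf);
        return 0;
    }

    int main(void)
    {
        char *buf = strdup("payload");

        if (!buf)
            return 1;
        trace_cmd(1, buf, strlen(buf));  /* before ownership moves */
        return send_and_consume(buf);    /* buf is dead after this call */
    }
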
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index e711ab4..a1d8947 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -1037,6 +1037,41 @@
}
EXPORT_SYMBOL(cnss_force_fw_assert);
+int cnss_force_collect_rddm(struct device *dev)
+{
+ struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
+ int ret = 0;
+
+ if (!plat_priv) {
+ cnss_pr_err("plat_priv is NULL\n");
+ return -ENODEV;
+ }
+
+ if (plat_priv->device_id == QCA6174_DEVICE_ID) {
+ cnss_pr_info("Force collect rddm is not supported\n");
+ return -EOPNOTSUPP;
+ }
+
+ if (test_bit(CNSS_DRIVER_RECOVERY, &plat_priv->driver_state)) {
+ cnss_pr_info("Recovery is already in progress, ignore forced collect rddm\n");
+ return 0;
+ }
+
+ cnss_driver_event_post(plat_priv,
+ CNSS_DRIVER_EVENT_FORCE_FW_ASSERT,
+ 0, NULL);
+
+ reinit_completion(&plat_priv->rddm_complete);
+ ret = wait_for_completion_timeout
+ (&plat_priv->rddm_complete,
+ msecs_to_jiffies(CNSS_RDDM_TIMEOUT_MS));
+ if (!ret)
+ ret = -ETIMEDOUT;
+
+ return ret;
+}
+EXPORT_SYMBOL(cnss_force_collect_rddm);
+
static int cnss_cold_boot_cal_start_hdlr(struct cnss_plat_data *plat_priv)
{
int ret = 0;
@@ -1570,6 +1605,7 @@
init_completion(&plat_priv->power_up_complete);
init_completion(&plat_priv->cal_complete);
+ init_completion(&plat_priv->rddm_complete);
mutex_init(&plat_priv->dev_lock);
return 0;
@@ -1577,6 +1613,7 @@
static void cnss_misc_deinit(struct cnss_plat_data *plat_priv)
{
+ complete_all(&plat_priv->rddm_complete);
complete_all(&plat_priv->cal_complete);
complete_all(&plat_priv->power_up_complete);
device_init_wakeup(&plat_priv->plat_dev->dev, false);
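
cnss_force_collect_rddm() is the standard post-then-wait completion pattern: queue the force-assert event, block on rddm_complete for up to CNSS_RDDM_TIMEOUT_MS, and map expiry to -ETIMEDOUT. A POSIX-semaphore sketch of the waiting side, assuming the dump collector posts on completion:

    #include <errno.h>
    #include <semaphore.h>
    #include <time.h>

    #define RDDM_TIMEOUT_S 20           /* mirrors CNSS_RDDM_TIMEOUT_MS */

    static sem_t rddm_done;             /* posted by the dump-collection path */

    static int wait_for_rddm(void)
    {
        struct timespec ts;

        clock_gettime(CLOCK_REALTIME, &ts);
        ts.tv_sec += RDDM_TIMEOUT_S;
        if (sem_timedwait(&rddm_done, &ts))
            return errno == ETIMEDOUT ? -ETIMEDOUT : -errno;
        return 0;
    }

    int main(void)
    {
        sem_init(&rddm_done, 0, 0);
        sem_post(&rddm_done);           /* pretend the collector finished */
        return wait_for_rddm();
    }
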
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index dd14bbe..f18b08a 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -26,6 +26,7 @@
#define MAX_NO_OF_MAC_ADDR 4
#define QMI_WLFW_MAX_TIMESTAMP_LEN 32
#define QMI_WLFW_MAX_NUM_MEM_SEG 32
+#define CNSS_RDDM_TIMEOUT_MS 20000
#define CNSS_EVENT_SYNC BIT(0)
#define CNSS_EVENT_UNINTERRUPTIBLE BIT(1)
@@ -251,6 +252,7 @@
u8 *diag_reg_read_buf;
bool cal_done;
char firmware_name[13];
+ struct completion rddm_complete;
};
struct cnss_plat_data *cnss_get_plat_priv(struct platform_device *plat_dev);
diff --git a/drivers/net/wireless/cnss2/pci.c b/drivers/net/wireless/cnss2/pci.c
index c974a1bf..df8f4ad 100644
--- a/drivers/net/wireless/cnss2/pci.c
+++ b/drivers/net/wireless/cnss2/pci.c
@@ -17,6 +17,7 @@
#include <linux/of.h>
#include <linux/pm_runtime.h>
#include <linux/memblock.h>
+#include <linux/completion.h>
#include <soc/qcom/ramdump.h>
#include "main.h"
@@ -554,6 +555,11 @@
return;
}
+ if (test_bit(CNSS_MHI_RDDM_DONE, &plat_priv->driver_state)) {
+ cnss_pr_dbg("RDDM already collected, return\n");
+ return;
+ }
+
cnss_pci_collect_dump_info(pci_priv, true);
}
@@ -1529,6 +1535,12 @@
if (!plat_priv)
return -ENODEV;
+ if (test_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state)) {
+ cnss_pr_err("RDDM already collected 0x%x, return\n",
+ pci_priv->mhi_state);
+ return 0;
+ }
+
ret = cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_TRIGGER_RDDM);
if (ret) {
cnss_pr_err("Failed to trigger RDDM, err = %d\n", ret);
@@ -1907,6 +1919,8 @@
return "RESUME";
case CNSS_MHI_TRIGGER_RDDM:
return "TRIGGER_RDDM";
+ case CNSS_MHI_RDDM_DONE:
+ return "RDDM_DONE";
default:
return "UNKNOWN";
}
@@ -1983,6 +1997,9 @@
if (dump_data->nentries > 0)
plat_priv->ramdump_info_v2.dump_data_valid = true;
+
+ cnss_pci_set_mhi_state(pci_priv, CNSS_MHI_RDDM_DONE);
+ complete(&plat_priv->rddm_complete);
}
void cnss_pci_clear_dump_info(struct cnss_pci_data *pci_priv)
@@ -2188,6 +2205,8 @@
!test_bit(CNSS_MHI_TRIGGER_RDDM, &pci_priv->mhi_state))
return 0;
break;
+ case CNSS_MHI_RDDM_DONE:
+ return 0;
default:
cnss_pr_err("Unhandled MHI state: %s(%d)\n",
cnss_mhi_state_to_str(mhi_state), mhi_state);
@@ -2216,6 +2235,7 @@
case CNSS_MHI_POWER_OFF:
case CNSS_MHI_FORCE_POWER_OFF:
clear_bit(CNSS_MHI_POWER_ON, &pci_priv->mhi_state);
+ clear_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
break;
case CNSS_MHI_SUSPEND:
set_bit(CNSS_MHI_SUSPEND, &pci_priv->mhi_state);
@@ -2225,6 +2245,9 @@
break;
case CNSS_MHI_TRIGGER_RDDM:
break;
+ case CNSS_MHI_RDDM_DONE:
+ set_bit(CNSS_MHI_RDDM_DONE, &pci_priv->mhi_state);
+ break;
default:
cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
}
@@ -2283,6 +2306,8 @@
case CNSS_MHI_TRIGGER_RDDM:
ret = mhi_force_rddm_mode(pci_priv->mhi_ctrl);
break;
+ case CNSS_MHI_RDDM_DONE:
+ break;
default:
cnss_pr_err("Unhandled MHI state (%d)\n", mhi_state);
ret = -EINVAL;
diff --git a/drivers/net/wireless/cnss2/pci.h b/drivers/net/wireless/cnss2/pci.h
index 6476ce1..32dd323 100644
--- a/drivers/net/wireless/cnss2/pci.h
+++ b/drivers/net/wireless/cnss2/pci.h
@@ -31,6 +31,7 @@
CNSS_MHI_RESUME,
CNSS_MHI_TRIGGER_RDDM,
CNSS_MHI_RDDM,
+ CNSS_MHI_RDDM_DONE,
};
struct cnss_msi_user {
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 95e9641..4bb36dc 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -2569,9 +2569,6 @@
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
IEEE80211_VHT_CAP_TXSTBC |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_RXSTBC_2 |
- IEEE80211_VHT_CAP_RXSTBC_3 |
IEEE80211_VHT_CAP_RXSTBC_4 |
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
sband->vht_cap.vht_mcs.rx_mcs_map =
diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
index ae87b39..2e92872 100644
--- a/drivers/net/wireless/rndis_wlan.c
+++ b/drivers/net/wireless/rndis_wlan.c
@@ -2928,6 +2928,8 @@
while (buflen >= sizeof(*auth_req)) {
auth_req = (void *)buf;
+ if (buflen < le32_to_cpu(auth_req->length))
+ return;
type = "unknown";
flags = le32_to_cpu(auth_req->flags);
pairwise_error = false;
diff --git a/drivers/net/wireless/ti/wlcore/cmd.c b/drivers/net/wireless/ti/wlcore/cmd.c
index 7f4da72..96f83f0 100644
--- a/drivers/net/wireless/ti/wlcore/cmd.c
+++ b/drivers/net/wireless/ti/wlcore/cmd.c
@@ -35,6 +35,7 @@
#include "wl12xx_80211.h"
#include "cmd.h"
#include "event.h"
+#include "ps.h"
#include "tx.h"
#include "hw_ops.h"
@@ -191,6 +192,10 @@
timeout_time = jiffies + msecs_to_jiffies(WL1271_EVENT_TIMEOUT);
+ ret = wl1271_ps_elp_wakeup(wl);
+ if (ret < 0)
+ return ret;
+
do {
if (time_after(jiffies, timeout_time)) {
wl1271_debug(DEBUG_CMD, "timeout waiting for event %d",
@@ -222,6 +227,7 @@
} while (!event);
out:
+ wl1271_ps_elp_sleep(wl);
kfree(events_vector);
return ret;
}
diff --git a/drivers/net/xen-netback/hash.c b/drivers/net/xen-netback/hash.c
index 3c4c58b..3b6fb5b 100644
--- a/drivers/net/xen-netback/hash.c
+++ b/drivers/net/xen-netback/hash.c
@@ -332,20 +332,22 @@
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
u32 off)
{
- u32 *mapping = &vif->hash.mapping[off];
+ u32 *mapping = vif->hash.mapping;
struct gnttab_copy copy_op = {
.source.u.ref = gref,
.source.domid = vif->domid,
- .dest.u.gmfn = virt_to_gfn(mapping),
.dest.domid = DOMID_SELF,
- .dest.offset = xen_offset_in_page(mapping),
- .len = len * sizeof(u32),
+ .len = len * sizeof(*mapping),
.flags = GNTCOPY_source_gref
};
- if ((off + len > vif->hash.size) || copy_op.len > XEN_PAGE_SIZE)
+ if ((off + len < off) || (off + len > vif->hash.size) ||
+ len > XEN_PAGE_SIZE / sizeof(*mapping))
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
+ copy_op.dest.u.gmfn = virt_to_gfn(mapping + off);
+ copy_op.dest.offset = xen_offset_in_page(mapping + off);
+
while (len-- != 0)
if (mapping[off++] >= vif->num_queues)
return XEN_NETIF_CTRL_STATUS_INVALID_PARAMETER;
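
The hash-mapping fix is integer-overflow hardening: with guest-influenced 32-bit off and len, off + len can wrap past the table-size check, so the wrap is rejected explicitly and the per-page length bound is expressed in words. The check in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    #define TABLE_SIZE 128u                     /* vif->hash.size stand-in */
    #define PAGE_WORDS (4096u / sizeof(uint32_t))

    static bool mapping_range_ok(uint32_t off, uint32_t len)
    {
        if (off + len < off)                    /* unsigned wrap-around */
            return false;
        if (off + len > TABLE_SIZE)             /* past end of table */
            return false;
        if (len > PAGE_WORDS)                   /* more than one grant page */
            return false;
        return true;
    }

    int main(void)
    {
        return mapping_range_ok(0xffffffffu, 2u);   /* 0: wrap is rejected */
    }
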
diff --git a/drivers/net/xen-netfront.c b/drivers/net/xen-netfront.c
index 7f6af10..3c1adb3 100644
--- a/drivers/net/xen-netfront.c
+++ b/drivers/net/xen-netfront.c
@@ -906,7 +906,11 @@
BUG_ON(pull_to <= skb_headlen(skb));
__pskb_pull_tail(skb, pull_to - skb_headlen(skb));
}
- BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);
+ if (unlikely(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS)) {
+ queue->rx.rsp_cons = ++cons;
+ kfree_skb(nskb);
+ return ~0U;
+ }
skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
skb_frag_page(nfrag),
@@ -1043,6 +1047,8 @@
skb->len += rx->status;
i = xennet_fill_frags(queue, skb, &tmpq);
+ if (unlikely(i == ~0U))
+ goto err;
if (rx->flags & XEN_NETRXF_csum_blank)
skb->ip_summed = CHECKSUM_PARTIAL;
diff --git a/drivers/nvme/target/rdma.c b/drivers/nvme/target/rdma.c
index 53bd325..2dfd877 100644
--- a/drivers/nvme/target/rdma.c
+++ b/drivers/nvme/target/rdma.c
@@ -65,6 +65,7 @@
struct nvmet_req req;
+ bool allocated;
u8 n_rdma;
u32 flags;
u32 invalidate_rkey;
@@ -167,11 +168,19 @@
unsigned long flags;
spin_lock_irqsave(&queue->rsps_lock, flags);
- rsp = list_first_entry(&queue->free_rsps,
+ rsp = list_first_entry_or_null(&queue->free_rsps,
struct nvmet_rdma_rsp, free_list);
- list_del(&rsp->free_list);
+ if (likely(rsp))
+ list_del(&rsp->free_list);
spin_unlock_irqrestore(&queue->rsps_lock, flags);
+ if (unlikely(!rsp)) {
+ rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+ if (unlikely(!rsp))
+ return NULL;
+ rsp->allocated = true;
+ }
+
return rsp;
}
@@ -180,6 +189,11 @@
{
unsigned long flags;
+ if (rsp->allocated) {
+ kfree(rsp);
+ return;
+ }
+
spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -755,6 +769,15 @@
cmd->queue = queue;
rsp = nvmet_rdma_get_rsp(queue);
+ if (unlikely(!rsp)) {
+ /*
+ * We get here only under memory pressure:
+ * silently drop and let the host retry,
+ * as we cannot even fail the request.
+ */
+ nvmet_rdma_post_recv(queue->dev, cmd);
+ return;
+ }
rsp->queue = queue;
rsp->cmd = cmd;
rsp->flags = 0;
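
The nvmet-rdma change lets response allocation fall back to the heap when the preallocated free list is empty, tagging fallback objects with an allocated flag so the release path frees them rather than re-queuing them. A single-threaded sketch of the pattern (the driver additionally holds rsps_lock around the list manipulation):

    #include <stdbool.h>
    #include <stdlib.h>

    struct rsp {
        struct rsp *next;           /* free-list link */
        bool allocated;             /* true only for heap fallbacks */
    };

    static struct rsp *free_list;   /* preallocated pool (empty here) */

    static struct rsp *get_rsp(void)
    {
        struct rsp *r = free_list;

        if (r) {
            free_list = r->next;    /* pooled object: allocated stays false */
            return r;
        }
        r = calloc(1, sizeof(*r));  /* pool exhausted: fall back to the heap */
        if (r)
            r->allocated = true;
        return r;
    }

    static void put_rsp(struct rsp *r)
    {
        if (r->allocated) {
            free(r);                /* fallback object: really free it */
            return;
        }
        r->next = free_list;        /* pooled object: back on the list */
        free_list = r;
    }

    int main(void)
    {
        struct rsp *r = get_rsp();

        if (!r)
            return 1;
        put_rsp(r);
        return 0;
    }
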
diff --git a/drivers/of/unittest.c b/drivers/of/unittest.c
index 90b5a89..0a1ebbb 100644
--- a/drivers/of/unittest.c
+++ b/drivers/of/unittest.c
@@ -548,6 +548,9 @@
struct of_phandle_args args;
int i, rc;
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
np = of_find_node_by_path("/testcase-data/interrupts/interrupts0");
if (!np) {
pr_err("missing testcase data\n");
@@ -622,6 +625,9 @@
struct of_phandle_args args;
int i, rc;
+ if (of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)
+ return;
+
np = of_find_node_by_path("/testcase-data/interrupts/interrupts-extended0");
if (!np) {
pr_err("missing testcase data\n");
@@ -778,15 +784,19 @@
pdev = of_find_device_by_node(np);
unittest(pdev, "device 1 creation failed\n");
- irq = platform_get_irq(pdev, 0);
- unittest(irq == -EPROBE_DEFER, "device deferred probe failed - %d\n", irq);
+ if (!(of_irq_workarounds & OF_IMAP_OLDWORLD_MAC)) {
+ irq = platform_get_irq(pdev, 0);
+ unittest(irq == -EPROBE_DEFER,
+ "device deferred probe failed - %d\n", irq);
- /* Test that a parsing failure does not return -EPROBE_DEFER */
- np = of_find_node_by_path("/testcase-data/testcase-device2");
- pdev = of_find_device_by_node(np);
- unittest(pdev, "device 2 creation failed\n");
- irq = platform_get_irq(pdev, 0);
- unittest(irq < 0 && irq != -EPROBE_DEFER, "device parsing error failed - %d\n", irq);
+ /* Test that a parsing failure does not return -EPROBE_DEFER */
+ np = of_find_node_by_path("/testcase-data/testcase-device2");
+ pdev = of_find_device_by_node(np);
+ unittest(pdev, "device 2 creation failed\n");
+ irq = platform_get_irq(pdev, 0);
+ unittest(irq < 0 && irq != -EPROBE_DEFER,
+ "device parsing error failed - %d\n", irq);
+ }
np = of_find_node_by_path("/testcase-data/platform-tests");
unittest(np, "No testcase data in device tree\n");
diff --git a/drivers/pci/host/pci-aardvark.c b/drivers/pci/host/pci-aardvark.c
index 11bad82..1dbd09c 100644
--- a/drivers/pci/host/pci-aardvark.c
+++ b/drivers/pci/host/pci-aardvark.c
@@ -976,6 +976,7 @@
return -ENOMEM;
}
+ pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
list_for_each_entry(child, &bus->children, node)
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 6b3c5c4..ccbbd4c 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -1114,12 +1114,12 @@
EXPORT_SYMBOL(pci_save_state);
static void pci_restore_config_dword(struct pci_dev *pdev, int offset,
- u32 saved_val, int retry)
+ u32 saved_val, int retry, bool force)
{
u32 val;
pci_read_config_dword(pdev, offset, &val);
- if (val == saved_val)
+ if (!force && val == saved_val)
return;
for (;;) {
@@ -1138,25 +1138,36 @@
}
static void pci_restore_config_space_range(struct pci_dev *pdev,
- int start, int end, int retry)
+ int start, int end, int retry,
+ bool force)
{
int index;
for (index = end; index >= start; index--)
pci_restore_config_dword(pdev, 4 * index,
pdev->saved_config_space[index],
- retry);
+ retry, force);
}
static void pci_restore_config_space(struct pci_dev *pdev)
{
if (pdev->hdr_type == PCI_HEADER_TYPE_NORMAL) {
- pci_restore_config_space_range(pdev, 10, 15, 0);
+ pci_restore_config_space_range(pdev, 10, 15, 0, false);
/* Restore BARs before the command register. */
- pci_restore_config_space_range(pdev, 4, 9, 10);
- pci_restore_config_space_range(pdev, 0, 3, 0);
+ pci_restore_config_space_range(pdev, 4, 9, 10, false);
+ pci_restore_config_space_range(pdev, 0, 3, 0, false);
+ } else if (pdev->hdr_type == PCI_HEADER_TYPE_BRIDGE) {
+ pci_restore_config_space_range(pdev, 12, 15, 0, false);
+
+ /*
+ * Force rewriting of prefetch registers to avoid S3 resume
+ * issues on Intel PCI bridges that occur when these
+ * registers are not explicitly written.
+ */
+ pci_restore_config_space_range(pdev, 9, 11, 0, true);
+ pci_restore_config_space_range(pdev, 0, 8, 0, false);
} else {
- pci_restore_config_space_range(pdev, 0, 15, 0);
+ pci_restore_config_space_range(pdev, 0, 15, 0, false);
}
}
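
The PCI restore path now threads a force flag down to the dword writer so bridge prefetch registers (dwords 9-11) are rewritten even when the read-back already matches, working around S3 resume failures on some Intel bridges. The skip-unless-forced logic, reduced to a sketch:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static uint32_t config_space[16];   /* stand-in for live device registers */

    static void restore_dword(int index, uint32_t saved, bool force)
    {
        if (!force && config_space[index] == saved)
            return;                     /* unchanged and not forced: skip */
        config_space[index] = saved;
        printf("rewrote dword %d\n", index);
    }

    static void restore_range(int start, int end, const uint32_t *saved,
                              bool force)
    {
        for (int index = end; index >= start; index--)
            restore_dword(index, saved[index], force);
    }

    int main(void)
    {
        uint32_t saved[16] = { 0 };

        restore_range(9, 11, saved, true);   /* prefetch regs: always written */
        restore_range(0, 8, saved, false);   /* values match: writes skipped */
        return 0;
    }
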
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index a05d143..c7a695c 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -4236,11 +4236,6 @@
*
* 0x9d10-0x9d1b PCI Express Root port #{1-12}
*
- * The 300 series chipset suffers from the same bug so include those root
- * ports here as well.
- *
- * 0xa32c-0xa343 PCI Express Root port #{0-24}
- *
* [1] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-2.html
* [2] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-datasheet-vol-1.html
* [3] http://www.intel.com/content/www/us/en/chipsets/100-series-chipset-spec-update.html
@@ -4258,7 +4253,6 @@
case 0xa110 ... 0xa11f: case 0xa167 ... 0xa16a: /* Sunrise Point */
case 0xa290 ... 0xa29f: case 0xa2e7 ... 0xa2ee: /* Union Point */
case 0x9d10 ... 0x9d1b: /* 7th & 8th Gen Mobile */
- case 0xa32c ... 0xa343: /* 300 series */
return true;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index aa29688..4cf3aba 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -629,6 +629,7 @@
static void msm_gpio_irq_unmask(struct irq_data *d)
{
struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+ uint32_t irqtype = irqd_get_trigger_type(d);
struct msm_pinctrl *pctrl = gpiochip_get_data(gc);
const struct msm_pingroup *g;
unsigned long flags;
@@ -638,6 +639,12 @@
spin_lock_irqsave(&pctrl->lock, flags);
+ if (irqtype & (IRQF_TRIGGER_HIGH | IRQF_TRIGGER_LOW)) {
+ val = readl_relaxed(pctrl->regs + g->intr_status_reg);
+ val &= ~BIT(g->intr_status_bit);
+ writel_relaxed(val, pctrl->regs + g->intr_status_reg);
+ }
+
val = readl(pctrl->regs + g->intr_cfg_reg);
val |= BIT(g->intr_enable_bit);
writel(val, pctrl->regs + g->intr_cfg_reg);
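
The pinctrl-msm unmask path clears any stale latched status for level-triggered lines before setting the enable bit, so an already-serviced level interrupt cannot refire the moment it is unmasked. A sketch with stand-in registers:

    #include <stdint.h>

    #define BIT(n) (1u << (n))

    static volatile uint32_t intr_status;   /* latched status (stand-in) */
    static volatile uint32_t intr_cfg;      /* enable bits (stand-in) */

    static void gpio_irq_unmask(unsigned int bit, int level_triggered)
    {
        if (level_triggered)
            intr_status &= ~BIT(bit);       /* drop the stale latched status */
        intr_cfg |= BIT(bit);               /* then re-enable the interrupt */
    }

    int main(void)
    {
        intr_status = BIT(3);               /* serviced but still latched */
        gpio_irq_unmask(3, 1);
        return (int)intr_status;            /* 0: no spurious refire pending */
    }
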
diff --git a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
index 54d8f56..a62c939 100644
--- a/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
+++ b/drivers/pinctrl/qcom/pinctrl-sdxpoorwills.c
@@ -1169,7 +1169,7 @@
{51, 93},
{52, 94},
{53, 95},
- {54, 97},
+ {54, 97, 1},
{55, 98},
};
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_com.h b/drivers/platform/msm/ep_pcie/ep_pcie_com.h
index ebc3b8f..dde53ba 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_com.h
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_com.h
@@ -234,6 +234,7 @@
EP_PCIE_RES_DM_CORE,
EP_PCIE_RES_ELBI,
EP_PCIE_RES_IATU,
+ EP_PCIE_RES_TCSR_PERST,
EP_PCIE_MAX_RES,
};
@@ -325,6 +326,7 @@
void __iomem *dm_core;
void __iomem *elbi;
void __iomem *iatu;
+ void __iomem *tcsr_perst_en;
struct msm_bus_scale_pdata *bus_scale_table;
u32 bus_client;
diff --git a/drivers/platform/msm/ep_pcie/ep_pcie_core.c b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
index 5c3828b..613dcc1 100644
--- a/drivers/platform/msm/ep_pcie/ep_pcie_core.c
+++ b/drivers/platform/msm/ep_pcie/ep_pcie_core.c
@@ -91,6 +91,7 @@
{"dm_core", NULL, NULL},
{"elbi", NULL, NULL},
{"iatu", NULL, NULL},
+ {"tcsr_pcie_perst_en", NULL, NULL},
};
static const struct ep_pcie_irq_info_t ep_pcie_irq_info[EP_PCIE_MAX_IRQ] = {
@@ -527,6 +528,8 @@
static void ep_pcie_config_mmio(struct ep_pcie_dev_t *dev)
{
+ u32 mhi_status;
+
EP_PCIE_DBG(dev,
"Initial version of MMIO is:0x%x\n",
readl_relaxed(dev->mmio + PCIE20_MHIVER));
@@ -538,6 +541,14 @@
return;
}
+ mhi_status = readl_relaxed(dev->mmio + PCIE20_MHISTATUS);
+ if (mhi_status & BIT(2)) {
+ EP_PCIE_DBG(dev,
+ "MHISYS error is set:%d, proceed to MHI\n",
+ mhi_status);
+ return;
+ }
+
ep_pcie_write_reg(dev->mmio, PCIE20_MHICFG, 0x02800880);
ep_pcie_write_reg(dev->mmio, PCIE20_BHI_EXECENV, 0x2);
ep_pcie_write_reg(dev->mmio, PCIE20_MHICTRL, 0x0);
@@ -1219,6 +1230,7 @@
dev->dm_core = dev->res[EP_PCIE_RES_DM_CORE].base;
dev->elbi = dev->res[EP_PCIE_RES_ELBI].base;
dev->iatu = dev->res[EP_PCIE_RES_IATU].base;
+ dev->tcsr_perst_en = dev->res[EP_PCIE_RES_TCSR_PERST].base;
out:
kfree(clkfreq);
@@ -1247,6 +1259,11 @@
spin_lock_irqsave(&dev->isr_lock, irqsave_flags);
+ if (dev->enumerated) {
+ EP_PCIE_DBG(dev, "PCIe V%d: Enumeration already done\n",
+ dev->rev);
+ goto done;
+ }
dev->enumerated = true;
dev->link_status = EP_PCIE_LINK_ENABLED;
@@ -1280,6 +1297,7 @@
"PCIe V%d: do not notify client about linkup.\n",
dev->rev);
+done:
spin_unlock_irqrestore(&dev->isr_lock, irqsave_flags);
}
@@ -1342,42 +1360,59 @@
}
dev->power_on = true;
+ /* check link status during initial bootup */
+ if (!dev->enumerated) {
+ val = readl_relaxed(dev->parf + PCIE20_PARF_PM_STTS);
+ val = val & PARF_XMLH_LINK_UP;
+ EP_PCIE_DBG(dev, "PCIe V%d: Link status is 0x%x.\n",
+ dev->rev, val);
+ if (val) {
+ EP_PCIE_INFO(dev,
+ "PCIe V%d: link initialized by bootloader for LE PCIe endpoint; skip link training in HLOS.\n",
+ dev->rev);
+ ep_pcie_core_init(dev, true);
+ dev->link_status = EP_PCIE_LINK_UP;
+ dev->l23_ready = false;
+ goto checkbme;
+ } else {
+ ltssm_en = readl_relaxed(dev->parf
+ + PCIE20_PARF_LTSSM) & BIT(8);
+
+ if (ltssm_en) {
+ EP_PCIE_ERR(dev,
+ "PCIe V%d: link is not up when LTSSM has already enabled by bootloader.\n",
+ dev->rev);
+ ret = EP_PCIE_ERROR;
+ goto link_fail;
+ } else {
+ EP_PCIE_DBG(dev,
+ "PCIe V%d: Proceed with regular link training.\n",
+ dev->rev);
+ }
+ }
+ }
+
+ ret = ep_pcie_reset_init(dev);
+ if (ret)
+ goto link_fail;
}
if (!(opt & EP_PCIE_OPT_ENUM))
goto out;
- /* check link status during initial bootup */
- if (!dev->enumerated) {
- val = readl_relaxed(dev->parf + PCIE20_PARF_PM_STTS);
- val = val & PARF_XMLH_LINK_UP;
- EP_PCIE_DBG(dev, "PCIe V%d: Link status is 0x%x.\n", dev->rev,
- val);
- if (val) {
- EP_PCIE_INFO(dev,
- "PCIe V%d: link initialized by bootloader for LE PCIe endpoint; skip link training in HLOS.\n",
- dev->rev);
- ep_pcie_core_init(dev, true);
- dev->link_status = EP_PCIE_LINK_UP;
- dev->l23_ready = false;
- goto checkbme;
- } else {
- ltssm_en = readl_relaxed(dev->parf
- + PCIE20_PARF_LTSSM) & BIT(8);
+ EP_PCIE_DBG(dev,
+ "TCSR PERST_EN value before configure:0x%x\n",
+ readl_relaxed(dev->tcsr_perst_en + 0x258));
- if (ltssm_en) {
- EP_PCIE_ERR(dev,
- "PCIe V%d: link is not up when LTSSM has already enabled by bootloader.\n",
- dev->rev);
- ret = EP_PCIE_ERROR;
- goto link_fail;
- } else {
- EP_PCIE_DBG(dev,
- "PCIe V%d: Proceed with regular link training.\n",
- dev->rev);
- }
- }
- }
+ /*
+ * De-latch PERST_EN with TCSR to avoid device reset
+ * during host reboot case.
+ */
+ writel_relaxed(0, dev->tcsr_perst_en + 0x258);
+
+ EP_PCIE_DBG(dev,
+ "TCSR PERST_EN value after configure:0x%x\n",
+ readl_relaxed(dev->tcsr_perst_en + 0x258));
if (opt & EP_PCIE_OPT_AST_WAKE) {
/* assert PCIe WAKE# */
@@ -1430,9 +1465,6 @@
}
}
- ret = ep_pcie_reset_init(dev);
- if (ret)
- goto link_fail;
/* init PCIe PHY */
ep_pcie_phy_init(dev);
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
index 6c66600..34ddd63 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_mhi_client.c
@@ -1743,13 +1743,14 @@
return 0;
}
-static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels)
+static int ipa_mhi_suspend_channels(struct ipa_mhi_channel_ctx *channels,
+ int max_channels)
{
int i;
int res;
IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ for (i = 0; i < max_channels; i++) {
if (!channels[i].valid)
continue;
if (channels[i].state !=
@@ -1779,7 +1780,7 @@
}
static int ipa_mhi_stop_event_update_channels(
- struct ipa_mhi_channel_ctx *channels)
+ struct ipa_mhi_channel_ctx *channels, int max_channels)
{
int i;
int res;
@@ -1788,7 +1789,7 @@
return 0;
IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ for (i = 0; i < max_channels; i++) {
if (!channels[i].valid)
continue;
if (channels[i].state !=
@@ -1847,14 +1848,14 @@
}
static int ipa_mhi_resume_channels(bool LPTransitionRejected,
- struct ipa_mhi_channel_ctx *channels)
+ struct ipa_mhi_channel_ctx *channels, int max_channels)
{
int i;
int res;
struct ipa_mhi_channel_ctx *channel;
IPA_MHI_FUNC_ENTRY();
- for (i = 0; i < IPA_MHI_MAX_UL_CHANNELS; i++) {
+ for (i = 0; i < max_channels; i++) {
if (!channels[i].valid)
continue;
if (channels[i].state !=
@@ -1903,7 +1904,8 @@
*force_clear = false;
- res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels);
+ res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels,
+ IPA_MHI_MAX_UL_CHANNELS);
if (res) {
IPA_MHI_ERR("ipa_mhi_suspend_ul_channels failed %d\n", res);
goto fail_suspend_ul_channel;
@@ -1964,7 +1966,7 @@
}
res = ipa_mhi_stop_event_update_channels(
- ipa_mhi_client_ctx->ul_channels);
+ ipa_mhi_client_ctx->ul_channels, IPA_MHI_MAX_UL_CHANNELS);
if (res) {
IPA_MHI_ERR(
"ipa_mhi_stop_event_update_ul_channels failed %d\n",
@@ -2080,7 +2082,8 @@
{
int res;
- res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+ res = ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
+ IPA_MHI_MAX_DL_CHANNELS);
if (res) {
IPA_MHI_ERR(
"ipa_mhi_suspend_channels for dl failed %d\n", res);
@@ -2088,7 +2091,8 @@
}
res = ipa_mhi_stop_event_update_channels
- (ipa_mhi_client_ctx->dl_channels);
+ (ipa_mhi_client_ctx->dl_channels,
+ IPA_MHI_MAX_DL_CHANNELS);
if (res) {
IPA_MHI_ERR("failed to stop event update on DL %d\n", res);
goto fail_stop_event_update_dl_channel;
@@ -2113,7 +2117,8 @@
fail_stop_event_update_dl_channel:
ipa_mhi_resume_channels(true,
- ipa_mhi_client_ctx->dl_channels);
+ ipa_mhi_client_ctx->dl_channels,
+ IPA_MHI_MAX_DL_CHANNELS);
fail_suspend_dl_channel:
return res;
}
@@ -2226,7 +2231,8 @@
#endif
IPA_ACTIVE_CLIENTS_DEC_SIMPLE();
fail_suspend_ul_channel:
- ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels);
+ ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->ul_channels,
+ IPA_MHI_MAX_UL_CHANNELS);
if (force_clear) {
if (
ipa_mhi_disable_force_clear(ipa_mhi_client_ctx->qmi_req_id)) {
@@ -2237,7 +2243,8 @@
ipa_mhi_client_ctx->qmi_req_id++;
}
fail_suspend_dl_channel:
- ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->dl_channels);
+ ipa_mhi_resume_channels(true, ipa_mhi_client_ctx->dl_channels,
+ IPA_MHI_MAX_DL_CHANNELS);
ipa_mhi_set_state(IPA_MHI_STATE_STARTED);
return res;
}
@@ -2272,7 +2279,8 @@
if (ipa_mhi_client_ctx->rm_cons_state == IPA_MHI_RM_STATE_REQUESTED) {
/* resume all DL channels */
res = ipa_mhi_resume_channels(false,
- ipa_mhi_client_ctx->dl_channels);
+ ipa_mhi_client_ctx->dl_channels,
+ IPA_MHI_MAX_DL_CHANNELS);
if (res) {
IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
res);
@@ -2310,7 +2318,8 @@
/* resume all UL channels */
res = ipa_mhi_resume_channels(false,
- ipa_mhi_client_ctx->ul_channels);
+ ipa_mhi_client_ctx->ul_channels,
+ IPA_MHI_MAX_UL_CHANNELS);
if (res) {
IPA_MHI_ERR("ipa_mhi_resume_ul_channels failed %d\n", res);
goto fail_resume_ul_channels;
@@ -2318,7 +2327,8 @@
if (!dl_channel_resumed) {
res = ipa_mhi_resume_channels(false,
- ipa_mhi_client_ctx->dl_channels);
+ ipa_mhi_client_ctx->dl_channels,
+ IPA_MHI_MAX_DL_CHANNELS);
if (res) {
IPA_MHI_ERR("ipa_mhi_resume_dl_channels failed %d\n",
res);
@@ -2339,9 +2349,11 @@
return 0;
fail_set_state:
- ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+ ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
+ IPA_MHI_MAX_DL_CHANNELS);
fail_resume_dl_channels2:
- ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels);
+ ipa_mhi_suspend_channels(ipa_mhi_client_ctx->ul_channels,
+ IPA_MHI_MAX_UL_CHANNELS);
fail_resume_ul_channels:
if (!ipa_pm_is_used())
ipa_mhi_release_prod();
@@ -2354,7 +2366,8 @@
ipa_pm_deactivate_sync(ipa_mhi_client_ctx->pm_hdl);
fail_pm_activate:
#endif
- ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels);
+ ipa_mhi_suspend_channels(ipa_mhi_client_ctx->dl_channels,
+ IPA_MHI_MAX_DL_CHANNELS);
fail_resume_dl_channels:
ipa_mhi_set_state(IPA_MHI_STATE_SUSPENDED);
return res;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 6740fcb..2d7e7467 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -1076,6 +1076,7 @@
* @index_table_expansion_addr: index expansion table address
* @public_ip_addr: ip address of nat table
* @pdn_mem: pdn config table SW cache memory structure
+ * @is_tmp_mem_allocated: indicate if tmp mem has been allocated
*/
struct ipa3_nat_mem {
struct ipa3_nat_ipv6ct_common_mem dev;
@@ -1083,6 +1084,7 @@
char *index_table_expansion_addr;
u32 public_ip_addr;
struct ipa_mem_buffer pdn_mem;
+ bool is_tmp_mem_allocated;
};
/**
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
index 12d6274..5aa01be 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_nat.c
@@ -266,14 +266,21 @@
mutex_lock(&dev->lock);
- dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
- dev->tmp_mem->vaddr, dev->tmp_mem->dma_handle);
- kfree(dev->tmp_mem);
+ if (dev->tmp_mem != NULL &&
+ ipa3_ctx->nat_mem.is_tmp_mem_allocated == false) {
+ dev->tmp_mem = NULL;
+ } else if (dev->tmp_mem != NULL &&
+ ipa3_ctx->nat_mem.is_tmp_mem_allocated) {
+ dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
+ dev->tmp_mem->vaddr, dev->tmp_mem->dma_handle);
+ kfree(dev->tmp_mem);
+ dev->tmp_mem = NULL;
+ ipa3_ctx->nat_mem.is_tmp_mem_allocated = false;
+ }
device_destroy(dev->class, dev->dev_num);
unregister_chrdev_region(dev->dev_num, 1);
class_destroy(dev->class);
dev->is_dev_init = false;
-
mutex_unlock(&dev->lock);
IPADBG("return\n");
@@ -296,10 +303,15 @@
/*
* Allocate NAT/IPv6CT temporary memory. The memory is never deleted,
* because provided to HW once NAT or IPv6CT table is deleted.
- * NULL is a legal value
*/
tmp_mem = ipa3_nat_ipv6ct_allocate_tmp_memory();
+ if (tmp_mem == NULL) {
+ IPAERR("unable to allocate tmp_mem\n");
+ return -ENOMEM;
+ }
+ ipa3_ctx->nat_mem.is_tmp_mem_allocated = true;
+
if (ipa3_nat_ipv6ct_init_device(
&ipa3_ctx->nat_mem.dev,
IPA_NAT_DEV_NAME,
@@ -328,10 +340,11 @@
fail_init_ipv6ct_dev:
ipa3_nat_ipv6ct_destroy_device(&ipa3_ctx->nat_mem.dev);
fail_init_nat_dev:
- if (tmp_mem != NULL) {
+ if (tmp_mem != NULL && ipa3_ctx->nat_mem.is_tmp_mem_allocated) {
dma_free_coherent(ipa3_ctx->pdev, IPA_NAT_IPV6CT_TEMP_MEM_SIZE,
tmp_mem->vaddr, tmp_mem->dma_handle);
kfree(tmp_mem);
+ ipa3_ctx->nat_mem.is_tmp_mem_allocated = false;
}
return result;
}
diff --git a/drivers/platform/msm/mhi_dev/mhi.c b/drivers/platform/msm/mhi_dev/mhi.c
index 3199b29..56a75da 100644
--- a/drivers/platform/msm/mhi_dev/mhi.c
+++ b/drivers/platform/msm/mhi_dev/mhi.c
@@ -940,8 +940,6 @@
struct mhi_addr host_addr;
struct mhi_dev_channel *ch;
struct mhi_dev_ring *ring;
- char *connected[2] = { "MHI_CHANNEL_STATE_12=CONNECTED", NULL};
- char *disconnected[2] = { "MHI_CHANNEL_STATE_12=DISCONNECTED", NULL};
ch_id = el->generic.chid;
mhi_log(MHI_MSG_VERBOSE, "for channel:%d and cmd:%d\n",
@@ -1027,9 +1025,7 @@
mhi_update_state_info(ch_id, MHI_STATE_CONNECTED);
/* Trigger callback to clients */
mhi_dev_trigger_cb(ch_id);
- if (ch_id == MHI_CLIENT_MBIM_OUT)
- kobject_uevent_env(&mhi_ctx->dev->kobj,
- KOBJ_CHANGE, connected);
+ mhi_uci_chan_state_notify(mhi, ch_id, MHI_STATE_CONNECTED);
break;
case MHI_DEV_RING_EL_STOP:
if (ch_id >= HW_CHANNEL_BASE) {
@@ -1083,9 +1079,10 @@
mutex_unlock(&ch->ch_lock);
mhi_update_state_info(ch_id, MHI_STATE_DISCONNECTED);
- if (ch_id == MHI_CLIENT_MBIM_OUT)
- kobject_uevent_env(&mhi_ctx->dev->kobj,
- KOBJ_CHANGE, disconnected);
+ /* Trigger callback to clients */
+ mhi_dev_trigger_cb(ch_id);
+ mhi_uci_chan_state_notify(mhi, ch_id,
+ MHI_STATE_DISCONNECTED);
}
break;
case MHI_DEV_RING_EL_RESET:
@@ -1159,9 +1156,9 @@
pr_err("Error sending command completion event\n");
mutex_unlock(&ch->ch_lock);
mhi_update_state_info(ch_id, MHI_STATE_DISCONNECTED);
- if (ch_id == MHI_CLIENT_MBIM_OUT)
- kobject_uevent_env(&mhi_ctx->dev->kobj,
- KOBJ_CHANGE, disconnected);
+ mhi_dev_trigger_cb(ch_id);
+ mhi_uci_chan_state_notify(mhi, ch_id,
+ MHI_STATE_DISCONNECTED);
}
break;
default:
@@ -1324,13 +1321,25 @@
}
}
+static void mhi_update_state_info_all(enum mhi_ctrl_info info)
+{
+ int i;
+ struct mhi_dev_client_cb_reason reason;
+
+ mhi_ctx->ctrl_info = info;
+ for (i = 0; i < MHI_MAX_CHANNELS; ++i)
+ channel_state_info[i].ctrl_info = info;
+
+ /* For legacy reasons, notify the QTI client */
+ reason.reason = MHI_DEV_CTRL_UPDATE;
+ uci_ctrl_update(&reason);
+}
+
static int mhi_dev_abort(struct mhi_dev *mhi)
{
struct mhi_dev_channel *ch;
struct mhi_dev_ring *ring;
int ch_id = 0, rc = 0;
- char *disconnected_12[2] = { "MHI_CHANNEL_STATE_12=DISCONNECTED", NULL};
- char *disconnected_14[2] = { "MHI_CHANNEL_STATE_14=DISCONNECTED", NULL};
/* Hard stop all the channels */
for (ch_id = 0; ch_id < mhi->cfg.channels; ch_id++) {
@@ -1344,19 +1353,9 @@
mutex_unlock(&ch->ch_lock);
}
- /* Update ctrl node */
- mhi_update_state_info(MHI_DEV_UEVENT_CTRL, MHI_STATE_DISCONNECTED);
- mhi_update_state_info(MHI_CLIENT_MBIM_OUT, MHI_STATE_DISCONNECTED);
- mhi_update_state_info(MHI_CLIENT_QMI_OUT, MHI_STATE_DISCONNECTED);
- rc = kobject_uevent_env(&mhi_ctx->dev->kobj,
- KOBJ_CHANGE, disconnected_12);
- if (rc)
- pr_err("Error sending uevent:%d\n", rc);
-
- rc = kobject_uevent_env(&mhi_ctx->dev->kobj,
- KOBJ_CHANGE, disconnected_14);
- if (rc)
- pr_err("Error sending uevent:%d\n", rc);
+ /* Update channel state and notify clients */
+ mhi_update_state_info_all(MHI_STATE_DISCONNECTED);
+ mhi_uci_chan_state_notify_all(mhi, MHI_STATE_DISCONNECTED);
flush_workqueue(mhi->ring_init_wq);
flush_workqueue(mhi->pending_ring_wq);
@@ -1496,7 +1495,7 @@
struct mhi_dev_ring *ring;
enum mhi_dev_state state;
enum mhi_dev_event event = 0;
- bool mhi_reset = false;
+ u32 mhi_reset;
uint32_t bhi_imgtxdb = 0;
mutex_lock(&mhi_ctx->mhi_lock);
@@ -1984,6 +1983,8 @@
int rc;
ch = handle->channel;
+ if (!ch)
+ return -EINVAL;
rc = ch->ring->rd_offset == ch->ring->wr_offset;
@@ -2369,7 +2370,7 @@
{
int rc = 0;
uint32_t syserr, max_cnt = 0, bhi_intvec = 0;
- bool mhi_reset;
+ u32 mhi_reset;
enum mhi_dev_state state;
/* Check if MHI is in syserr */
@@ -2379,6 +2380,16 @@
mhi_log(MHI_MSG_VERBOSE, "mhi_syserr = 0x%X\n", syserr);
if (syserr) {
+ /* Poll for the host to set the reset bit */
+ rc = mhi_dev_mmio_get_mhi_state(mhi, &state, &mhi_reset);
+ if (rc) {
+ pr_err("%s: get mhi state failed\n", __func__);
+ return rc;
+ }
+
+ mhi_log(MHI_MSG_VERBOSE, "mhi_state = 0x%X, reset = %d\n",
+ state, mhi_reset);
+
rc = mhi_dev_mmio_read(mhi, BHI_INTVEC, &bhi_intvec);
if (rc)
return rc;
@@ -2398,7 +2409,11 @@
pr_err("%s: get mhi state failed\n", __func__);
return rc;
}
- while (mhi_reset != true && max_cnt < MHI_SUSPEND_TIMEOUT) {
+
+ mhi_log(MHI_MSG_VERBOSE, "mhi_state = 0x%X, reset = %d\n",
+ state, mhi_reset);
+
+ while (mhi_reset != 0x1 && max_cnt < MHI_SUSPEND_TIMEOUT) {
/* Wait for Host to set the reset */
msleep(MHI_SUSPEND_MIN);
rc = mhi_dev_mmio_get_mhi_state(mhi, &state,
@@ -2429,7 +2444,7 @@
struct ep_pcie_msi_config msi_cfg;
struct mhi_dev *mhi = container_of(work,
struct mhi_dev, ring_init_cb_work);
- bool mhi_reset;
+ u32 mhi_reset;
enum mhi_dev_state state;
uint32_t max_cnt = 0, bhi_intvec = 0;
@@ -2749,11 +2764,7 @@
ring->ring_cache_dma_handle);
}
- for (i = 0; i < mhi->cfg.channels; i++)
- mutex_destroy(&mhi->ch[i].ch_lock);
-
devm_kfree(&pdev->dev, mhi->mmio_backup);
- devm_kfree(&pdev->dev, mhi->ch);
devm_kfree(&pdev->dev, mhi->ring);
mhi_dev_sm_exit(mhi);
@@ -2781,14 +2792,20 @@
if (!mhi->ring)
return -ENOMEM;
- mhi->ch = devm_kzalloc(&pdev->dev,
+ /*
+ * mhi_init is also called during device reset, in
+ * which case channel mem will already be allocated.
+ */
+ if (!mhi->ch) {
+ mhi->ch = devm_kzalloc(&pdev->dev,
(sizeof(struct mhi_dev_channel) *
(mhi->cfg.channels)), GFP_KERNEL);
- if (!mhi->ch)
- return -ENOMEM;
+ if (!mhi->ch)
+ return -ENOMEM;
- for (i = 0; i < mhi->cfg.channels; i++)
- mutex_init(&mhi->ch[i].ch_lock);
+ for (i = 0; i < mhi->cfg.channels; i++)
+ mutex_init(&mhi->ch[i].ch_lock);
+ }
spin_lock_init(&mhi->lock);
mhi->mmio_backup = devm_kzalloc(&pdev->dev,
@@ -2896,6 +2913,15 @@
struct platform_device *pdev;
int rc = 0;
+ /*
+ * There could be multiple calls to this function if device gets
+ * multiple link-up events (bme irqs).
+ */
+ if (mhi_ctx->init_done) {
+ mhi_log(MHI_MSG_INFO, "mhi init already done, returning\n");
+ return 0;
+ }
+
pdev = mhi_ctx->pdev;
INIT_WORK(&mhi_ctx->chdb_ctrl_work, mhi_dev_scheduler);
@@ -3023,6 +3049,8 @@
disable_irq(mhi_ctx->mhi_irq);
}
+ mhi_ctx->init_done = true;
+
return 0;
}
diff --git a/drivers/platform/msm/mhi_dev/mhi.h b/drivers/platform/msm/mhi_dev/mhi.h
index fae39e0..d715207 100644
--- a/drivers/platform/msm/mhi_dev/mhi.h
+++ b/drivers/platform/msm/mhi_dev/mhi.h
@@ -564,6 +564,9 @@
/* iATU is required to map control and data region */
bool config_iatu;
+ /* Indicates if mhi init is done */
+ bool init_done;
+
/* MHI state info */
enum mhi_ctrl_info ctrl_info;
@@ -982,7 +985,7 @@
* @mhi_reset: MHI device reset from host.
*/
int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state,
- bool *mhi_reset);
+ u32 *mhi_reset);
/**
* mhi_dev_mmio_init() - Initializes the MMIO and reads the Number of event
@@ -1092,5 +1095,17 @@
void mhi_dev_notify_a7_event(struct mhi_dev *mhi);
void uci_ctrl_update(struct mhi_dev_client_cb_reason *reason);
+/**
+ * mhi_uci_chan_state_notify_all - Notifies channel state updates for
+ * all clients who have uevents enabled.
+ */
+void mhi_uci_chan_state_notify_all(struct mhi_dev *mhi,
+ enum mhi_ctrl_info ch_state);
+/**
+ * mhi_uci_chan_state_notify - Notifies channel state update to the client
+ * if uevents are enabled.
+ */
+void mhi_uci_chan_state_notify(struct mhi_dev *mhi,
+ enum mhi_client_channel ch_id, enum mhi_ctrl_info ch_state);
#endif /* _MHI_H */
diff --git a/drivers/platform/msm/mhi_dev/mhi_mmio.c b/drivers/platform/msm/mhi_dev/mhi_mmio.c
index 28d465a..22cf671 100644
--- a/drivers/platform/msm/mhi_dev/mhi_mmio.c
+++ b/drivers/platform/msm/mhi_dev/mhi_mmio.c
@@ -259,7 +259,7 @@
EXPORT_SYMBOL(mhi_dev_mmio_disable_erdb_a7);
int mhi_dev_mmio_get_mhi_state(struct mhi_dev *dev, enum mhi_dev_state *state,
- bool *mhi_reset)
+ u32 *mhi_reset)
{
uint32_t reg_value = 0;
int rc = 0;
@@ -279,9 +279,12 @@
return rc;
if (reg_value & MHICTRL_RESET_MASK)
- *mhi_reset = true;
+ *mhi_reset = 1;
+ else
+ *mhi_reset = 0;
- pr_debug("MHICTRL is 0x%x\n", reg_value);
+ mhi_log(MHI_MSG_VERBOSE, "MHICTRL is 0x%x, reset:%d\n",
+ reg_value, *mhi_reset);
return 0;
}
diff --git a/drivers/platform/msm/mhi_dev/mhi_uci.c b/drivers/platform/msm/mhi_dev/mhi_uci.c
index 87dddfd..6cf7420 100644
--- a/drivers/platform/msm/mhi_dev/mhi_uci.c
+++ b/drivers/platform/msm/mhi_dev/mhi_uci.c
@@ -83,6 +83,8 @@
void (*tre_notif_cb)(struct mhi_dev_client_cb_reason *reason);
/* Write completion - false if not needed */
bool wr_cmpl;
+ /* Uevent broadcast of channel state */
+ bool state_bcast;
};
@@ -146,7 +148,10 @@
MAX_NR_TRBS_PER_CHAN,
MHI_DIR_OUT,
NULL,
- NULL
+ NULL,
+ NULL,
+ false,
+ true
},
{
MHI_CLIENT_MBIM_IN,
@@ -162,7 +167,10 @@
MAX_NR_TRBS_PER_CHAN,
MHI_DIR_OUT,
NULL,
- NULL
+ NULL,
+ NULL,
+ false,
+ true
},
{
MHI_CLIENT_QMI_IN,
@@ -1143,6 +1151,71 @@
}
+void mhi_uci_chan_state_notify_all(struct mhi_dev *mhi,
+ enum mhi_ctrl_info ch_state)
+{
+ unsigned int i;
+ const struct chan_attr *chan_attrib;
+
+ for (i = 0; i < ARRAY_SIZE(uci_chan_attr_table); i++) {
+ chan_attrib = &uci_chan_attr_table[i];
+ if (chan_attrib->state_bcast) {
+ uci_log(UCI_DBG_ERROR, "Calling notify for ch %d\n",
+ chan_attrib->chan_id);
+ mhi_uci_chan_state_notify(mhi, chan_attrib->chan_id,
+ ch_state);
+ }
+ }
+}
+EXPORT_SYMBOL(mhi_uci_chan_state_notify_all);
+
+void mhi_uci_chan_state_notify(struct mhi_dev *mhi,
+ enum mhi_client_channel ch_id, enum mhi_ctrl_info ch_state)
+{
+ struct uci_client *uci_handle;
+ char *buf[2];
+ int rc;
+
+ if (ch_id < 0 || ch_id >= MHI_MAX_SOFTWARE_CHANNELS) {
+ uci_log(UCI_DBG_ERROR, "Invalid chan %d\n", ch_id);
+ return;
+ }
+
+ uci_handle = &uci_ctxt.client_handles[CHAN_TO_CLIENT(ch_id)];
+ if (!uci_handle->in_chan_attr ||
+ !uci_handle->in_chan_attr->state_bcast) {
+ uci_log(UCI_DBG_VERBOSE, "Uevents not enabled for chan %d\n",
+ ch_id);
+ return;
+ }
+
+ if (ch_state == MHI_STATE_CONNECTED) {
+ buf[0] = kasprintf(GFP_KERNEL,
+ "MHI_CHANNEL_STATE_%d=CONNECTED", ch_id);
+ buf[1] = NULL;
+ } else if (ch_state == MHI_STATE_DISCONNECTED) {
+ buf[0] = kasprintf(GFP_KERNEL,
+ "MHI_CHANNEL_STATE_%d=DISCONNECTED", ch_id);
+ buf[1] = NULL;
+ } else {
+ uci_log(UCI_DBG_ERROR, "Unsupported chan state %d\n", ch_state);
+ return;
+ }
+
+ if (!buf[0]) {
+ uci_log(UCI_DBG_ERROR, "kasprintf failed for uevent buf!\n");
+ return;
+ }
+
+ rc = kobject_uevent_env(&mhi->dev->kobj, KOBJ_CHANGE, buf);
+ if (rc)
+ uci_log(UCI_DBG_ERROR,
+ "Sending uevent failed for chan %d\n", ch_id);
+
+ kfree(buf[0]);
+}
+EXPORT_SYMBOL(mhi_uci_chan_state_notify);
+
void uci_ctrl_update(struct mhi_dev_client_cb_reason *reason)
{
struct uci_ctrl *uci_ctrl_handle = NULL;
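
mhi_uci_chan_state_notify() builds the uevent variable with kasprintf() so any channel id can be reported, which is what lets the mhi.c hunks above drop their hard-coded MHI_CHANNEL_STATE_12/14 string arrays. The same construction in userspace, with asprintf() for kasprintf() and printf() for kobject_uevent_env():

    #define _GNU_SOURCE
    #include <stdio.h>
    #include <stdlib.h>

    static int notify_chan_state(int ch_id, const char *state)
    {
        char *env[2] = { NULL, NULL };      /* NULL-terminated env list */

        if (asprintf(&env[0], "MHI_CHANNEL_STATE_%d=%s", ch_id, state) < 0)
            return -1;
        printf("uevent: %s\n", env[0]);     /* kobject_uevent_env() stand-in */
        free(env[0]);
        return 0;
    }

    int main(void)
    {
        return notify_chan_state(12, "CONNECTED");
    }
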
diff --git a/drivers/platform/x86/alienware-wmi.c b/drivers/platform/x86/alienware-wmi.c
index 0056294..fe41993 100644
--- a/drivers/platform/x86/alienware-wmi.c
+++ b/drivers/platform/x86/alienware-wmi.c
@@ -518,6 +518,7 @@
if (obj && obj->type == ACPI_TYPE_INTEGER)
*out_data = (u32) obj->integer.value;
}
+ kfree(output.pointer);
return status;
}
diff --git a/drivers/power/reset/msm-poweroff.c b/drivers/power/reset/msm-poweroff.c
index 37ae308..c49e254 100644
--- a/drivers/power/reset/msm-poweroff.c
+++ b/drivers/power/reset/msm-poweroff.c
@@ -87,6 +87,8 @@
static struct kobject dload_kobj;
static void *dload_type_addr;
+static bool force_warm_reboot;
+
static int dload_set(const char *val, const struct kernel_param *kp);
/* interface for exporting attributes */
struct reset_attribute {
@@ -302,8 +304,11 @@
(cmd != NULL && cmd[0] != '\0'));
}
+ if (force_warm_reboot)
+ pr_info("Forcing a warm reset of the system\n");
+
/* Hard reset the PMIC unless memory contents must be maintained. */
- if (need_warm_reset)
+ if (force_warm_reboot || need_warm_reset)
qpnp_pon_system_pwr_off(PON_POWER_OFF_WARM_RESET);
else
qpnp_pon_system_pwr_off(PON_POWER_OFF_HARD_RESET);
@@ -684,6 +689,9 @@
if (!download_mode)
scm_disable_sdi();
+ force_warm_reboot = of_property_read_bool(dev->of_node,
+ "qcom,force-warm-reboot");
+
return 0;
err_restart_reason:
diff --git a/drivers/power/reset/vexpress-poweroff.c b/drivers/power/reset/vexpress-poweroff.c
index 102f95a..e9e749f 100644
--- a/drivers/power/reset/vexpress-poweroff.c
+++ b/drivers/power/reset/vexpress-poweroff.c
@@ -35,6 +35,7 @@
}
static struct device *vexpress_power_off_device;
+static atomic_t vexpress_restart_nb_refcnt = ATOMIC_INIT(0);
static void vexpress_power_off(void)
{
@@ -99,10 +100,13 @@
int err;
vexpress_restart_device = dev;
- err = register_restart_handler(&vexpress_restart_nb);
- if (err) {
- dev_err(dev, "cannot register restart handler (err=%d)\n", err);
- return err;
+ if (atomic_inc_return(&vexpress_restart_nb_refcnt) == 1) {
+ err = register_restart_handler(&vexpress_restart_nb);
+ if (err) {
+ dev_err(dev, "cannot register restart handler (err=%d)\n", err);
+ atomic_dec(&vexpress_restart_nb_refcnt);
+ return err;
+ }
}
device_create_file(dev, &dev_attr_active);
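
The vexpress fix makes restart-handler registration idempotent across multiple probed devices: only the 0-to-1 refcount transition registers the notifier, and a failed registration rolls the count back. The same idea with C11 atomics:

    #include <stdatomic.h>

    static atomic_int restart_nb_refcnt;

    static int register_restart_handler(void) { return 0; /* stand-in */ }

    static int setup_restart(void)
    {
        /* atomic_inc_return(...) == 1 in the kernel is fetch_add(...) == 0. */
        if (atomic_fetch_add(&restart_nb_refcnt, 1) == 0) {
            int err = register_restart_handler();

            if (err) {
                atomic_fetch_sub(&restart_nb_refcnt, 1);  /* roll back */
                return err;
            }
        }
        return 0;   /* already registered by an earlier probe */
    }

    int main(void)
    {
        return setup_restart();
    }
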
diff --git a/drivers/power/supply/power_supply_core.c b/drivers/power/supply/power_supply_core.c
index 077d237..77b6885 100644
--- a/drivers/power/supply/power_supply_core.c
+++ b/drivers/power/supply/power_supply_core.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/init.h>
#include <linux/slab.h>
+#include <linux/delay.h>
#include <linux/device.h>
#include <linux/notifier.h>
#include <linux/err.h>
@@ -141,8 +142,13 @@
struct power_supply *psy = container_of(work, struct power_supply,
deferred_register_work.work);
- if (psy->dev.parent)
- mutex_lock(&psy->dev.parent->mutex);
+ if (psy->dev.parent) {
+ while (!mutex_trylock(&psy->dev.parent->mutex)) {
+ if (psy->removing)
+ return;
+ msleep(10);
+ }
+ }
psy_register_cooler(psy->dev.parent, psy);
power_supply_changed(psy);
@@ -948,6 +954,7 @@
void power_supply_unregister(struct power_supply *psy)
{
WARN_ON(atomic_dec_return(&psy->use_cnt));
+ psy->removing = true;
cancel_work_sync(&psy->changed_work);
cancel_delayed_work_sync(&psy->deferred_register_work);
sysfs_remove_link(&psy->dev.kobj, "powers");
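
The power-supply change breaks a deadlock between the deferred-register worker and unregister: rather than blocking on the parent lock, the worker polls with mutex_trylock() and bails out once removing is set. A pthread rendering of the loop:

    #include <pthread.h>
    #include <stdbool.h>
    #include <unistd.h>

    static pthread_mutex_t parent_lock = PTHREAD_MUTEX_INITIALIZER;
    static volatile bool removing;

    static int deferred_register_work(void)
    {
        /* Poll instead of blocking: teardown may hold parent_lock forever. */
        while (pthread_mutex_trylock(&parent_lock)) {
            if (removing)
                return -1;          /* device going away: give up quietly */
            usleep(10 * 1000);      /* msleep(10) equivalent */
        }
        /* ... registration work under the lock ... */
        pthread_mutex_unlock(&parent_lock);
        return 0;
    }

    int main(void)
    {
        return deferred_register_work();
    }
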
diff --git a/drivers/power/supply/power_supply_sysfs.c b/drivers/power/supply/power_supply_sysfs.c
index a0b9bca..07715b7 100644
--- a/drivers/power/supply/power_supply_sysfs.c
+++ b/drivers/power/supply/power_supply_sysfs.c
@@ -340,6 +340,7 @@
POWER_SUPPLY_ATTR(soh),
POWER_SUPPLY_ATTR(qc_opti_disable),
POWER_SUPPLY_ATTR(fcc_stepper_enable),
+ POWER_SUPPLY_ATTR(cc_soc),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/supply/qcom/qg-soc.c b/drivers/power/supply/qcom/qg-soc.c
index 711bd2b..a4da904 100644
--- a/drivers/power/supply/qcom/qg-soc.c
+++ b/drivers/power/supply/qcom/qg-soc.c
@@ -53,7 +53,7 @@
chip->sys_soc = CAP(QG_MIN_SOC, QG_MAX_SOC, chip->sys_soc);
- if (chip->sys_soc == QG_MIN_SOC) {
+ if (chip->sys_soc < 100) {
/* Hold SOC to 1% if VBAT has not dropped below cutoff */
rc = qg_get_battery_voltage(chip, &vbat_uv);
if (!rc && vbat_uv >= (vcutoff_uv + VBAT_LOW_HYST_UV))
diff --git a/drivers/power/supply/qcom/qpnp-qg.c b/drivers/power/supply/qcom/qpnp-qg.c
index 12ab956..a206f68 100644
--- a/drivers/power/supply/qcom/qpnp-qg.c
+++ b/drivers/power/supply/qcom/qpnp-qg.c
@@ -1061,10 +1061,6 @@
pr_err("Failed to update SDAM params, rc=%d\n", rc);
}
- if (chip->udata.param[QG_CHARGE_COUNTER].valid)
- chip->charge_counter_uah =
- chip->udata.param[QG_CHARGE_COUNTER].data;
-
if (chip->udata.param[QG_ESR].valid)
chip->esr_last = chip->udata.param[QG_ESR].data;
@@ -1545,6 +1541,26 @@
return 0;
}
+static int qg_get_charge_counter(struct qpnp_qg *chip, int *charge_counter)
+{
+ int rc, cc_soc = 0;
+ int64_t temp = 0;
+
+ rc = qg_get_learned_capacity(chip, &temp);
+ if (rc < 0 || !temp)
+ rc = qg_get_nominal_capacity((int *)&temp, 250, true);
+
+ if (rc < 0) {
+ pr_err("Failed to get FCC for charge-counter rc=%d\n", rc);
+ return rc;
+ }
+
+ cc_soc = CAP(0, 100, DIV_ROUND_CLOSEST(chip->cc_soc, 100));
+ *charge_counter = div_s64(temp * cc_soc, 100);
+
+ return 0;
+}
+
static int qg_get_ttf_param(void *data, enum ttf_param param, int *val)
{
union power_supply_propval prop = {0, };
@@ -1732,7 +1748,7 @@
pval->intval = chip->bp.qg_profile_version;
break;
case POWER_SUPPLY_PROP_CHARGE_COUNTER:
- pval->intval = chip->charge_counter_uah;
+ rc = qg_get_charge_counter(chip, &pval->intval);
break;
case POWER_SUPPLY_PROP_CHARGE_FULL:
if (!chip->dt.cl_disable && chip->dt.cl_feedback_on)
@@ -1772,6 +1788,9 @@
case POWER_SUPPLY_PROP_SOH:
pval->intval = chip->soh;
break;
+ case POWER_SUPPLY_PROP_CC_SOC:
+ rc = qg_get_cc_soc(chip, &pval->intval);
+ break;
default:
pr_debug("Unsupported property %d\n", psp);
break;
@@ -1822,6 +1841,7 @@
POWER_SUPPLY_PROP_ESR_ACTUAL,
POWER_SUPPLY_PROP_ESR_NOMINAL,
POWER_SUPPLY_PROP_SOH,
+ POWER_SUPPLY_PROP_CC_SOC,
};
static const struct power_supply_desc qg_psy_desc = {
diff --git a/drivers/power/supply/qcom/qpnp-smbcharger.c b/drivers/power/supply/qcom/qpnp-smbcharger.c
index e9e080d..896509d 100644
--- a/drivers/power/supply/qcom/qpnp-smbcharger.c
+++ b/drivers/power/supply/qcom/qpnp-smbcharger.c
@@ -4043,6 +4043,11 @@
u8 reg;
int rc;
+ if (!is_bms_psy_present(chip)) {
+ dev_err(chip->dev, "Couldn't access bms psy\n");
+ return;
+ }
+
reg = (value > LED_OFF) ? CHG_LED_ON << CHG_LED_SHIFT :
CHG_LED_OFF << CHG_LED_SHIFT;
pval.intval = value > LED_OFF ? 1 : 0;
@@ -4090,6 +4095,11 @@
u8 reg;
int rc;
+ if (!is_bms_psy_present(chip)) {
+ dev_err(chip->dev, "Couldn't access bms psy\n");
+ return;
+ }
+
pval.intval = (blinking == 0) ? 0 : 1;
power_supply_set_property(chip->bms_psy, POWER_SUPPLY_PROP_HI_POWER,
&pval);
diff --git a/drivers/power/supply/qcom/qpnp-vm-bms.c b/drivers/power/supply/qcom/qpnp-vm-bms.c
index 6295bfc..ef05178 100644
--- a/drivers/power/supply/qcom/qpnp-vm-bms.c
+++ b/drivers/power/supply/qcom/qpnp-vm-bms.c
@@ -2102,6 +2102,10 @@
monitor_soc_work.work);
int rc, new_soc = 0, batt_temp;
+ /* skip if it's a debug board */
+ if (is_debug_batt_id(chip))
+ return;
+
bms_stay_awake(&chip->vbms_soc_wake_source);
calculate_delta_time(&chip->tm_sec, &chip->delta_time_s);
diff --git a/drivers/regulator/core.c b/drivers/regulator/core.c
index 1a25499..0d81304 100644
--- a/drivers/regulator/core.c
+++ b/drivers/regulator/core.c
@@ -4458,13 +4458,13 @@
!rdev->desc->fixed_uV)
rdev->is_switch = true;
+ dev_set_drvdata(&rdev->dev, rdev);
ret = device_register(&rdev->dev);
if (ret != 0) {
put_device(&rdev->dev);
goto unset_supplies;
}
- dev_set_drvdata(&rdev->dev, rdev);
rdev_init_debugfs(rdev);
rdev->proxy_consumer = regulator_proxy_consumer_register(dev,
config->of_node);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index 258a728..a5e6030 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -23,6 +23,7 @@
#include <linux/netdevice.h>
#include <linux/netdev_features.h>
#include <linux/skbuff.h>
+#include <linux/vmalloc.h>
#include <net/iucv/af_iucv.h>
#include <net/dsfield.h>
@@ -4715,7 +4716,7 @@
priv.buffer_len = oat_data.buffer_len;
priv.response_len = 0;
- priv.buffer = kzalloc(oat_data.buffer_len, GFP_KERNEL);
+ priv.buffer = vzalloc(oat_data.buffer_len);
if (!priv.buffer) {
rc = -ENOMEM;
goto out;
@@ -4756,7 +4757,7 @@
rc = -EFAULT;
out_free:
- kfree(priv.buffer);
+ vfree(priv.buffer);
out:
return rc;
}
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index e94e957..58404e6 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -491,7 +491,7 @@
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+ QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
continue;
}
work_done++;
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 4ca161b..efefe07 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -1836,7 +1836,7 @@
default:
dev_kfree_skb_any(skb);
QETH_CARD_TEXT(card, 3, "inbunkno");
- QETH_DBF_HEX(CTRL, 3, hdr, QETH_DBF_CTRL_LEN);
+ QETH_DBF_HEX(CTRL, 3, hdr, sizeof(*hdr));
continue;
}
work_done++;
diff --git a/drivers/scsi/bnx2i/bnx2i_hwi.c b/drivers/scsi/bnx2i/bnx2i_hwi.c
index 42921db..4ca1050 100644
--- a/drivers/scsi/bnx2i/bnx2i_hwi.c
+++ b/drivers/scsi/bnx2i/bnx2i_hwi.c
@@ -2742,6 +2742,8 @@
BNX2X_DOORBELL_PCI_BAR);
reg_off = (1 << BNX2X_DB_SHIFT) * (cid_num & 0x1FFFF);
ep->qp.ctx_base = ioremap_nocache(reg_base + reg_off, 4);
+ if (!ep->qp.ctx_base)
+ return -ENOMEM;
goto arm_cq;
}
diff --git a/drivers/scsi/ibmvscsi/ibmvscsi.c b/drivers/scsi/ibmvscsi/ibmvscsi.c
index d9534ee..e173022 100644
--- a/drivers/scsi/ibmvscsi/ibmvscsi.c
+++ b/drivers/scsi/ibmvscsi/ibmvscsi.c
@@ -93,7 +93,7 @@
static int max_events = IBMVSCSI_MAX_REQUESTS_DEFAULT + 2;
static int fast_fail = 1;
static int client_reserve = 1;
-static char partition_name[97] = "UNKNOWN";
+static char partition_name[96] = "UNKNOWN";
static unsigned int partition_number = -1;
static struct scsi_transport_template *ibmvscsi_transport_template;
@@ -259,7 +259,7 @@
ppartition_name = of_get_property(of_root, "ibm,partition-name", NULL);
if (ppartition_name)
- strncpy(partition_name, ppartition_name,
+ strlcpy(partition_name, ppartition_name,
sizeof(partition_name));
p_number_ptr = of_get_property(of_root, "ibm,partition-no", NULL);
if (p_number_ptr)
diff --git a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
index 0414843..5ed2811 100644
--- a/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
+++ b/drivers/scsi/ibmvscsi_tgt/ibmvscsi_tgt.c
@@ -3342,11 +3342,10 @@
vscsi->dds.window[LOCAL].liobn,
vscsi->dds.window[REMOTE].liobn);
- strcpy(vscsi->eye, "VSCSI ");
- strncat(vscsi->eye, vdev->name, MAX_EYE);
+ snprintf(vscsi->eye, sizeof(vscsi->eye), "VSCSI %s", vdev->name);
vscsi->dds.unit_id = vdev->unit_address;
- strncpy(vscsi->dds.partition_name, partition_name,
+ strscpy(vscsi->dds.partition_name, partition_name,
sizeof(vscsi->dds.partition_name));
vscsi->dds.partition_num = partition_number;
diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c
index 35cbd36..090fdcd 100644
--- a/drivers/scsi/megaraid/megaraid_sas_base.c
+++ b/drivers/scsi/megaraid/megaraid_sas_base.c
@@ -6193,6 +6193,9 @@
goto fail_init_mfi;
}
+ if (megasas_get_ctrl_info(instance) != DCMD_SUCCESS)
+ goto fail_init_mfi;
+
tasklet_init(&instance->isr_tasklet, instance->instancet->tasklet,
(unsigned long)instance);
diff --git a/drivers/scsi/qla2xxx/qla_target.h b/drivers/scsi/qla2xxx/qla_target.h
index 0824a81..07ea4fc 100644
--- a/drivers/scsi/qla2xxx/qla_target.h
+++ b/drivers/scsi/qla2xxx/qla_target.h
@@ -440,8 +440,8 @@
static inline int fcpcmd_is_corrupted(struct atio *atio)
{
if (atio->entry_type == ATIO_TYPE7 &&
- (le16_to_cpu(atio->attr_n_length & FCP_CMD_LENGTH_MASK) <
- FCP_CMD_LENGTH_MIN))
+ ((le16_to_cpu(atio->attr_n_length) & FCP_CMD_LENGTH_MASK) <
+ FCP_CMD_LENGTH_MIN))
return 1;
else
return 0;
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 1cb0403..1d0351b 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -1164,7 +1164,8 @@
case REQ_OP_WRITE:
return sd_setup_read_write_cmnd(cmd);
default:
- BUG();
+ WARN_ON_ONCE(1);
+ return BLKPREP_KILL;
}
}
diff --git a/drivers/soc/qcom/bg_rsb.c b/drivers/soc/qcom/bg_rsb.c
index 1b5830a..e653139 100644
--- a/drivers/soc/qcom/bg_rsb.c
+++ b/drivers/soc/qcom/bg_rsb.c
@@ -35,8 +35,8 @@
#define BGRSB_MSG_SIZE 0x08
#define TIMEOUT_MS 2000
-#define BGRSB_LDO15_VTG_MIN_UV 3300000
-#define BGRSB_LDO15_VTG_MAX_UV 3300000
+#define BGRSB_LDO15_VTG_MIN_UV 3000000
+#define BGRSB_LDO15_VTG_MAX_UV 3000000
#define BGRSB_LDO11_VTG_MIN_UV 1800000
#define BGRSB_LDO11_VTG_MAX_UV 1800000
@@ -135,6 +135,8 @@
bool calibration_needed;
bool is_calibrd;
+
+ bool is_cnfgrd;
};
static void *bgrsb_drv;
@@ -415,6 +417,7 @@
pr_err("Failed to unvote LDO-11 on BG down\n");
}
+ dev->is_cnfgrd = false;
pr_info("RSB current state is : %d\n", dev->bgrsb_current_state);
if (dev->bgrsb_current_state == BGRSB_STATE_INIT) {
@@ -452,6 +455,9 @@
else
pr_err("Failed to unvote LDO-11 on BG Glink down\n");
}
+
+ dev->is_cnfgrd = false;
+
if (dev->handle)
glink_close(dev->handle);
dev->handle = NULL;
@@ -562,6 +568,8 @@
dev->bgrsb_current_state = BGRSB_STATE_INIT;
return;
}
+
+ dev->is_cnfgrd = true;
dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED;
pr_debug("RSB Cofigured\n");
}
@@ -592,6 +600,7 @@
dev->bgrsb_current_state = BGRSB_STATE_INIT;
return;
}
+ dev->is_cnfgrd = true;
dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED;
pr_debug("Glink RSB Cofigured\n");
}
@@ -715,6 +724,11 @@
container_of(work, struct bgrsb_priv,
rsb_calibration_work);
+ if (!dev->is_cnfgrd) {
+ pr_err("RSB is not configured\n");
+ return;
+ }
+
req.cmd_id = 0x03;
req.data = dev->calbrtion_cpi;
@@ -744,6 +758,11 @@
container_of(work, struct bgrsb_priv,
bttn_configr_work);
+ if (!dev->is_cnfgrd) {
+ pr_err("RSB is not configured\n");
+ return;
+ }
+
req.cmd_id = 0x05;
req.data = dev->bttn_configs;
@@ -993,7 +1012,8 @@
goto ret_success;
if (dev->bgrsb_current_state == BGRSB_STATE_INIT) {
- if (bgrsb_ldo_work(dev, BGRSB_ENABLE_LDO11) == 0) {
+ if (dev->is_cnfgrd &&
+ bgrsb_ldo_work(dev, BGRSB_ENABLE_LDO11) == 0) {
dev->bgrsb_current_state = BGRSB_STATE_RSB_CONFIGURED;
pr_debug("RSB Cofigured\n");
goto ret_success;
diff --git a/drivers/soc/qcom/bgcom_spi.c b/drivers/soc/qcom/bgcom_spi.c
index c6fc2db..b13f06d 100644
--- a/drivers/soc/qcom/bgcom_spi.c
+++ b/drivers/soc/qcom/bgcom_spi.c
@@ -464,7 +464,6 @@
int bgcom_ahb_read(void *handle, uint32_t ahb_start_addr,
uint32_t num_words, void *read_buf)
{
- dma_addr_t dma_hndl_tx, dma_hndl_rx;
uint32_t txn_len;
uint8_t *tx_buf;
uint8_t *rx_buf;
@@ -472,7 +471,6 @@
int ret;
uint8_t cmnd = 0;
uint32_t ahb_addr = 0;
- struct spi_device *spi = get_spi_device();
if (!handle || !read_buf || num_words == 0
|| num_words > BG_SPI_MAX_WORDS) {
@@ -495,16 +493,13 @@
size = num_words*BG_SPI_WORD_SIZE;
txn_len = BG_SPI_AHB_READ_CMD_LEN + size;
-
- tx_buf = dma_zalloc_coherent(&spi->dev, txn_len,
- &dma_hndl_tx, GFP_KERNEL);
+ tx_buf = kzalloc(txn_len, GFP_KERNEL | GFP_ATOMIC);
if (!tx_buf)
return -ENOMEM;
- rx_buf = dma_zalloc_coherent(&spi->dev, txn_len,
- &dma_hndl_rx, GFP_KERNEL);
+ rx_buf = kzalloc(txn_len, GFP_KERNEL | GFP_ATOMIC);
if (!rx_buf) {
- dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl_tx);
+ kfree(tx_buf);
return -ENOMEM;
}
@@ -519,8 +514,8 @@
if (!ret)
memcpy(read_buf, rx_buf+BG_SPI_AHB_READ_CMD_LEN, size);
- dma_free_coherent(&spi->dev, txn_len, tx_buf, dma_hndl_tx);
- dma_free_coherent(&spi->dev, txn_len, rx_buf, dma_hndl_rx);
+ kfree(tx_buf);
+ kfree(rx_buf);
return ret;
}
EXPORT_SYMBOL(bgcom_ahb_read);
@@ -557,7 +552,6 @@
return -EBUSY;
}
-
mutex_lock(&cma_buffer_lock);
size = num_words*BG_SPI_WORD_SIZE;
txn_len = BG_SPI_AHB_CMD_LEN + size;
diff --git a/drivers/soc/qcom/system_pm.c b/drivers/soc/qcom/system_pm.c
index 9ef685b..e65f0b7 100644
--- a/drivers/soc/qcom/system_pm.c
+++ b/drivers/soc/qcom/system_pm.c
@@ -75,9 +75,10 @@
/**
* system_sleep_exit() - Activities done when exiting system low power modes
*/
-static void system_sleep_exit(void)
+static void system_sleep_exit(bool success)
{
- msm_rpmh_master_stats_update();
+ if (success)
+ msm_rpmh_master_stats_update();
gic_v3_dist_restore();
}
diff --git a/drivers/spi/spi-rspi.c b/drivers/spi/spi-rspi.c
index a816f07..093c9cf 100644
--- a/drivers/spi/spi-rspi.c
+++ b/drivers/spi/spi-rspi.c
@@ -597,11 +597,13 @@
ret = wait_event_interruptible_timeout(rspi->wait,
rspi->dma_callbacked, HZ);
- if (ret > 0 && rspi->dma_callbacked)
+ if (ret > 0 && rspi->dma_callbacked) {
ret = 0;
- else if (!ret) {
- dev_err(&rspi->master->dev, "DMA timeout\n");
- ret = -ETIMEDOUT;
+ } else {
+ if (!ret) {
+ dev_err(&rspi->master->dev, "DMA timeout\n");
+ ret = -ETIMEDOUT;
+ }
if (tx)
dmaengine_terminate_all(rspi->master->dma_tx);
if (rx)
@@ -1313,12 +1315,36 @@
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+#ifdef CONFIG_PM_SLEEP
+static int rspi_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+ return spi_master_suspend(rspi->master);
+}
+
+static int rspi_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct rspi_data *rspi = platform_get_drvdata(pdev);
+
+ return spi_master_resume(rspi->master);
+}
+
+static SIMPLE_DEV_PM_OPS(rspi_pm_ops, rspi_suspend, rspi_resume);
+#define DEV_PM_OPS &rspi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver rspi_driver = {
.probe = rspi_probe,
.remove = rspi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "renesas_spi",
+ .pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(rspi_of_match),
},
};
diff --git a/drivers/spi/spi-sh-msiof.c b/drivers/spi/spi-sh-msiof.c
index cbf02eb..711ea52 100644
--- a/drivers/spi/spi-sh-msiof.c
+++ b/drivers/spi/spi-sh-msiof.c
@@ -373,7 +373,8 @@
static void sh_msiof_reset_str(struct sh_msiof_spi_priv *p)
{
- sh_msiof_write(p, STR, sh_msiof_read(p, STR));
+ sh_msiof_write(p, STR,
+ sh_msiof_read(p, STR) & ~(STR_TDREQ | STR_RDREQ));
}
static void sh_msiof_spi_write_fifo_8(struct sh_msiof_spi_priv *p,
@@ -1275,12 +1276,37 @@
};
MODULE_DEVICE_TABLE(platform, spi_driver_ids);
+#ifdef CONFIG_PM_SLEEP
+static int sh_msiof_spi_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+ return spi_master_suspend(p->master);
+}
+
+static int sh_msiof_spi_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct sh_msiof_spi_priv *p = platform_get_drvdata(pdev);
+
+ return spi_master_resume(p->master);
+}
+
+static SIMPLE_DEV_PM_OPS(sh_msiof_spi_pm_ops, sh_msiof_spi_suspend,
+ sh_msiof_spi_resume);
+#define DEV_PM_OPS &sh_msiof_spi_pm_ops
+#else
+#define DEV_PM_OPS NULL
+#endif /* CONFIG_PM_SLEEP */
+
static struct platform_driver sh_msiof_spi_drv = {
.probe = sh_msiof_spi_probe,
.remove = sh_msiof_spi_remove,
.id_table = spi_driver_ids,
.driver = {
.name = "spi_sh_msiof",
+ .pm = DEV_PM_OPS,
.of_match_table = of_match_ptr(sh_msiof_match),
},
};
diff --git a/drivers/spi/spi-tegra20-slink.c b/drivers/spi/spi-tegra20-slink.c
index 85c91f5..af2880d 100644
--- a/drivers/spi/spi-tegra20-slink.c
+++ b/drivers/spi/spi-tegra20-slink.c
@@ -1063,6 +1063,24 @@
goto exit_free_master;
}
+ /* a disabled clock may cause an interrupt storm when the IRQ is requested */
+ tspi->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(tspi->clk)) {
+ ret = PTR_ERR(tspi->clk);
+ dev_err(&pdev->dev, "Can not get clock %d\n", ret);
+ goto exit_free_master;
+ }
+ ret = clk_prepare(tspi->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clock prepare failed %d\n", ret);
+ goto exit_free_master;
+ }
+ ret = clk_enable(tspi->clk);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "Clock enable failed %d\n", ret);
+ goto exit_free_master;
+ }
+
spi_irq = platform_get_irq(pdev, 0);
tspi->irq = spi_irq;
ret = request_threaded_irq(tspi->irq, tegra_slink_isr,
@@ -1071,14 +1089,7 @@
if (ret < 0) {
dev_err(&pdev->dev, "Failed to register ISR for IRQ %d\n",
tspi->irq);
- goto exit_free_master;
- }
-
- tspi->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(tspi->clk)) {
- dev_err(&pdev->dev, "can not get clock\n");
- ret = PTR_ERR(tspi->clk);
- goto exit_free_irq;
+ goto exit_clk_disable;
}
tspi->rst = devm_reset_control_get(&pdev->dev, "spi");
@@ -1138,6 +1149,8 @@
tegra_slink_deinit_dma_param(tspi, true);
exit_free_irq:
free_irq(spi_irq, tspi);
+exit_clk_disable:
+ clk_disable(tspi->clk);
exit_free_master:
spi_master_put(master);
return ret;
@@ -1150,6 +1163,8 @@
free_irq(tspi->irq, tspi);
+ clk_disable(tspi->clk);
+
if (tspi->tx_dma_chan)
tegra_slink_deinit_dma_param(tspi, false);
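
The tegra20-slink change above reorders probe: the clock is acquired and enabled before request_threaded_irq(), since (per the comment in the hunk) an interrupt can storm as soon as the handler is registered, and the error path is rethreaded so resources unwind in reverse order of acquisition. A minimal sketch of that goto-unwind ordering, with stubs in place of the clk/IRQ calls:

    #include <stdio.h>

    /* Stubs standing in for clk_enable() and request_irq(). */
    static int enable_clock(void)     { puts("clock on");  return 0;  }
    static void disable_clock(void)   { puts("clock off");            }
    static int request_irq_stub(void) { puts("irq fails"); return -1; }

    static int probe(void)
    {
        int ret;

        ret = enable_clock();     /* 1. clock first, so the IRQ can't storm */
        if (ret)
            return ret;

        ret = request_irq_stub(); /* 2. now it is safe to request the IRQ */
        if (ret)
            goto err_clk;

        return 0;

    err_clk:
        disable_clock();          /* unwind in reverse acquisition order */
        return ret;
    }

    int main(void) { return probe() ? 1 : 0; }
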
diff --git a/drivers/spmi/spmi-pmic-arb.c b/drivers/spmi/spmi-pmic-arb.c
index a26b289..e28e81d 100644
--- a/drivers/spmi/spmi-pmic-arb.c
+++ b/drivers/spmi/spmi-pmic-arb.c
@@ -551,8 +551,7 @@
static void periph_interrupt(struct spmi_pmic_arb *pa, u16 apid)
{
unsigned int irq;
- u32 status;
- int id;
+ u32 status, id;
u8 sid = (pa->apid_data[apid].ppid >> 8) & 0xF;
u8 per = pa->apid_data[apid].ppid & 0xFF;
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index f4ffac4..5af176b 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -383,6 +383,12 @@
goto out;
}
+ /* requested mapping size larger than object size */
+ if (vma->vm_end - vma->vm_start > PAGE_ALIGN(asma->size)) {
+ ret = -EINVAL;
+ goto out;
+ }
+
/* requested protection bits must match our allowed protection mask */
if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask, 0)) &
calc_vm_prot_bits(PROT_MASK, 0))) {
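
The ashmem check above rejects mmap() requests larger than the page-aligned object size. A user-space sketch of the same test (PAGE_ALIGN() re-derived here for illustration):

    #include <stdio.h>
    #include <stddef.h>

    #define PAGE_SIZE 4096UL
    #define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

    /* Mirrors the ashmem check: a mapping may cover the object's final
     * partial page, but nothing beyond it. */
    static int mmap_len_ok(unsigned long vm_start, unsigned long vm_end,
                           size_t object_size)
    {
        return (vm_end - vm_start) <= PAGE_ALIGN(object_size);
    }

    int main(void)
    {
        printf("%d\n", mmap_len_ok(0, 8192, 5000));  /* 1: rounds up to 8192 */
        printf("%d\n", mmap_len_ok(0, 12288, 5000)); /* 0: one page too many */
        return 0;
    }
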
diff --git a/drivers/staging/rts5208/sd.c b/drivers/staging/rts5208/sd.c
index b0bbb36..9e63bdf 100644
--- a/drivers/staging/rts5208/sd.c
+++ b/drivers/staging/rts5208/sd.c
@@ -4976,7 +4976,7 @@
goto SD_Execute_Write_Cmd_Failed;
}
- rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
+ retval = rtsx_write_register(chip, SD_BYTE_CNT_L, 0xFF, 0x00);
if (retval != STATUS_SUCCESS) {
rtsx_trace(chip);
goto SD_Execute_Write_Cmd_Failed;
diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
index 04d2b6e..80205f3 100644
--- a/drivers/target/iscsi/iscsi_target.c
+++ b/drivers/target/iscsi/iscsi_target.c
@@ -1435,7 +1435,8 @@
sg_init_table(sg, ARRAY_SIZE(sg));
sg_set_buf(sg, buf, payload_length);
- sg_set_buf(sg + 1, pad_bytes, padding);
+ if (padding)
+ sg_set_buf(sg + 1, pad_bytes, padding);
ahash_request_set_crypt(hash, sg, data_crc, payload_length + padding);
@@ -3949,10 +3950,14 @@
static void iscsit_get_rx_pdu(struct iscsi_conn *conn)
{
int ret;
- u8 buffer[ISCSI_HDR_LEN], opcode;
+ u8 *buffer, opcode;
u32 checksum = 0, digest = 0;
struct kvec iov;
+ buffer = kcalloc(ISCSI_HDR_LEN, sizeof(*buffer), GFP_KERNEL);
+ if (!buffer)
+ return;
+
while (!kthread_should_stop()) {
/*
* Ensure that both TX and RX per connection kthreads
@@ -3960,7 +3965,6 @@
*/
iscsit_thread_check_cpumask(conn, current, 0);
- memset(buffer, 0, ISCSI_HDR_LEN);
memset(&iov, 0, sizeof(struct kvec));
iov.iov_base = buffer;
@@ -3969,7 +3973,7 @@
ret = rx_data(conn, &iov, 1, ISCSI_HDR_LEN);
if (ret != ISCSI_HDR_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- return;
+ break;
}
if (conn->conn_ops->HeaderDigest) {
@@ -3979,7 +3983,7 @@
ret = rx_data(conn, &iov, 1, ISCSI_CRC_LEN);
if (ret != ISCSI_CRC_LEN) {
iscsit_rx_thread_wait_for_tcp(conn);
- return;
+ break;
}
iscsit_do_crypto_hash_buf(conn->conn_rx_hash,
@@ -4003,7 +4007,7 @@
}
if (conn->conn_state == TARG_CONN_STATE_IN_LOGOUT)
- return;
+ break;
opcode = buffer[0] & ISCSI_OPCODE_MASK;
@@ -4014,13 +4018,15 @@
" while in Discovery Session, rejecting.\n", opcode);
iscsit_add_reject(conn, ISCSI_REASON_PROTOCOL_ERROR,
buffer);
- return;
+ break;
}
ret = iscsi_target_rx_opcode(conn, buffer);
if (ret < 0)
- return;
+ break;
}
+
+ kfree(buffer);
}
int iscsi_target_rx_thread(void *arg)
diff --git a/drivers/target/iscsi/iscsi_target_auth.c b/drivers/target/iscsi/iscsi_target_auth.c
index 98f75e5..f0d9730 100644
--- a/drivers/target/iscsi/iscsi_target_auth.c
+++ b/drivers/target/iscsi/iscsi_target_auth.c
@@ -26,27 +26,6 @@
#include "iscsi_target_nego.h"
#include "iscsi_target_auth.h"
-static int chap_string_to_hex(unsigned char *dst, unsigned char *src, int len)
-{
- int j = DIV_ROUND_UP(len, 2), rc;
-
- rc = hex2bin(dst, src, j);
- if (rc < 0)
- pr_debug("CHAP string contains non hex digit symbols\n");
-
- dst[j] = '\0';
- return j;
-}
-
-static void chap_binaryhex_to_asciihex(char *dst, char *src, int src_len)
-{
- int i;
-
- for (i = 0; i < src_len; i++) {
- sprintf(&dst[i*2], "%02x", (int) src[i] & 0xff);
- }
-}
-
static void chap_gen_challenge(
struct iscsi_conn *conn,
int caller,
@@ -59,7 +38,7 @@
memset(challenge_asciihex, 0, CHAP_CHALLENGE_LENGTH * 2 + 1);
get_random_bytes(chap->challenge, CHAP_CHALLENGE_LENGTH);
- chap_binaryhex_to_asciihex(challenge_asciihex, chap->challenge,
+ bin2hex(challenge_asciihex, chap->challenge,
CHAP_CHALLENGE_LENGTH);
/*
* Set CHAP_C, and copy the generated challenge into c_str.
@@ -240,9 +219,16 @@
pr_err("Could not find CHAP_R.\n");
goto out;
}
+ if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
+ if (hex2bin(client_digest, chap_r, MD5_SIGNATURE_SIZE) < 0) {
+ pr_err("Malformed CHAP_R\n");
+ goto out;
+ }
pr_debug("[server] Got CHAP_R=%s\n", chap_r);
- chap_string_to_hex(client_digest, chap_r, strlen(chap_r));
tfm = crypto_alloc_shash("md5", 0, 0);
if (IS_ERR(tfm)) {
@@ -286,7 +272,7 @@
goto out;
}
- chap_binaryhex_to_asciihex(response, server_digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, server_digest, MD5_SIGNATURE_SIZE);
pr_debug("[server] MD5 Server Digest: %s\n", response);
if (memcmp(server_digest, client_digest, MD5_SIGNATURE_SIZE) != 0) {
@@ -341,9 +327,7 @@
pr_err("Could not find CHAP_C.\n");
goto out;
}
- pr_debug("[server] Got CHAP_C=%s\n", challenge);
- challenge_len = chap_string_to_hex(challenge_binhex, challenge,
- strlen(challenge));
+ challenge_len = DIV_ROUND_UP(strlen(challenge), 2);
if (!challenge_len) {
pr_err("Unable to convert incoming challenge\n");
goto out;
@@ -352,6 +336,11 @@
pr_err("CHAP_C exceeds maximum binary size of 1024 bytes\n");
goto out;
}
+ if (hex2bin(challenge_binhex, challenge, challenge_len) < 0) {
+ pr_err("Malformed CHAP_C\n");
+ goto out;
+ }
+ pr_debug("[server] Got CHAP_C=%s\n", challenge);
/*
* During mutual authentication, the CHAP_C generated by the
* initiator must not match the original CHAP_C generated by
@@ -405,7 +394,7 @@
/*
* Convert response from binary to ASCII hex.
*/
- chap_binaryhex_to_asciihex(response, digest, MD5_SIGNATURE_SIZE);
+ bin2hex(response, digest, MD5_SIGNATURE_SIZE);
*nr_out_len += sprintf(nr_out_ptr + *nr_out_len, "CHAP_R=0x%s",
response);
*nr_out_len += 1;
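
The CHAP rework above drops the hand-rolled converters in favour of the kernel's hex2bin()/bin2hex() and, crucially, validates the digest length before decoding, so a short or non-hex CHAP_R can no longer slip through. A user-space sketch of the validate-then-decode pattern (hex2bin() reimplemented here, since the real one lives in the kernel's lib/):

    #include <stdio.h>
    #include <string.h>
    #include <ctype.h>

    #define MD5_SIGNATURE_SIZE 16

    /* Minimal hex2bin(): returns -1 on any non-hex input byte. */
    static int hex2bin_demo(unsigned char *dst, const char *src, size_t bytes)
    {
        for (size_t i = 0; i < bytes; i++) {
            char hi = src[2 * i], lo = src[2 * i + 1];

            if (!isxdigit((unsigned char)hi) || !isxdigit((unsigned char)lo))
                return -1;
            sscanf(&src[2 * i], "%2hhx", &dst[i]);
        }
        return 0;
    }

    static int parse_chap_r(const char *chap_r, unsigned char *digest)
    {
        /* Validate length first: exactly two hex chars per digest byte. */
        if (strlen(chap_r) != MD5_SIGNATURE_SIZE * 2)
            return -1;
        return hex2bin_demo(digest, chap_r, MD5_SIGNATURE_SIZE);
    }

    int main(void)
    {
        unsigned char d[MD5_SIGNATURE_SIZE];

        puts(parse_chap_r("00112233445566778899aabbccddeeff", d) ? "bad" : "ok");
        puts(parse_chap_r("shortvalue", d) ? "bad" : "ok");
        return 0;
    }
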
diff --git a/drivers/target/iscsi/iscsi_target_tpg.c b/drivers/target/iscsi/iscsi_target_tpg.c
index 63e1dcc..761b065 100644
--- a/drivers/target/iscsi/iscsi_target_tpg.c
+++ b/drivers/target/iscsi/iscsi_target_tpg.c
@@ -637,8 +637,7 @@
none = strstr(buf1, NONE);
if (none)
goto out;
- strncat(buf1, ",", strlen(","));
- strncat(buf1, NONE, strlen(NONE));
+ strlcat(buf1, "," NONE, sizeof(buf1));
if (iscsi_update_param_value(param, buf1) < 0)
return -EINVAL;
}
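
The strlcat() conversion above fixes a classic strncat() misuse: strncat()'s third argument bounds how much of the source is appended, not the destination size, so strncat(buf1, ",", strlen(",")) offered no overflow protection at all. strlcat() takes the total destination size and always terminates. A sketch with a minimal strlcat() (it is not present in every libc):

    #include <stdio.h>
    #include <string.h>

    /* Minimal strlcat(): 'size' is the TOTAL destination size and the
     * result is always NUL-terminated - unlike strncat(), whose third
     * argument only limits how many SOURCE bytes are appended. */
    static size_t strlcat_demo(char *dst, const char *src, size_t size)
    {
        size_t dlen = strnlen(dst, size);
        size_t slen = strlen(src);
        size_t copy;

        if (dlen == size)
            return size + slen; /* destination was not terminated */
        copy = size - dlen - 1;
        if (copy > slen)
            copy = slen;
        memcpy(dst + dlen, src, copy);
        dst[dlen + copy] = '\0';
        return dlen + slen;
    }

    int main(void)
    {
        char buf[16] = "CHAP";

        /* strncat(buf, ",None", strlen(",None")) would happily overflow
         * a full buffer; strlcat_demo() truncates at sizeof(buf). */
        strlcat_demo(buf, ",None", sizeof(buf));
        printf("%s\n", buf);
        return 0;
    }
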
diff --git a/drivers/thermal/cpu_cooling.c b/drivers/thermal/cpu_cooling.c
index 02f93f4..76e163e 100644
--- a/drivers/thermal/cpu_cooling.c
+++ b/drivers/thermal/cpu_cooling.c
@@ -339,7 +339,7 @@
unsigned long clipped_freq = ULONG_MAX, floor_freq = 0;
struct cpufreq_cooling_device *cpufreq_dev;
- if (event != CPUFREQ_ADJUST)
+ if (event != CPUFREQ_INCOMPATIBLE)
return NOTIFY_DONE;
mutex_lock(&cooling_list_lock);
diff --git a/drivers/thermal/of-thermal.c b/drivers/thermal/of-thermal.c
index 76e4567..2f1258a 100644
--- a/drivers/thermal/of-thermal.c
+++ b/drivers/thermal/of-thermal.c
@@ -396,10 +396,13 @@
mutex_lock(&tz->lock);
- if (mode == THERMAL_DEVICE_ENABLED)
+ if (mode == THERMAL_DEVICE_ENABLED) {
tz->polling_delay = data->polling_delay;
- else
+ tz->passive_delay = data->passive_delay;
+ } else {
tz->polling_delay = 0;
+ tz->passive_delay = 0;
+ }
mutex_unlock(&tz->lock);
diff --git a/drivers/tty/Kconfig b/drivers/tty/Kconfig
index 9510305..741e966 100644
--- a/drivers/tty/Kconfig
+++ b/drivers/tty/Kconfig
@@ -41,6 +41,9 @@
If unsure, say Y, or else you won't be able to do much with your new
shiny Linux system :-)
+config TTY_FLUSH_LOCAL_ECHO
+ bool
+
config CONSOLE_TRANSLATIONS
depends on VT
default y
diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
index 0475f96..442a3130 100644
--- a/drivers/tty/n_tty.c
+++ b/drivers/tty/n_tty.c
@@ -128,6 +128,10 @@
#define MASK(x) ((x) & (N_TTY_BUF_SIZE - 1))
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+static void continue_process_echoes(struct work_struct *work);
+#endif
+
static inline size_t read_cnt(struct n_tty_data *ldata)
{
return ldata->read_head - ldata->read_tail;
@@ -751,6 +755,16 @@
tail++;
}
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+ if (ldata->echo_commit != tail) {
+ if (!tty->delayed_work) {
+ INIT_DELAYED_WORK(&tty->echo_delayed_work, continue_process_echoes);
+ schedule_delayed_work(&tty->echo_delayed_work, 1);
+ }
+ tty->delayed_work = 1;
+ }
+#endif
+
not_yet_stored:
ldata->echo_tail = tail;
return old_space - space;
@@ -817,6 +831,20 @@
mutex_unlock(&ldata->output_lock);
}
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+static void continue_process_echoes(struct work_struct *work)
+{
+ struct tty_struct *tty =
+ container_of(work, struct tty_struct, echo_delayed_work.work);
+ struct n_tty_data *ldata = tty->disc_data;
+
+ mutex_lock(&ldata->output_lock);
+ tty->delayed_work = 0;
+ __process_echoes(tty);
+ mutex_unlock(&ldata->output_lock);
+}
+#endif
+
/**
* add_echo_byte - add a byte to the echo buffer
* @c: unicode byte to echo
diff --git a/drivers/tty/serial/8250/serial_cs.c b/drivers/tty/serial/8250/serial_cs.c
index 933c268..8106353 100644
--- a/drivers/tty/serial/8250/serial_cs.c
+++ b/drivers/tty/serial/8250/serial_cs.c
@@ -637,8 +637,10 @@
(link->has_func_id) &&
(link->socket->pcmcia_pfc == 0) &&
((link->func_id == CISTPL_FUNCID_MULTI) ||
- (link->func_id == CISTPL_FUNCID_SERIAL)))
- pcmcia_loop_config(link, serial_check_for_multi, info);
+ (link->func_id == CISTPL_FUNCID_SERIAL))) {
+ if (pcmcia_loop_config(link, serial_check_for_multi, info))
+ goto failed;
+ }
/*
* Apply any multi-port quirk.
diff --git a/drivers/tty/serial/cpm_uart/cpm_uart_core.c b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
index d3e3d42..0040c29f 100644
--- a/drivers/tty/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/tty/serial/cpm_uart/cpm_uart_core.c
@@ -1068,8 +1068,8 @@
/* Get the address of the host memory buffer.
*/
bdp = pinfo->rx_cur;
- while (bdp->cbd_sc & BD_SC_EMPTY)
- ;
+ if (bdp->cbd_sc & BD_SC_EMPTY)
+ return NO_POLL_CHAR;
/* If the buffer address is in the CPM DPRAM, don't
* convert it.
@@ -1104,7 +1104,11 @@
poll_chars = 0;
}
if (poll_chars <= 0) {
- poll_chars = poll_wait_key(poll_buf, pinfo);
+ int ret = poll_wait_key(poll_buf, pinfo);
+
+ if (ret == NO_POLL_CHAR)
+ return ret;
+ poll_chars = ret;
pollp = poll_buf;
}
poll_chars--;
diff --git a/drivers/tty/serial/fsl_lpuart.c b/drivers/tty/serial/fsl_lpuart.c
index 937f5e1..e2ec049 100644
--- a/drivers/tty/serial/fsl_lpuart.c
+++ b/drivers/tty/serial/fsl_lpuart.c
@@ -833,7 +833,8 @@
struct circ_buf *ring = &sport->rx_ring;
int ret, nent;
int bits, baud;
- struct tty_struct *tty = tty_port_tty_get(&sport->port.state->port);
+ struct tty_port *port = &sport->port.state->port;
+ struct tty_struct *tty = port->tty;
struct ktermios *termios = &tty->termios;
baud = tty_get_baud_rate(tty);
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index b24edf6..0d82be1 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -2197,6 +2197,14 @@
ret);
return ret;
}
+
+ ret = devm_request_irq(&pdev->dev, rtsirq, imx_rtsint, 0,
+ dev_name(&pdev->dev), sport);
+ if (ret) {
+ dev_err(&pdev->dev, "failed to request rts irq: %d\n",
+ ret);
+ return ret;
+ }
} else {
ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0,
dev_name(&pdev->dev), sport);
diff --git a/drivers/tty/serial/mvebu-uart.c b/drivers/tty/serial/mvebu-uart.c
index 45b57c2..401c983 100644
--- a/drivers/tty/serial/mvebu-uart.c
+++ b/drivers/tty/serial/mvebu-uart.c
@@ -327,8 +327,10 @@
if ((termios->c_cflag & CREAD) == 0)
port->ignore_status_mask |= STAT_RX_RDY | STAT_BRK_ERR;
- if (old)
+ if (old) {
tty_termios_copy_hw(termios, old);
+ termios->c_cflag |= CS8;
+ }
baud = uart_get_baud_rate(port, termios, old, 0, 460800);
uart_update_timeout(port, termios->c_cflag, baud);
diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
index 789c814..4305524 100644
--- a/drivers/tty/tty_io.c
+++ b/drivers/tty/tty_io.c
@@ -1475,6 +1475,7 @@
static int tty_reopen(struct tty_struct *tty)
{
struct tty_driver *driver = tty->driver;
+ int retval;
if (driver->type == TTY_DRIVER_TYPE_PTY &&
driver->subtype == PTY_TYPE_MASTER)
@@ -1488,10 +1489,14 @@
tty->count++;
- if (!tty->ldisc)
- return tty_ldisc_reinit(tty, tty->termios.c_line);
+ if (tty->ldisc)
+ return 0;
- return 0;
+ retval = tty_ldisc_reinit(tty, tty->termios.c_line);
+ if (retval)
+ tty->count--;
+
+ return retval;
}
/**
@@ -1658,6 +1663,10 @@
put_pid(tty->pgrp);
put_pid(tty->session);
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+ if (tty->echo_delayed_work.work.func)
+ cancel_delayed_work_sync(&tty->echo_delayed_work);
+#endif
free_tty_struct(tty);
}
diff --git a/drivers/tty/vt/vt_ioctl.c b/drivers/tty/vt/vt_ioctl.c
index f62c598..638eb9b 100644
--- a/drivers/tty/vt/vt_ioctl.c
+++ b/drivers/tty/vt/vt_ioctl.c
@@ -31,6 +31,8 @@
#include <asm/io.h>
#include <asm/uaccess.h>
+#include <linux/nospec.h>
+
#include <linux/kbd_kern.h>
#include <linux/vt_kern.h>
#include <linux/kbd_diacr.h>
@@ -703,6 +705,8 @@
if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
ret = -ENXIO;
else {
+ vsa.console = array_index_nospec(vsa.console,
+ MAX_NR_CONSOLES + 1);
vsa.console--;
console_lock();
ret = vc_allocate(vsa.console);
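
The vt_ioctl change above is a Spectre-v1 hardening: after the conventional bounds check, array_index_nospec() clamps the index branchlessly, so a mispredicted check cannot be used to speculatively index out of range. A portable sketch modelled on the kernel's generic mask-based implementation (minus the architecture-specific details; like the kernel, it relies on arithmetic right shift of signed values):

    #include <stdio.h>

    #define MAX_NR_CONSOLES 63

    /* Branchless clamp modelled on the generic array_index_nospec():
     * yields an all-ones mask when index < size and an all-zero mask
     * otherwise, so an out-of-range index collapses to 0 even on a
     * speculatively executed path. */
    static unsigned long index_nospec_demo(unsigned long index,
                                           unsigned long size)
    {
        unsigned long mask =
            ~(long)(index | (size - 1 - index)) >> (sizeof(long) * 8 - 1);

        return index & mask;
    }

    int main(void)
    {
        /* vsa.console was already range-checked; the clamp only matters
         * for speculative execution. */
        printf("%lu\n", index_nospec_demo(5, MAX_NR_CONSOLES + 1));   /* 5 */
        printf("%lu\n", index_nospec_demo(200, MAX_NR_CONSOLES + 1)); /* 0 */
        return 0;
    }
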
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index adc0f78..9f00165 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -470,7 +470,7 @@
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
- rv = usb_submit_urb(desc->response, GFP_ATOMIC);
+ rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
dev_err(&desc->intf->dev,
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index 893ebae..988240e 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1450,10 +1450,13 @@
struct async *as = NULL;
struct usb_ctrlrequest *dr = NULL;
unsigned int u, totlen, isofrmlen;
- int i, ret, is_in, num_sgs = 0, ifnum = -1;
+ int i, ret, num_sgs = 0, ifnum = -1;
int number_of_packets = 0;
unsigned int stream_id = 0;
void *buf;
+ bool is_in;
+ bool allow_short = false;
+ bool allow_zero = false;
unsigned long mask = USBDEVFS_URB_SHORT_NOT_OK |
USBDEVFS_URB_BULK_CONTINUATION |
USBDEVFS_URB_NO_FSBR |
@@ -1487,6 +1490,8 @@
u = 0;
switch (uurb->type) {
case USBDEVFS_URB_TYPE_CONTROL:
+ if (is_in)
+ allow_short = true;
if (!usb_endpoint_xfer_control(&ep->desc))
return -EINVAL;
/* min 8 byte setup packet */
@@ -1527,6 +1532,10 @@
break;
case USBDEVFS_URB_TYPE_BULK:
+ if (!is_in)
+ allow_zero = true;
+ else
+ allow_short = true;
switch (usb_endpoint_type(&ep->desc)) {
case USB_ENDPOINT_XFER_CONTROL:
case USB_ENDPOINT_XFER_ISOC:
@@ -1547,6 +1556,10 @@
if (!usb_endpoint_xfer_int(&ep->desc))
return -EINVAL;
interrupt_urb:
+ if (!is_in)
+ allow_zero = true;
+ else
+ allow_short = true;
break;
case USBDEVFS_URB_TYPE_ISO:
@@ -1691,16 +1704,21 @@
u = (is_in ? URB_DIR_IN : URB_DIR_OUT);
if (uurb->flags & USBDEVFS_URB_ISO_ASAP)
u |= URB_ISO_ASAP;
- if (uurb->flags & USBDEVFS_URB_SHORT_NOT_OK && is_in)
+ if (allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
u |= URB_SHORT_NOT_OK;
if (uurb->flags & USBDEVFS_URB_NO_FSBR)
u |= URB_NO_FSBR;
- if (uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+ if (allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
u |= URB_ZERO_PACKET;
if (uurb->flags & USBDEVFS_URB_NO_INTERRUPT)
u |= URB_NO_INTERRUPT;
as->urb->transfer_flags = u;
+ if (!allow_short && uurb->flags & USBDEVFS_URB_SHORT_NOT_OK)
+ dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_SHORT_NOT_OK.\n");
+ if (!allow_zero && uurb->flags & USBDEVFS_URB_ZERO_PACKET)
+ dev_warn(&ps->dev->dev, "Requested nonsensical USBDEVFS_URB_ZERO_PACKET.\n");
+
as->urb->transfer_buffer_length = uurb->buffer_length;
as->urb->setup_packet = (unsigned char *)dr;
dr = NULL;
diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c
index 5532246..7dae981 100644
--- a/drivers/usb/core/driver.c
+++ b/drivers/usb/core/driver.c
@@ -509,7 +509,6 @@
struct device *dev;
struct usb_device *udev;
int retval = 0;
- int lpm_disable_error = -ENODEV;
if (!iface)
return -ENODEV;
@@ -530,16 +529,6 @@
iface->condition = USB_INTERFACE_BOUND;
- /* See the comment about disabling LPM in usb_probe_interface(). */
- if (driver->disable_hub_initiated_lpm) {
- lpm_disable_error = usb_unlocked_disable_lpm(udev);
- if (lpm_disable_error) {
- dev_err(&iface->dev, "%s Failed to disable LPM for driver %s\n.",
- __func__, driver->name);
- return -ENOMEM;
- }
- }
-
/* Claimed interfaces are initially inactive (suspended) and
* runtime-PM-enabled, but only if the driver has autosuspend
* support. Otherwise they are marked active, to prevent the
@@ -558,9 +547,20 @@
if (device_is_registered(dev))
retval = device_bind_driver(dev);
- /* Attempt to re-enable USB3 LPM, if the disable was successful. */
- if (!lpm_disable_error)
- usb_unlocked_enable_lpm(udev);
+ if (retval) {
+ dev->driver = NULL;
+ usb_set_intfdata(iface, NULL);
+ iface->needs_remote_wakeup = 0;
+ iface->condition = USB_INTERFACE_UNBOUND;
+
+ /*
+ * Unbound interfaces are always runtime-PM-disabled
+ * and runtime-PM-suspended
+ */
+ if (driver->supports_autosuspend)
+ pm_runtime_disable(dev);
+ pm_runtime_set_suspended(dev);
+ }
return retval;
}
diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
index bb2a4fe..82dfc60 100644
--- a/drivers/usb/core/usb.c
+++ b/drivers/usb/core/usb.c
@@ -91,6 +91,8 @@
struct usb_interface_cache *intf_cache = NULL;
int i;
+ if (!config)
+ return NULL;
for (i = 0; i < config->desc.bNumInterfaces; i++) {
if (config->intf_cache[i]->altsetting[0].desc.bInterfaceNumber
== iface_num) {
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index e8e5c32..70b3a66 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -3632,6 +3632,13 @@
/* Endpoint IRQ, handle it and return early */
if (event->type.is_devspec == 0) {
/* depevt */
+ /* If a remote-wakeup attempt by the device failed, the core
+ * won't raise a wakeup event after resume. Handle that here,
+ * on the endpoint event that indicates the bus has resumed.
+ */
+ if (dwc->b_suspend &&
+ dwc3_get_link_state(dwc) == DWC3_LINK_STATE_U0)
+ dwc3_gadget_wakeup_interrupt(dwc, false);
return dwc3_endpoint_interrupt(dwc, &event->depevt);
}
diff --git a/drivers/usb/gadget/function/f_ipc.c b/drivers/usb/gadget/function/f_ipc.c
index a79a559..77440f7 100644
--- a/drivers/usb/gadget/function/f_ipc.c
+++ b/drivers/usb/gadget/function/f_ipc.c
@@ -671,22 +671,25 @@
int temp = 0;
unsigned long flags;
- if (ipc_dev) {
- spin_lock_irqsave(&ipc_dev->lock, flags);
- temp += scnprintf(buf + temp, PAGE_SIZE - temp,
- "endpoints: %s, %s\n"
- "bytes to host: %lu\n"
- "bytes to mdm: %lu\n"
- "pending writes: %u\n"
- "pending reads: %u\n",
- ipc_dev->in->name, ipc_dev->out->name,
- ipc_dev->bytes_to_host,
- ipc_dev->bytes_to_mdm,
- ipc_dev->pending_writes,
- ipc_dev->pending_reads);
- spin_unlock_irqrestore(&ipc_dev->lock, flags);
+ if (!ipc_dev || !ipc_dev->in || !ipc_dev->out) {
+ pr_err("ipc_dev instance, or EPs not yet initialised\n");
+ return 0;
}
+ spin_lock_irqsave(&ipc_dev->lock, flags);
+ temp += scnprintf(buf + temp, PAGE_SIZE - temp,
+ "endpoints: %s, %s\n"
+ "bytes to host: %lu\n"
+ "bytes to mdm: %lu\n"
+ "pending writes: %u\n"
+ "pending reads: %u\n",
+ ipc_dev->in->name, ipc_dev->out->name,
+ ipc_dev->bytes_to_host,
+ ipc_dev->bytes_to_mdm,
+ ipc_dev->pending_writes,
+ ipc_dev->pending_reads);
+ spin_unlock_irqrestore(&ipc_dev->lock, flags);
+
return simple_read_from_buffer(ubuf, count, ppos, buf, temp);
}
@@ -695,13 +698,16 @@
{
unsigned long flags;
- if (ipc_dev) {
- spin_lock_irqsave(&ipc_dev->lock, flags);
- ipc_dev->bytes_to_host = 0;
- ipc_dev->bytes_to_mdm = 0;
- spin_unlock_irqrestore(&ipc_dev->lock, flags);
+ if (!ipc_dev) {
+ pr_err("ipc_dev instance not yet initialised\n");
+ return count;
}
+ spin_lock_irqsave(&ipc_dev->lock, flags);
+ ipc_dev->bytes_to_host = 0;
+ ipc_dev->bytes_to_mdm = 0;
+ spin_unlock_irqrestore(&ipc_dev->lock, flags);
+
return count;
}
diff --git a/drivers/usb/gadget/function/f_mtp.c b/drivers/usb/gadget/function/f_mtp.c
index 79ef286..b0b8e92 100644
--- a/drivers/usb/gadget/function/f_mtp.c
+++ b/drivers/usb/gadget/function/f_mtp.c
@@ -823,6 +823,11 @@
offset = dev->xfer_file_offset;
count = dev->xfer_file_length;
+ if (count < 0) {
+ dev->xfer_result = -EINVAL;
+ return;
+ }
+
DBG(cdev, "send_file_work(%lld %lld)\n", offset, count);
if (dev->xfer_send_header) {
@@ -939,6 +944,11 @@
offset = dev->xfer_file_offset;
count = dev->xfer_file_length;
+ if (count < 0) {
+ dev->xfer_result = -EINVAL;
+ return;
+ }
+
DBG(cdev, "receive_file_work(%lld)\n", count);
if (!IS_ALIGNED(count, dev->ep_out->maxpacket))
DBG(cdev, "%s- count(%lld) not multiple of mtu(%d)\n", __func__,
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
index 7af152b3..6712ca2 100644
--- a/drivers/usb/gadget/function/u_data_ipa.c
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -451,8 +451,9 @@
/* update IPA Parameteres here. */
port->ipa_params.usb_connection_speed = gadget->speed;
- port->ipa_params.reset_pipe_after_lpm =
- msm_dwc3_reset_ep_after_lpm(gadget);
+ if (!gadget->is_chipidea)
+ port->ipa_params.reset_pipe_after_lpm =
+ msm_dwc3_reset_ep_after_lpm(gadget);
port->ipa_params.skip_ep_cfg = true;
port->ipa_params.keep_ipa_awake = true;
port->ipa_params.cons_clnt_hdl = -1;
@@ -469,19 +470,29 @@
__func__);
goto out;
}
-
- sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
+ if (!gadget->is_chipidea) {
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB
| MSM_PRODUCER | port->src_pipe_idx;
- port->rx_req->length = 32*1024;
- port->rx_req->udc_priv = sps_params;
- configure_fifo(port->usb_bam_type,
- port->src_connection_idx,
- port->port_usb->out);
- ret = msm_ep_config(gport->out, port->rx_req);
- if (ret) {
- pr_err("msm_ep_config() failed for OUT EP\n");
- spin_unlock_irqrestore(&port->port_lock, flags);
- goto out;
+ port->rx_req->length = 32*1024;
+ port->rx_req->udc_priv = sps_params;
+ configure_fifo(port->usb_bam_type,
+ port->src_connection_idx,
+ port->port_usb->out);
+ ret = msm_ep_config(gport->out, port->rx_req);
+ if (ret) {
+ pr_err("msm_ep_config() failed for OUT EP\n");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto out;
+ }
+ } else {
+ /* gadget->is_chipidea */
+ get_bam2bam_connection_info(port->usb_bam_type,
+ port->src_connection_idx,
+ &port->src_pipe_idx,
+ NULL, NULL, NULL);
+ sps_params = (MSM_SPS_MODE | port->src_pipe_idx |
+ MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
+ port->rx_req->udc_priv = sps_params;
}
}
@@ -496,17 +507,29 @@
__func__);
goto unconfig_msm_ep_out;
}
- sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
- port->dst_pipe_idx;
- port->tx_req->length = 32*1024;
- port->tx_req->udc_priv = sps_params;
- configure_fifo(port->usb_bam_type,
- port->dst_connection_idx, gport->in);
- ret = msm_ep_config(gport->in, port->tx_req);
- if (ret) {
- pr_err("msm_ep_config() failed for IN EP\n");
- spin_unlock_irqrestore(&port->port_lock, flags);
- goto unconfig_msm_ep_out;
+ if (!gadget->is_chipidea) {
+ sps_params = MSM_SPS_MODE | MSM_DISABLE_WB |
+ port->dst_pipe_idx;
+ port->tx_req->length = 32*1024;
+ port->tx_req->udc_priv = sps_params;
+ configure_fifo(port->usb_bam_type,
+ port->dst_connection_idx, gport->in);
+
+ ret = msm_ep_config(gport->in, port->tx_req);
+ if (ret) {
+ pr_err("msm_ep_config() failed for IN EP\n");
+ spin_unlock_irqrestore(&port->port_lock, flags);
+ goto unconfig_msm_ep_out;
+ }
+ } else {
+ /* gadget->is_chipidea */
+ get_bam2bam_connection_info(port->usb_bam_type,
+ port->dst_connection_idx,
+ &port->dst_pipe_idx,
+ NULL, NULL, NULL);
+ sps_params = (MSM_SPS_MODE | port->dst_pipe_idx |
+ MSM_VENDOR_ID) & ~MSM_IS_FINITE_TRANSFER;
+ port->tx_req->udc_priv = sps_params;
}
}
@@ -1163,8 +1186,8 @@
spin_unlock_irqrestore(&port->port_lock, flags);
msm_dwc3_reset_dbm_ep(port->port_usb->in);
spin_lock_irqsave(&port->port_lock, flags);
- usb_bam_resume(port->usb_bam_type, &port->ipa_params);
}
+ usb_bam_resume(port->usb_bam_type, &port->ipa_params);
exit:
spin_unlock_irqrestore(&port->port_lock, flags);
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 22a6755..64909f3 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -538,7 +538,7 @@
}
/* push data to (open) tty */
- if (req->actual) {
+ if (req->actual && tty) {
char *packet = req->buf;
unsigned size = req->actual;
unsigned n;
diff --git a/drivers/usb/gadget/udc/fotg210-udc.c b/drivers/usb/gadget/udc/fotg210-udc.c
index 6ba122c..95df2b3 100644
--- a/drivers/usb/gadget/udc/fotg210-udc.c
+++ b/drivers/usb/gadget/udc/fotg210-udc.c
@@ -1066,12 +1066,15 @@
static int fotg210_udc_remove(struct platform_device *pdev)
{
struct fotg210_udc *fotg210 = platform_get_drvdata(pdev);
+ int i;
usb_del_gadget_udc(&fotg210->gadget);
iounmap(fotg210->reg);
free_irq(platform_get_irq(pdev, 0), fotg210);
fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
+ for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+ kfree(fotg210->ep[i]);
kfree(fotg210);
return 0;
@@ -1102,7 +1105,7 @@
/* initialize udc */
fotg210 = kzalloc(sizeof(struct fotg210_udc), GFP_KERNEL);
if (fotg210 == NULL)
- goto err_alloc;
+ goto err;
for (i = 0; i < FOTG210_MAX_NUM_EP; i++) {
_ep[i] = kzalloc(sizeof(struct fotg210_ep), GFP_KERNEL);
@@ -1114,7 +1117,7 @@
fotg210->reg = ioremap(res->start, resource_size(res));
if (fotg210->reg == NULL) {
pr_err("ioremap error.\n");
- goto err_map;
+ goto err_alloc;
}
spin_lock_init(&fotg210->lock);
@@ -1162,7 +1165,7 @@
fotg210->ep0_req = fotg210_ep_alloc_request(&fotg210->ep[0]->ep,
GFP_KERNEL);
if (fotg210->ep0_req == NULL)
- goto err_req;
+ goto err_map;
fotg210_init(fotg210);
@@ -1190,12 +1193,14 @@
fotg210_ep_free_request(&fotg210->ep[0]->ep, fotg210->ep0_req);
err_map:
- if (fotg210->reg)
- iounmap(fotg210->reg);
+ iounmap(fotg210->reg);
err_alloc:
+ for (i = 0; i < FOTG210_MAX_NUM_EP; i++)
+ kfree(fotg210->ep[i]);
kfree(fotg210);
+err:
return ret;
}
diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c
index 643e087..da89f3f 100644
--- a/drivers/usb/host/xhci-hub.c
+++ b/drivers/usb/host/xhci-hub.c
@@ -1257,16 +1257,17 @@
break;
}
- /* Software should not attempt to set
- * port link state above '3' (U3) and the port
- * must be enabled.
- */
- if ((temp & PORT_PE) == 0 ||
- (link_state > USB_SS_PORT_LS_U3)) {
- xhci_warn(xhci, "Cannot set link state.\n");
+ /* Port must be enabled */
+ if (!(temp & PORT_PE)) {
+ retval = -ENODEV;
+ break;
+ }
+ /* Can't set port link state above '3' (U3) */
+ if (link_state > USB_SS_PORT_LS_U3) {
+ xhci_warn(xhci, "Cannot set port %d link state %d\n",
+ wIndex, link_state);
goto error;
}
-
if (link_state == USB_SS_PORT_LS_U3) {
slot_id = xhci_find_slot_id_by_port(hcd, xhci,
wIndex + 1);
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index ce9e457..c108758 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -735,10 +735,10 @@
xhci_mtk_host_enable(mtk);
xhci_dbg(xhci, "%s: restart port polling\n", __func__);
- set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
- usb_hcd_poll_rh_status(hcd);
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
+ usb_hcd_poll_rh_status(hcd);
return 0;
}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index f6782a3..b514055 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -179,6 +179,8 @@
}
if (pdev->vendor == PCI_VENDOR_ID_INTEL &&
(pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI ||
+ pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_APL_XHCI ||
pdev->device == PCI_DEVICE_ID_INTEL_DNV_XHCI))
xhci->quirks |= XHCI_MISSING_CAS;
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index e36c6c6..1e67234 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -423,6 +423,9 @@
spin_unlock_irqrestore(&dev->lock, flags);
mutex_unlock(&dev->io_mutex);
+ if (WARN_ON_ONCE(len >= sizeof(in_buffer)))
+ return -EIO;
+
return simple_read_from_buffer(buffer, count, ppos, in_buffer, len);
}
diff --git a/drivers/usb/phy/phy-msm-qusb-v2.c b/drivers/usb/phy/phy-msm-qusb-v2.c
index a17974c..41e5e32 100644
--- a/drivers/usb/phy/phy-msm-qusb-v2.c
+++ b/drivers/usb/phy/phy-msm-qusb-v2.c
@@ -140,8 +140,8 @@
bool chirp_disable;
struct pinctrl *pinctrl;
- struct pinctrl_state *atest_usb13_suspend;
- struct pinctrl_state *atest_usb13_active;
+ struct pinctrl_state *atest_usb_suspend;
+ struct pinctrl_state *atest_usb_active;
/* emulation targets specific */
void __iomem *emu_phy_base;
@@ -669,9 +669,9 @@
struct qusb_phy *qphy = container_of(timer, struct qusb_phy, timer);
int ret = 0;
- if (qphy->pinctrl && qphy->atest_usb13_suspend) {
+ if (qphy->pinctrl && qphy->atest_usb_suspend) {
ret = pinctrl_select_state(qphy->pinctrl,
- qphy->atest_usb13_suspend);
+ qphy->atest_usb_suspend);
if (ret < 0)
dev_err(qphy->phy.dev,
"pinctrl state suspend select failed\n");
@@ -687,9 +687,9 @@
dev_dbg(phy->dev, "%s\n", __func__);
- if (qphy->pinctrl && qphy->atest_usb13_active) {
+ if (qphy->pinctrl && qphy->atest_usb_active) {
ret = pinctrl_select_state(qphy->pinctrl,
- qphy->atest_usb13_active);
+ qphy->atest_usb_active);
if (ret < 0) {
dev_err(phy->dev,
"pinctrl state active select failed\n");
@@ -1306,17 +1306,28 @@
dev_err(dev, "pinctrl not available\n");
goto skip_pinctrl_config;
}
- qphy->atest_usb13_suspend = pinctrl_lookup_state(qphy->pinctrl,
+ qphy->atest_usb_suspend = pinctrl_lookup_state(qphy->pinctrl,
"atest_usb13_suspend");
- if (IS_ERR(qphy->atest_usb13_suspend)) {
- dev_err(dev, "pinctrl lookup atest_usb13_suspend failed\n");
- goto skip_pinctrl_config;
+
+ if (IS_ERR(qphy->atest_usb_suspend) &&
+ PTR_ERR(qphy->atest_usb_suspend) == -ENODEV) {
+ qphy->atest_usb_suspend = pinctrl_lookup_state(qphy->pinctrl,
+ "suspend");
+ if (IS_ERR(qphy->atest_usb_suspend)) {
+ dev_err(dev, "pinctrl lookup suspend failed\n");
+ goto skip_pinctrl_config;
+ }
}
- qphy->atest_usb13_active = pinctrl_lookup_state(qphy->pinctrl,
+ qphy->atest_usb_active = pinctrl_lookup_state(qphy->pinctrl,
"atest_usb13_active");
- if (IS_ERR(qphy->atest_usb13_active))
- dev_err(dev, "pinctrl lookup atest_usb13_active failed\n");
+ if (IS_ERR(qphy->atest_usb_active) &&
+ PTR_ERR(qphy->atest_usb_active) == -ENODEV) {
+ qphy->atest_usb_active = pinctrl_lookup_state(qphy->pinctrl,
+ "active");
+ if (IS_ERR(qphy->atest_usb_active))
+ dev_err(dev, "pinctrl lookup active failed\n");
+ }
hrtimer_init(&qphy->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
qphy->timer.function = qusb_dis_ext_pulldown_timer;
diff --git a/drivers/usb/serial/kobil_sct.c b/drivers/usb/serial/kobil_sct.c
index 813035f..7d25267 100644
--- a/drivers/usb/serial/kobil_sct.c
+++ b/drivers/usb/serial/kobil_sct.c
@@ -408,12 +408,20 @@
transfer_buffer_length,
KOBIL_TIMEOUT);
- dev_dbg(&port->dev, "%s - Send get_status_line_state URB returns: %i. Statusline: %02x\n",
- __func__, result, transfer_buffer[0]);
+ dev_dbg(&port->dev, "Send get_status_line_state URB returns: %i\n",
+ result);
+ if (result < 1) {
+ if (result >= 0)
+ result = -EIO;
+ goto out_free;
+ }
+
+ dev_dbg(&port->dev, "Statusline: %02x\n", transfer_buffer[0]);
result = 0;
if ((transfer_buffer[0] & SUSBCR_GSL_DSR) != 0)
result = TIOCM_DSR;
+out_free:
kfree(transfer_buffer);
return result;
}
diff --git a/drivers/usb/serial/usb-serial-simple.c b/drivers/usb/serial/usb-serial-simple.c
index 2674da4..6d6acf2 100644
--- a/drivers/usb/serial/usb-serial-simple.c
+++ b/drivers/usb/serial/usb-serial-simple.c
@@ -87,7 +87,8 @@
/* Motorola Tetra driver */
#define MOTOROLA_TETRA_IDS() \
- { USB_DEVICE(0x0cad, 0x9011) } /* Motorola Solutions TETRA PEI */
+ { USB_DEVICE(0x0cad, 0x9011) }, /* Motorola Solutions TETRA PEI */ \
+ { USB_DEVICE(0x0cad, 0x9012) } /* MTP6550 */
DEVICE(motorola_tetra, MOTOROLA_TETRA_IDS);
/* Novatel Wireless GPS driver */
diff --git a/drivers/usb/wusbcore/security.c b/drivers/usb/wusbcore/security.c
index 8c9421b..6bf86ca 100644
--- a/drivers/usb/wusbcore/security.c
+++ b/drivers/usb/wusbcore/security.c
@@ -230,7 +230,7 @@
result = usb_get_descriptor(usb_dev, USB_DT_SECURITY,
0, secd, sizeof(*secd));
- if (result < sizeof(*secd)) {
+ if (result < (int)sizeof(*secd)) {
dev_err(dev, "Can't read security descriptor or "
"not enough data: %d\n", result);
goto out;
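
The wusbcore cast above defuses a signed/unsigned comparison trap: usb_get_descriptor() can return a negative errno, but sizeof() yields an unsigned size_t, so result < sizeof(*secd) silently promotes the negative value to a huge unsigned one and the error check never fires. A minimal demonstration:

    #include <stdio.h>

    struct secd { char bytes[8]; };

    int main(void)
    {
        int result = -71; /* e.g. -EPROTO from a failed transfer */

        /* Unsigned comparison: 'result' converts to a huge unsigned
         * value, so the error is NOT caught. */
        if (result < sizeof(struct secd))
            puts("caught (unsigned)");
        else
            puts("missed (unsigned)");

        /* Signed comparison after the cast: the error IS caught. */
        if (result < (int)sizeof(struct secd))
            puts("caught (signed)");

        return 0;
    }
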
diff --git a/drivers/uwb/hwa-rc.c b/drivers/uwb/hwa-rc.c
index 9a53912..5d3ba74 100644
--- a/drivers/uwb/hwa-rc.c
+++ b/drivers/uwb/hwa-rc.c
@@ -873,6 +873,7 @@
error_rc_add:
usb_put_intf(iface);
usb_put_dev(hwarc->usb_dev);
+ kfree(hwarc);
error_alloc:
uwb_rc_put(uwb_rc);
error_rc_alloc:
diff --git a/drivers/video/fbdev/aty/atyfb.h b/drivers/video/fbdev/aty/atyfb.h
index 63c4842..46e0e8b 100644
--- a/drivers/video/fbdev/aty/atyfb.h
+++ b/drivers/video/fbdev/aty/atyfb.h
@@ -332,6 +332,8 @@
extern void aty_set_pll_ct(const struct fb_info *info, const union aty_pll *pll);
extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
+extern const u8 aty_postdividers[8];
+
/*
* Hardware cursor support
@@ -358,7 +360,6 @@
extern void aty_reset_engine(const struct atyfb_par *par);
extern void aty_init_engine(struct atyfb_par *par, struct fb_info *info);
-extern u8 aty_ld_pll_ct(int offset, const struct atyfb_par *par);
void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area);
void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect);
diff --git a/drivers/video/fbdev/aty/atyfb_base.c b/drivers/video/fbdev/aty/atyfb_base.c
index 81367cf..da748c3 100644
--- a/drivers/video/fbdev/aty/atyfb_base.c
+++ b/drivers/video/fbdev/aty/atyfb_base.c
@@ -3093,17 +3093,18 @@
/*
* PLL Reference Divider M:
*/
- M = pll_regs[2];
+ M = pll_regs[PLL_REF_DIV];
/*
* PLL Feedback Divider N (Dependent on CLOCK_CNTL):
*/
- N = pll_regs[7 + (clock_cntl & 3)];
+ N = pll_regs[VCLK0_FB_DIV + (clock_cntl & 3)];
/*
* PLL Post Divider P (Dependent on CLOCK_CNTL):
*/
- P = 1 << (pll_regs[6] >> ((clock_cntl & 3) << 1));
+ P = aty_postdividers[((pll_regs[VCLK_POST_DIV] >> ((clock_cntl & 3) << 1)) & 3) |
+ ((pll_regs[PLL_EXT_CNTL] >> (2 + (clock_cntl & 3))) & 4)];
/*
* PLL Divider Q:
diff --git a/drivers/video/fbdev/aty/mach64_ct.c b/drivers/video/fbdev/aty/mach64_ct.c
index 51f29d6..af54256 100644
--- a/drivers/video/fbdev/aty/mach64_ct.c
+++ b/drivers/video/fbdev/aty/mach64_ct.c
@@ -114,7 +114,7 @@
*/
#define Maximum_DSP_PRECISION 7
-static u8 postdividers[] = {1,2,4,8,3};
+const u8 aty_postdividers[8] = {1,2,4,8,3,5,6,12};
static int aty_dsp_gt(const struct fb_info *info, u32 bpp, struct pll_ct *pll)
{
@@ -221,7 +221,7 @@
pll->vclk_post_div += (q < 64*8);
pll->vclk_post_div += (q < 32*8);
}
- pll->vclk_post_div_real = postdividers[pll->vclk_post_div];
+ pll->vclk_post_div_real = aty_postdividers[pll->vclk_post_div];
// pll->vclk_post_div <<= 6;
pll->vclk_fb_div = q * pll->vclk_post_div_real / 8;
pllvclk = (1000000 * 2 * pll->vclk_fb_div) /
@@ -512,7 +512,7 @@
u8 mclk_fb_div, pll_ext_cntl;
pll->ct.pll_ref_div = aty_ld_pll_ct(PLL_REF_DIV, par);
pll_ext_cntl = aty_ld_pll_ct(PLL_EXT_CNTL, par);
- pll->ct.xclk_post_div_real = postdividers[pll_ext_cntl & 0x07];
+ pll->ct.xclk_post_div_real = aty_postdividers[pll_ext_cntl & 0x07];
mclk_fb_div = aty_ld_pll_ct(MCLK_FB_DIV, par);
if (pll_ext_cntl & PLL_MFB_TIMES_4_2B)
mclk_fb_div <<= 1;
@@ -534,7 +534,7 @@
xpost_div += (q < 64*8);
xpost_div += (q < 32*8);
}
- pll->ct.xclk_post_div_real = postdividers[xpost_div];
+ pll->ct.xclk_post_div_real = aty_postdividers[xpost_div];
pll->ct.mclk_fb_div = q * pll->ct.xclk_post_div_real / 8;
#ifdef CONFIG_PPC
@@ -583,7 +583,7 @@
mpost_div += (q < 64*8);
mpost_div += (q < 32*8);
}
- sclk_post_div_real = postdividers[mpost_div];
+ sclk_post_div_real = aty_postdividers[mpost_div];
pll->ct.sclk_fb_div = q * sclk_post_div_real / 8;
pll->ct.spll_cntl2 = mpost_div << 4;
#ifdef DEBUG
diff --git a/drivers/video/fbdev/msm/mdp3_ctrl.c b/drivers/video/fbdev/msm/mdp3_ctrl.c
index 70864c4..8a40d70 100644
--- a/drivers/video/fbdev/msm/mdp3_ctrl.c
+++ b/drivers/video/fbdev/msm/mdp3_ctrl.c
@@ -885,6 +885,13 @@
te.hw_vsync_mode = panel_info->mipi.hw_vsync_mode;
te.tear_check_en = panel_info->te.tear_check_en;
te.sync_cfg_height = panel_info->te.sync_cfg_height;
+
+ /* For mdp3, max. value of CFG_HEIGHT is 0x7ff,
+ * for mdp5, max. value of CFG_HEIGHT is 0xffff.
+ */
+ if (te.sync_cfg_height > 0x7ff)
+ te.sync_cfg_height = 0x7ff;
+
te.vsync_init_val = panel_info->te.vsync_init_val;
te.sync_threshold_start = panel_info->te.sync_threshold_start;
te.sync_threshold_continue = panel_info->te.sync_threshold_continue;
@@ -953,7 +960,7 @@
MDSS_EVENT_UNBLANK, NULL);
rc |= panel->event_handler(panel,
MDSS_EVENT_PANEL_ON, NULL);
- if (mdss_fb_is_power_on_ulp(mfd))
+ if (mdss_fb_is_power_on_lp(mfd))
rc |= mdp3_enable_panic_ctrl();
mdp3_clk_enable(0, 0);
}
@@ -1099,7 +1106,7 @@
*/
pm_runtime_get_sync(&mdp3_res->pdev->dev);
- MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mdss_fb_is_power_on_ulp(mfd),
+ MDSS_XLOG(XLOG_FUNC_ENTRY, __LINE__, mdss_fb_is_power_on_lp(mfd),
mfd->panel_power_state);
panel = mdp3_session->panel;
@@ -1240,9 +1247,9 @@
}
}
- if (mdss_fb_is_power_on_ulp(mfd) &&
+ if (mdss_fb_is_power_on_lp(mfd) &&
(mfd->panel.type == MIPI_CMD_PANEL)) {
- pr_debug("%s: Disable MDP3 clocks in ULP\n", __func__);
+ pr_debug("%s: Disable MDP3 clocks in LP\n", __func__);
if (!mdp3_session->clk_on)
mdp3_ctrl_clk_enable(mfd, 1);
/*
@@ -1252,7 +1259,7 @@
rc = mdp3_session->dma->stop(mdp3_session->dma,
mdp3_session->intf);
if (rc)
- pr_warn("fail to stop the MDP3 dma in ULP\n");
+ pr_warn("fail to stop the MDP3 dma in LP\n");
/* Wait to ensure TG to turn off */
msleep(20);
/*
diff --git a/drivers/video/fbdev/msm/mdp3_dma.c b/drivers/video/fbdev/msm/mdp3_dma.c
index f37c378..e569a0b 100644
--- a/drivers/video/fbdev/msm/mdp3_dma.c
+++ b/drivers/video/fbdev/msm/mdp3_dma.c
@@ -295,7 +295,7 @@
vsync_clk_speed_hz = MDP_VSYNC_CLK_RATE;
- cfg = total_lines << VSYNC_TOTAL_LINES_SHIFT;
+ cfg = te->sync_cfg_height << VSYNC_TOTAL_LINES_SHIFT;
total_lines *= te->frame_rate;
vclks_line = (total_lines) ? vsync_clk_speed_hz / total_lines : 0;
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 850b872..da80971 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -3004,7 +3004,7 @@
ret = mdss_wait_sync_fence(fences[i], wait_ms);
- if (ret == -ETIME) {
+ if (ret == -ETIMEDOUT) {
wait_jf = timeout - jiffies;
wait_ms = jiffies_to_msecs(wait_jf);
if (wait_jf < 0)
@@ -3021,14 +3021,14 @@
MDSS_XLOG_TOUT_HANDLER("mdp");
ret = mdss_wait_sync_fence(fences[i], wait_ms);
- if (ret == -ETIME)
+ if (ret == -ETIMEDOUT)
break;
}
mdss_put_sync_fence(fences[i]);
}
if (ret < 0) {
- pr_err("%s: sync_fence_wait failed! ret = %x\n",
+ pr_err("%s: sync_fence_wait failed! ret = %d\n",
sync_pt_data->fence_name, ret);
for (; i < fence_cnt; i++)
mdss_put_sync_fence(fences[i]);
@@ -3704,6 +3704,19 @@
int ret = -ENOTSUPP;
u32 new_dsi_mode, dynamic_dsi_switch = 0;
+ if (mfd->panel_info->panel_dead) {
+ pr_debug("Panel dead, Signal fence and exit commit\n");
+ /*
+ * In case of ESD attack, return early from commit
+ * after signalling fences.
+ */
+ mdss_fb_release_kickoff(mfd);
+ mdss_fb_signal_timeline(sync_pt_data);
+ if ((mfd->panel.type == MIPI_CMD_PANEL) &&
+ (mfd->mdp.signal_retire_fence))
+ mfd->mdp.signal_retire_fence(mfd, 1);
+ return ret;
+ }
if (!sync_pt_data->async_wait_fences)
mdss_fb_wait_for_fence(sync_pt_data);
sync_pt_data->flushed = false;
@@ -4583,7 +4596,6 @@
struct mdp_frc_info *frc_info = NULL;
struct mdp_frc_info __user *frc_info_user;
struct msm_fb_data_type *mfd;
- struct mdss_overlay_private *mdp5_data = NULL;
ret = copy_from_user(&commit, argp, sizeof(struct mdp_layer_commit));
if (ret) {
@@ -4595,26 +4607,6 @@
if (!mfd)
return -EINVAL;
- mdp5_data = mfd_to_mdp5_data(mfd);
-
- if (mfd->panel_info->panel_dead) {
- pr_debug("early commit return\n");
- MDSS_XLOG(mfd->panel_info->panel_dead);
- /*
- * In case of an ESD attack, since we early return from the
- * commits, we need to signal the outstanding fences.
- */
- mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
- atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
- mutex_unlock(&mfd->mdp_sync_pt_data.sync_mutex);
- mdss_fb_release_fences(mfd);
- if ((mfd->panel.type == MIPI_CMD_PANEL) &&
- mfd->mdp.signal_retire_fence && mdp5_data)
- mfd->mdp.signal_retire_fence(mfd,
- mdp5_data->retire_cnt);
- return 0;
- }
-
output_layer_user = commit.commit_v1.output_layer;
if (output_layer_user) {
buffer_size = sizeof(struct mdp_output_layer);
diff --git a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
index ef69273..a3edb20 100644
--- a/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
+++ b/drivers/video/fbdev/omap2/omapfb/omapfb-ioctl.c
@@ -496,6 +496,9 @@
if (!access_ok(VERIFY_WRITE, mr->buffer, mr->buffer_size))
return -EFAULT;
+ if (mr->w > 4096 || mr->h > 4096)
+ return -EINVAL;
+
if (mr->w * mr->h * 3 > mr->buffer_size)
return -EINVAL;
@@ -509,7 +512,7 @@
mr->x, mr->y, mr->w, mr->h);
if (r > 0) {
- if (copy_to_user(mr->buffer, buf, mr->buffer_size))
+ if (copy_to_user(mr->buffer, buf, r))
r = -EFAULT;
}
diff --git a/drivers/vservices/Kconfig b/drivers/vservices/Kconfig
new file mode 100644
index 0000000..16b3bda
--- /dev/null
+++ b/drivers/vservices/Kconfig
@@ -0,0 +1,81 @@
+#
+# OKL4 Virtual Services framework
+#
+
+menuconfig VSERVICES_SUPPORT
+ tristate "OKL4 Virtual Services support"
+ default OKL4_GUEST || OKL4_VIRTUALISATION
+ select HOTPLUG
+ help
+ This option adds core support for OKL4 Virtual Services. The Virtual
+ Services framework is an inter-OS device/service sharing
+ protocol which is supported on OKL4 Microvisor virtualization
+ platforms. You will also need drivers from the following menu in
+ order to make use of it.
+
+if VSERVICES_SUPPORT
+
+config VSERVICES_CHAR_DEV
+ bool "Virtual Services user-space service API"
+ default y
+ help
+ Select this if you want to use user-space service drivers. You will
+ also need udev rules that create device nodes, and protocol code
+ generated by the OK Mill tool.
+
+config VSERVICES_DEBUG
+ bool "Virtual Services debugging support"
+ help
+ Select this if you want to enable Virtual Services core framework
+ debugging. The debug messages for various components of the Virtual
+ Services core framework can be toggled at runtime on a per-session
+ basis via sysfs. When Virtual Services debugging is enabled here
+ but disabled at runtime, it has a minimal performance impact.
+
+config VSERVICES_LOCK_DEBUG
+ bool "Debug Virtual Services state locks"
+ default DEBUG_KERNEL
+ help
+ This option enables some runtime checks that Virtual Services
+ state lock functions are used correctly in service drivers.
+
+config VSERVICES_SERVER
+ tristate "Virtual Services server support"
+ depends on SYSFS
+ default y
+ help
+ This option adds support for Virtual Services servers, which allows
+ exporting of services from this Linux to other environments. Servers
+ are created at runtime by writing to files in
+ /sys/bus/vservices-server.
+
+config VSERVICES_CLIENT
+ tristate "Virtual Services client support"
+ default y
+ help
+ This option adds support for Virtual Services clients, which allows
+ connecting to services exported from other environments.
+
+config VSERVICES_SKELETON_DRIVER
+ tristate "Virtual Services skeleton driver"
+ depends on VSERVICES_SERVER || VSERVICES_CLIENT
+ default n
+ help
+ This option adds support for a skeleton virtual service driver. This
+ driver can be used for templating or testing of virtual service
+ drivers. If unsure say N.
+
+config VSERVICES_NAMED_DEVICE
+ bool "Virtual Services use named device node in /dev"
+ default n
+ help
+ Select this if you want the device node in /dev to have a name
+ rather than a number.
+
+source "drivers/vservices/transport/Kconfig"
+
+source "drivers/vservices/protocol/Kconfig"
+
+source "drivers/vservices/Kconfig.stacks"
+
+endif # VSERVICES_SUPPORT
diff --git a/drivers/vservices/Kconfig.stacks b/drivers/vservices/Kconfig.stacks
new file mode 100644
index 0000000..97eba53
--- /dev/null
+++ b/drivers/vservices/Kconfig.stacks
@@ -0,0 +1,7 @@
+#
+# vServices drivers configuration
+#
+
+menu "Client and Server drivers"
+
+endmenu
diff --git a/drivers/vservices/Makefile b/drivers/vservices/Makefile
new file mode 100644
index 0000000..685ba0a
--- /dev/null
+++ b/drivers/vservices/Makefile
@@ -0,0 +1,16 @@
+ccflags-y += -Werror
+ccflags-$(CONFIG_VSERVICES_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += vservices.o
+vservices-objs-$(CONFIG_VSERVICES_CHAR_DEV) += devio.o
+vservices-objs = session.o $(vservices-objs-y)
+
+obj-$(CONFIG_VSERVICES_CLIENT) += core_client.o
+obj-$(CONFIG_VSERVICES_SERVER) += core_server.o
+
+obj-$(CONFIG_VSERVICES_SKELETON_DRIVER) += vservices_skeleton_driver.o
+vservices_skeleton_driver-objs = skeleton_driver.o
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += transport/
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += protocol/
diff --git a/drivers/vservices/compat.h b/drivers/vservices/compat.h
new file mode 100644
index 0000000..5f6926d
--- /dev/null
+++ b/drivers/vservices/compat.h
@@ -0,0 +1,59 @@
+/*
+ * drivers/vservices/compat.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Wrapper functions/definitions for compatibility between different kernel
+ * versions.
+ */
+
+#ifndef _VSERVICES_COMPAT_H
+#define _VSERVICES_COMPAT_H
+
+#include <linux/workqueue.h>
+#include <linux/version.h>
+
+/* The INIT_WORK_ONSTACK macro has a slightly different name in older kernels */
+#ifndef INIT_WORK_ONSTACK
+#define INIT_WORK_ONSTACK(_work, _func) INIT_WORK_ON_STACK(_work, _func)
+#endif
+
+/*
+ * We require a workqueue with no concurrency. This is provided by
+ * create_singlethread_workqueue() in kernels prior to 2.6.36.
+ * In later versions, create_singlethread_workqueue() enables WQ_MEM_RECLAIM and
+ * thus WQ_RESCUER, which allows work items to be grabbed by a rescuer thread
+ * and run concurrently if the queue is running too slowly. We must use
+ * alloc_ordered_workqueue() instead, to disable the rescuer.
+ */
+static inline struct workqueue_struct *
+vs_create_workqueue(const char *name)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36)
+ return create_singlethread_workqueue(name);
+#else
+ return alloc_ordered_workqueue(name, 0);
+#endif
+}
+
+/*
+ * The max3 macro has only been present from 2.6.37
+ * (commit: f27c85c56b32c42bcc54a43189c1e00fdceb23ec)
+ */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37)
+#define max3(x, y, z) ({ \
+ typeof(x) _max1 = (x); \
+ typeof(y) _max2 = (y); \
+ typeof(z) _max3 = (z); \
+ (void) (&_max1 == &_max2); \
+ (void) (&_max1 == &_max3); \
+ _max1 > _max2 ? (_max1 > _max3 ? _max1 : _max3) : \
+ (_max2 > _max3 ? _max2 : _max3); })
+#endif
+
+#endif /* _VSERVICES_COMPAT_H */
diff --git a/drivers/vservices/core_client.c b/drivers/vservices/core_client.c
new file mode 100644
index 0000000..4cc78ac
--- /dev/null
+++ b/drivers/vservices/core_client.c
@@ -0,0 +1,733 @@
+/*
+ * drivers/vservices/core_client.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Client side core service application driver. This is responsible for:
+ *
+ * - automatically connecting to the server when it becomes ready;
+ * - sending a reset command to the server if something has gone wrong; and
+ * - enumerating all the available services.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/completion.h>
+#include <linux/workqueue.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/module.h>
+
+#include <vservices/types.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/client.h>
+
+#include "session.h"
+#include "transport.h"
+#include "compat.h"
+
+struct core_client {
+ struct vs_client_core_state state;
+ struct vs_service_device *service;
+
+ struct list_head message_queue;
+ struct mutex message_queue_lock;
+ struct work_struct message_queue_work;
+};
+
+struct pending_reset {
+ struct vs_service_device *service;
+ struct list_head list;
+};
+
+#define to_core_client(x) container_of(x, struct core_client, state)
+#define dev_to_core_client(x) to_core_client(dev_get_drvdata(x))
+
+static int vs_client_core_fatal_error(struct vs_client_core_state *state)
+{
+ struct core_client *client = to_core_client(state);
+
+ /* Force a transport level reset */
+ dev_err(&client->service->dev, "Fatal error - resetting session\n");
+ return -EPROTO;
+}
+
+static struct core_client *
+vs_client_session_core_client(struct vs_session_device *session)
+{
+ struct vs_service_device *core_service = session->core_service;
+
+ if (!core_service)
+ return NULL;
+
+ return dev_to_core_client(&core_service->dev);
+}
+
+static ssize_t client_core_reset_service_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *core_service = to_vs_service_device(dev);
+ struct vs_session_device *session =
+ vs_service_get_session(core_service);
+ struct vs_service_device *target;
+ vs_service_id_t service_id;
+ unsigned long val;
+ int err;
+
+ /* Writing a valid service id to this file resets that service */
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
+
+ service_id = val;
+ target = vs_session_get_service(session, service_id);
+ if (!target)
+ return -ENODEV;
+
+ err = vs_service_reset(target, core_service);
+
+ vs_put_service(target);
+ return err < 0 ? err : count;
+}
+
+static DEVICE_ATTR(reset_service, S_IWUSR, NULL,
+ client_core_reset_service_store);
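+
+/*
+ * Illustrative use from userspace (the sysfs path is hypothetical and
+ * depends on how sessions are named on the target):
+ *
+ *   # reset the service with id 3 on this session
+ *   echo 3 > /sys/bus/vservices-client/devices/<session>:0/reset_service
+ */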
+
+static struct attribute *client_core_dev_attrs[] = {
+ &dev_attr_reset_service.attr,
+ NULL,
+};
+
+static const struct attribute_group client_core_attr_group = {
+ .attrs = client_core_dev_attrs,
+};
+
+/*
+ * Protocol callbacks
+ */
+static int
+vs_client_core_handle_service_removed(struct vs_client_core_state *state,
+ u32 service_id)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_session_device *session =
+ vs_service_get_session(client->service);
+ struct vs_service_device *service;
+ int ret;
+
+ service = vs_session_get_service(session, service_id);
+ if (!service)
+ return -EINVAL;
+
+ ret = vs_service_handle_delete(service);
+ vs_put_service(service);
+ return ret;
+}
+
+static int vs_client_core_create_service(struct core_client *client,
+ struct vs_session_device *session, vs_service_id_t service_id,
+ struct vs_string *protocol_name_string,
+ struct vs_string *service_name_string)
+{
+ char *protocol_name, *service_name;
+ struct vs_service_device *service;
+ int ret = 0;
+
+ protocol_name = vs_string_dup(protocol_name_string, GFP_KERNEL);
+ if (!protocol_name) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ service_name = vs_string_dup(service_name_string, GFP_KERNEL);
+ if (!service_name) {
+ ret = -ENOMEM;
+ goto out_free_protocol_name;
+ }
+
+ service = vs_service_register(session, client->service, service_id,
+ protocol_name, service_name, NULL);
+ if (IS_ERR(service)) {
+ ret = PTR_ERR(service);
+ goto out_free_service_name;
+ }
+
+ vs_service_start(service);
+
+out_free_service_name:
+ kfree(service_name);
+out_free_protocol_name:
+ kfree(protocol_name);
+out:
+ return ret;
+}
+
+static int
+vs_client_core_handle_service_created(struct vs_client_core_state *state,
+ u32 service_id, struct vs_string service_name,
+ struct vs_string protocol_name, struct vs_mbuf *mbuf)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_session_device *session =
+ vs_service_get_session(client->service);
+ int err;
+
+ vs_dev_debug(VS_DEBUG_CLIENT_CORE,
+ vs_service_get_session(client->service),
+ &client->service->dev, "Service info for %d received\n",
+ service_id);
+
+ err = vs_client_core_create_service(client, session, service_id,
+ &protocol_name, &service_name);
+ if (err)
+ dev_err(&session->dev,
+ "Failed to create service with id %d: %d\n",
+ service_id, err);
+
+ vs_client_core_core_free_service_created(state, &service_name,
+ &protocol_name, mbuf);
+
+ return err;
+}
+
+static int
+vs_client_core_send_service_reset(struct core_client *client,
+ struct vs_service_device *service)
+{
+ return vs_client_core_core_send_service_reset(&client->state,
+ service->id, GFP_KERNEL);
+}
+
+static int
+vs_client_core_queue_service_reset(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ struct core_client *client =
+ vs_client_session_core_client(session);
+ struct pending_reset *msg;
+
+ if (!client)
+ return -ENODEV;
+
+ vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+ "Sending reset for service %d\n", service->id);
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg)
+ return -ENOMEM;
+
+ mutex_lock(&client->message_queue_lock);
+
+ /* put by message_queue_work */
+ msg->service = vs_get_service(service);
+ list_add_tail(&msg->list, &client->message_queue);
+
+ mutex_unlock(&client->message_queue_lock);
+ queue_work(client->service->work_queue, &client->message_queue_work);
+
+ return 0;
+}
+
+static int vs_core_client_tx_ready(struct vs_client_core_state *state)
+{
+ struct core_client *client = to_core_client(state);
+
+ queue_work(client->service->work_queue, &client->message_queue_work);
+
+ return 0;
+}
+
+static void message_queue_work(struct work_struct *work)
+{
+ struct core_client *client = container_of(work, struct core_client,
+ message_queue_work);
+ struct vs_session_device *session =
+ vs_service_get_session(client->service);
+ struct pending_reset *msg;
+ int err;
+
+ vs_service_state_lock(client->service);
+ if (!VSERVICE_CORE_STATE_IS_CONNECTED(client->state.state.core)) {
+ vs_service_state_unlock(client->service);
+ return;
+ }
+
+ vs_dev_debug(VS_DEBUG_CLIENT, session, &session->dev, "tx_ready\n");
+
+ mutex_lock(&client->message_queue_lock);
+ while (!list_empty(&client->message_queue)) {
+ msg = list_first_entry(&client->message_queue,
+ struct pending_reset, list);
+
+ err = vs_client_core_send_service_reset(client, msg->service);
+
+ /* If we're out of quota there's no point continuing */
+ if (err == -ENOBUFS)
+ break;
+
+ /* Any other error is fatal */
+ if (err < 0) {
+ dev_err(&client->service->dev,
+ "Failed to send pending reset for %d (%d) - resetting session",
+ msg->service->id, err);
+ vs_service_reset_nosync(client->service);
+ break;
+ }
+
+ /*
+ * The message sent successfully - remove it from the queue.
+ * The corresponding vs_get_service() was done when the pending
+ * message was enqueued.
+ */
+ vs_put_service(msg->service);
+ list_del(&msg->list);
+ kfree(msg);
+ }
+ mutex_unlock(&client->message_queue_lock);
+ vs_service_state_unlock(client->service);
+}
+
+static int
+vs_client_core_handle_server_ready(struct vs_client_core_state *state,
+ u32 service_id, u32 in_quota, u32 out_quota, u32 in_bit_offset,
+ u32 in_num_bits, u32 out_bit_offset, u32 out_num_bits)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_session_device *session;
+ struct vs_service_device *service;
+ int ret;
+
+ if (service_id == 0)
+ return -EPROTO;
+
+ if (!in_quota || !out_quota)
+ return -EINVAL;
+
+ session = vs_service_get_session(client->service);
+ service = vs_session_get_service(session, service_id);
+ if (!service)
+ return -EINVAL;
+
+ service->send_quota = in_quota;
+ service->recv_quota = out_quota;
+ service->notify_send_offset = in_bit_offset;
+ service->notify_send_bits = in_num_bits;
+ service->notify_recv_offset = out_bit_offset;
+ service->notify_recv_bits = out_num_bits;
+
+ ret = vs_service_enable(service);
+ vs_put_service(service);
+ return ret;
+}
+
+static int
+vs_client_core_handle_service_reset(struct vs_client_core_state *state,
+ u32 service_id)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_session_device *session;
+
+ if (service_id == 0)
+ return -EPROTO;
+
+ session = vs_service_get_session(client->service);
+
+ return vs_service_handle_reset(session, service_id, true);
+}
+
+static void vs_core_client_start(struct vs_client_core_state *state)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_session_device *session =
+ vs_service_get_session(client->service);
+
+ /* FIXME - start callback should return int */
+ vs_dev_debug(VS_DEBUG_CLIENT_CORE, session, &client->service->dev,
+ "Core client start\n");
+}
+
+static void vs_core_client_reset(struct vs_client_core_state *state)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_session_device *session =
+ vs_service_get_session(client->service);
+ struct pending_reset *msg;
+
+ /* Flush the pending resets - we're about to delete everything */
+ while (!list_empty(&client->message_queue)) {
+ msg = list_first_entry(&client->message_queue,
+ struct pending_reset, list);
+ vs_put_service(msg->service);
+ list_del(&msg->list);
+ kfree(msg);
+ }
+
+ vs_session_delete_noncore(session);
+
+ /* Return to the initial quotas, until the next startup message */
+ client->service->send_quota = 0;
+ client->service->recv_quota = 1;
+}
+
+static int vs_core_client_startup(struct vs_client_core_state *state,
+ u32 core_in_quota, u32 core_out_quota)
+{
+ struct core_client *client = to_core_client(state);
+ struct vs_service_device *service = state->service;
+ struct vs_session_device *session = vs_service_get_session(service);
+ int ret;
+
+ if (!core_in_quota || !core_out_quota)
+ return -EINVAL;
+
+ /*
+ * Update the service struct with our real quotas and tell the
+ * transport about the change
+ */
+
+ service->send_quota = core_in_quota;
+ service->recv_quota = core_out_quota;
+ ret = session->transport->vt->service_start(session->transport, service);
+ if (ret < 0)
+ return ret;
+
+ WARN_ON(!list_empty(&client->message_queue));
+
+ return vs_client_core_core_req_connect(state, GFP_KERNEL);
+}
+
+static struct vs_client_core_state *
+vs_core_client_alloc(struct vs_service_device *service)
+{
+ struct core_client *client;
+ int err;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ goto fail;
+
+ client->service = service;
+ INIT_LIST_HEAD(&client->message_queue);
+ INIT_WORK(&client->message_queue_work, message_queue_work);
+ mutex_init(&client->message_queue_lock);
+
+ err = sysfs_create_group(&service->dev.kobj, &client_core_attr_group);
+ if (err)
+ goto fail_free_client;
+
+ /*
+ * Default transport resources for the core service client. The
+ * server will inform us of the real quotas in the startup message.
+ * Note that it is important that the quotas never decrease, so these
+ * numbers are as small as possible.
+ */
+ service->send_quota = 0;
+ service->recv_quota = 1;
+ service->notify_send_bits = 0;
+ service->notify_send_offset = 0;
+ service->notify_recv_bits = 0;
+ service->notify_recv_offset = 0;
+
+ return &client->state;
+
+fail_free_client:
+ kfree(client);
+fail:
+ return NULL;
+}
+
+static void vs_core_client_release(struct vs_client_core_state *state)
+{
+ struct core_client *client = to_core_client(state);
+
+ sysfs_remove_group(&client->service->dev.kobj, &client_core_attr_group);
+ kfree(client);
+}
+
+static struct vs_client_core vs_core_client_driver = {
+ .alloc = vs_core_client_alloc,
+ .release = vs_core_client_release,
+ .start = vs_core_client_start,
+ .reset = vs_core_client_reset,
+ .tx_ready = vs_core_client_tx_ready,
+
+ .core = {
+ .nack_connect = vs_client_core_fatal_error,
+
+ /* FIXME: Jira ticket SDK-3074 - ryanm. */
+ .ack_disconnect = vs_client_core_fatal_error,
+ .nack_disconnect = vs_client_core_fatal_error,
+
+ .msg_service_created = vs_client_core_handle_service_created,
+ .msg_service_removed = vs_client_core_handle_service_removed,
+
+ .msg_startup = vs_core_client_startup,
+ /* FIXME: Jira ticket SDK-3074 - philipd. */
+ .msg_shutdown = vs_client_core_fatal_error,
+ .msg_server_ready = vs_client_core_handle_server_ready,
+ .msg_service_reset = vs_client_core_handle_service_reset,
+ },
+};
+
+/*
+ * Client bus driver
+ */
+static int vs_client_bus_match(struct device *dev, struct device_driver *driver)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
+
+ /* Don't match anything to the devio driver; it's bound manually */
+ if (!vsdrv->protocol)
+ return 0;
+
+ WARN_ON_ONCE(service->is_server || vsdrv->is_server);
+
+ /* Match if the protocol strings are the same */
+ if (strcmp(service->protocol, vsdrv->protocol) == 0)
+ return 1;
+
+ return 0;
+}
+
+static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
+}
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
+}
+
+static ssize_t dev_protocol_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
+}
+
+static ssize_t service_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
+}
+
+static ssize_t quota_in_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", service->send_quota);
+}
+
+static ssize_t quota_out_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", service->recv_quota);
+}
+
+static struct device_attribute vs_client_dev_attrs[] = {
+ __ATTR_RO(id),
+ __ATTR_RO(is_server),
+ __ATTR(protocol, S_IRUGO, dev_protocol_show, NULL),
+ __ATTR_RO(service_name),
+ __ATTR_RO(quota_in),
+ __ATTR_RO(quota_out),
+ __ATTR_NULL
+};
+
+static ssize_t protocol_show(struct device_driver *drv, char *buf)
+{
+ struct vs_service_driver *driver = to_vs_service_driver(drv);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", driver->protocol);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+static struct driver_attribute vs_client_drv_attrs[] = {
+ __ATTR_RO(protocol),
+ __ATTR_NULL
+};
+#else
+static DRIVER_ATTR_RO(protocol);
+
+static struct attribute *vs_client_drv_attrs[] = {
+ &driver_attr_protocol.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vs_client_drv);
+#endif
+
+struct bus_type vs_client_bus_type = {
+ .name = "vservices-client",
+ .dev_attrs = vs_client_dev_attrs,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+ .drv_attrs = vs_client_drv_attrs,
+#else
+ .drv_groups = vs_client_drv_groups,
+#endif
+ .match = vs_client_bus_match,
+ .probe = vs_service_bus_probe,
+ .remove = vs_service_bus_remove,
+ .uevent = vs_service_bus_uevent,
+};
+EXPORT_SYMBOL(vs_client_bus_type);
+
+/*
+ * Client session driver
+ */
+static int vs_client_session_probe(struct device *dev)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+ struct vs_service_device *service;
+ char *protocol, *name;
+ int ret = 0;
+
+ if (session->is_server) {
+ ret = -ENODEV;
+ goto fail;
+ }
+
+ /* create a service for the core protocol client */
+ protocol = kstrdup(VSERVICE_CORE_PROTOCOL_NAME, GFP_KERNEL);
+ if (!protocol) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ name = kstrdup("core", GFP_KERNEL);
+ if (!name) {
+ ret = -ENOMEM;
+ goto fail_free_protocol;
+ }
+
+ service = vs_service_register(session, NULL, 0, protocol, name, NULL);
+ if (IS_ERR(service)) {
+ ret = PTR_ERR(service);
+ goto fail_free_name;
+ }
+
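+ /* Success path falls through: vs_service_register() copied the strings */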
+fail_free_name:
+ kfree(name);
+fail_free_protocol:
+ kfree(protocol);
+fail:
+ return ret;
+}
+
+static int
+vs_client_session_send_service_reset(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ if (WARN_ON(service->id == 0))
+ return -EINVAL;
+
+ return vs_client_core_queue_service_reset(session, service);
+}
+
+static struct vs_session_driver vs_client_session_driver = {
+ .driver = {
+ .name = "vservices-client-session",
+ .owner = THIS_MODULE,
+ .bus = &vs_session_bus_type,
+ .probe = vs_client_session_probe,
+ .suppress_bind_attrs = true,
+ },
+ .is_server = false,
+ .service_bus = &vs_client_bus_type,
+ .service_local_reset = vs_client_session_send_service_reset,
+};
+
+static int __init vs_core_client_init(void)
+{
+ int ret;
+
+ ret = bus_register(&vs_client_bus_type);
+ if (ret)
+ goto fail_bus_register;
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ vs_devio_client_driver.driver.bus = &vs_client_bus_type;
+ vs_devio_client_driver.driver.owner = THIS_MODULE;
+ ret = driver_register(&vs_devio_client_driver.driver);
+ if (ret)
+ goto fail_devio_register;
+#endif
+
+ ret = driver_register(&vs_client_session_driver.driver);
+ if (ret)
+ goto fail_driver_register;
+
+ ret = vservice_core_client_register(&vs_core_client_driver,
+ "vs_core_client");
+ if (ret)
+ goto fail_core_register;
+
+ vservices_client_root = kobject_create_and_add("client-sessions",
+ vservices_root);
+ if (!vservices_client_root) {
+ ret = -ENOMEM;
+ goto fail_create_root;
+ }
+
+ return 0;
+
+fail_create_root:
+ vservice_core_client_unregister(&vs_core_client_driver);
+fail_core_register:
+ driver_unregister(&vs_client_session_driver.driver);
+fail_driver_register:
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ driver_unregister(&vs_devio_client_driver.driver);
+ vs_devio_client_driver.driver.bus = NULL;
+ vs_devio_client_driver.driver.owner = NULL;
+fail_devio_register:
+#endif
+ bus_unregister(&vs_client_bus_type);
+fail_bus_register:
+ return ret;
+}
+
+static void __exit vs_core_client_exit(void)
+{
+ kobject_put(vservices_client_root);
+ vservice_core_client_unregister(&vs_core_client_driver);
+ driver_unregister(&vs_client_session_driver.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ driver_unregister(&vs_devio_client_driver.driver);
+ vs_devio_client_driver.driver.bus = NULL;
+ vs_devio_client_driver.driver.owner = NULL;
+#endif
+ bus_unregister(&vs_client_bus_type);
+}
+
+subsys_initcall(vs_core_client_init);
+module_exit(vs_core_client_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Client Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/core_server.c b/drivers/vservices/core_server.c
new file mode 100644
index 0000000..76ca83c
--- /dev/null
+++ b/drivers/vservices/core_server.c
@@ -0,0 +1,1651 @@
+/*
+ * drivers/vservices/core_server.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Server side core service application driver
+ */
+
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/ctype.h>
+
+#include <vservices/types.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/server.h>
+
+#include "transport.h"
+#include "session.h"
+#include "compat.h"
+
+#define VSERVICE_CORE_SERVICE_NAME "core"
+
+struct core_server {
+ struct vs_server_core_state state;
+ struct vs_service_device *service;
+
+ /*
+ * A list of messages to send, a mutex protecting it, and a
+ * work item to process the list.
+ */
+ struct list_head message_queue;
+ struct mutex message_queue_lock;
+ struct work_struct message_queue_work;
+
+ struct mutex alloc_lock;
+
+ /* The following are all protected by alloc_lock. */
+ unsigned long *in_notify_map;
+ int in_notify_map_bits;
+
+ unsigned long *out_notify_map;
+ int out_notify_map_bits;
+
+ unsigned in_quota_remaining;
+ unsigned out_quota_remaining;
+};
+
+/*
+ * Used for message deferral when the core service is over quota.
+ */
+struct pending_message {
+ vservice_core_message_id_t type;
+ struct vs_service_device *service;
+ struct list_head list;
+};
+
+#define to_core_server(x) container_of(x, struct core_server, state)
+#define dev_to_core_server(x) to_core_server(dev_get_drvdata(x))
+
+static struct vs_session_device *
+vs_core_server_session(struct core_server *server)
+{
+ return vs_service_get_session(server->service);
+}
+
+static struct core_server *
+vs_server_session_core_server(struct vs_session_device *session)
+{
+ struct vs_service_device *core_service = session->core_service;
+
+ if (!core_service)
+ return NULL;
+
+ return dev_to_core_server(&core_service->dev);
+}
+
+static int vs_server_core_send_service_removed(struct core_server *server,
+ struct vs_service_device *service)
+{
+ return vs_server_core_core_send_service_removed(&server->state,
+ service->id, GFP_KERNEL);
+}
+
+static bool
+cancel_pending_created(struct core_server *server,
+ struct vs_service_device *service)
+{
+ struct pending_message *msg;
+
+ list_for_each_entry(msg, &server->message_queue, list) {
+ if (msg->type == VSERVICE_CORE_CORE_MSG_SERVICE_CREATED &&
+ msg->service == service) {
+ vs_put_service(msg->service);
+ list_del(&msg->list);
+ kfree(msg);
+
+ /* there can only be one */
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int vs_server_core_queue_service_removed(struct core_server *server,
+ struct vs_service_device *service)
+{
+ struct pending_message *msg;
+
+ lockdep_assert_held(&service->ready_lock);
+
+ mutex_lock(&server->message_queue_lock);
+
+ /*
+ * If we haven't sent the notification that the service was created,
+ * nuke it and do nothing else.
+ *
+ * This is not just an optimisation; see below.
+ */
+ if (cancel_pending_created(server, service)) {
+ mutex_unlock(&server->message_queue_lock);
+ return 0;
+ }
+
+ /*
+ * Do nothing if the core state is not connected. We must avoid
+ * queueing service_removed messages on a reset service.
+ *
+ * Note that we cannot take the core server state lock here, because
+ * we may (or may not) have been called from a core service message
+ * handler. Thus, we must beware of races with changes to this
+ * condition:
+ *
+ * - It becomes true when the req_connect handler sends an
+ * ack_connect, *after* it queues service_created for each existing
+ * service (while holding the service ready lock). The handler sends
+ * ack_connect with the message queue lock held.
+ *
+ * - If we see the service as connected, then the req_connect
+ * handler has already queued and sent a service_created for this
+ * service, so it's ok for us to send a service_removed.
+ *
+ * - If we see it as disconnected, the req_connect handler hasn't
+ * taken the message queue lock to send ack_connect yet, and thus
+ * has not released the service state lock; so if it queued a
+ * service_created we caught it in the flush above before it was
+ * sent.
+ *
+ * - It becomes false before the reset / disconnect handlers are
+ * called and those will both flush the message queue afterwards.
+ *
+ * - If we see the service as connected, then the reset / disconnect
+ * handler is going to flush the message.
+ *
+ * - If we see it disconnected, the state change has occurred and
+ * implicitly had the same effect as this message, so doing
+ * nothing is correct.
+ *
+ * Note that ordering in all of the above cases is guaranteed by the
+ * message queue lock.
+ */
+ if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+ mutex_unlock(&server->message_queue_lock);
+ return 0;
+ }
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ mutex_unlock(&server->message_queue_lock);
+ return -ENOMEM;
+ }
+
+ msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
+ /* put by message_queue_work */
+ msg->service = vs_get_service(service);
+
+ list_add_tail(&msg->list, &server->message_queue);
+
+ mutex_unlock(&server->message_queue_lock);
+ queue_work(server->service->work_queue, &server->message_queue_work);
+
+ return 0;
+}
+
+static int vs_server_core_send_service_created(struct core_server *server,
+ struct vs_service_device *service)
+{
+ struct vs_session_device *session =
+ vs_service_get_session(server->service);
+
+ struct vs_mbuf *mbuf;
+ struct vs_string service_name, protocol_name;
+ size_t service_name_len, protocol_name_len;
+
+ int err;
+
+ mbuf = vs_server_core_core_alloc_service_created(&server->state,
+ &service_name, &protocol_name, GFP_KERNEL);
+
+ if (IS_ERR(mbuf))
+ return PTR_ERR(mbuf);
+
+ vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+ "Sending service created message for %d (%s:%s)\n",
+ service->id, service->name, service->protocol);
+
+ service_name_len = strlen(service->name);
+ protocol_name_len = strlen(service->protocol);
+
+ if (service_name_len > vs_string_max_size(&service_name) ||
+ protocol_name_len > vs_string_max_size(&protocol_name)) {
+ dev_err(&session->dev,
+ "Invalid name/protocol for service %d (%s:%s)\n",
+ service->id, service->name,
+ service->protocol);
+ err = -EINVAL;
+ goto fail;
+ }
+
+ vs_string_copyin(&service_name, service->name);
+ vs_string_copyin(&protocol_name, service->protocol);
+
+ err = vs_server_core_core_send_service_created(&server->state,
+ service->id, service_name, protocol_name, mbuf);
+ if (err) {
+ dev_err(&session->dev,
+ "Fatal error sending service creation message for %d (%s:%s): %d\n",
+ service->id, service->name,
+ service->protocol, err);
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ vs_server_core_core_free_service_created(&server->state,
+ &service_name, &protocol_name, mbuf);
+
+ return err;
+}
+
+static int vs_server_core_queue_service_created(struct core_server *server,
+ struct vs_service_device *service)
+{
+ struct pending_message *msg;
+
+ lockdep_assert_held(&service->ready_lock);
+ lockdep_assert_held(&server->service->state_mutex);
+
+ mutex_lock(&server->message_queue_lock);
+
+ /* Do nothing if the core state is disconnected. */
+ if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+ mutex_unlock(&server->message_queue_lock);
+ return 0;
+ }
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ mutex_unlock(&server->message_queue_lock);
+ return -ENOMEM;
+ }
+
+ msg->type = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+ /* put by message_queue_work */
+ msg->service = vs_get_service(service);
+
+ list_add_tail(&msg->list, &server->message_queue);
+
+ mutex_unlock(&server->message_queue_lock);
+ queue_work(server->service->work_queue, &server->message_queue_work);
+
+ return 0;
+}
+
+static struct vs_service_device *
+__vs_server_core_register_service(struct vs_session_device *session,
+ vs_service_id_t service_id, struct vs_service_device *owner,
+ const char *name, const char *protocol, const void *plat_data)
+{
+ if (!session->is_server)
+ return ERR_PTR(-ENODEV);
+
+ if (!name || strnlen(name, VSERVICE_CORE_SERVICE_NAME_SIZE + 1) >
+ VSERVICE_CORE_SERVICE_NAME_SIZE || name[0] == '\n')
+ return ERR_PTR(-EINVAL);
+
+ /* The server core must only be registered as service_id zero */
+ if (service_id == 0 && (owner != NULL ||
+ strcmp(name, VSERVICE_CORE_SERVICE_NAME) != 0 ||
+ strcmp(protocol, VSERVICE_CORE_PROTOCOL_NAME) != 0))
+ return ERR_PTR(-EINVAL);
+
+ return vs_service_register(session, owner, service_id, protocol, name,
+ plat_data);
+}
+
+static struct vs_service_device *
+vs_server_core_create_service(struct core_server *server,
+ struct vs_session_device *session,
+ struct vs_service_device *owner, vs_service_id_t service_id,
+ const char *name, const char *protocol, const void *plat_data)
+{
+ struct vs_service_device *service;
+
+ service = __vs_server_core_register_service(session, service_id,
+ owner, name, protocol, plat_data);
+ if (IS_ERR(service))
+ return service;
+
+ if (protocol) {
+ vs_service_state_lock(server->service);
+ vs_service_start(service);
+ if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
+ vs_service_enable(service);
+ vs_service_state_unlock(server->service);
+ }
+
+ return service;
+}
+
+static int
+vs_server_core_send_service_reset_ready(struct core_server *server,
+ vservice_core_message_id_t type,
+ struct vs_service_device *service)
+{
+ bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
+ struct vs_session_device *session __maybe_unused =
+ vs_service_get_session(server->service);
+ int err;
+
+ vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev,
+ "Sending %s for service %d\n",
+ is_reset ? "reset" : "ready", service->id);
+
+ if (is_reset)
+ err = vs_server_core_core_send_service_reset(&server->state,
+ service->id, GFP_KERNEL);
+ else
+ err = vs_server_core_core_send_server_ready(&server->state,
+ service->id, service->recv_quota,
+ service->send_quota,
+ service->notify_recv_offset,
+ service->notify_recv_bits,
+ service->notify_send_offset,
+ service->notify_send_bits,
+ GFP_KERNEL);
+
+ return err;
+}
+
+static bool
+cancel_pending_ready(struct core_server *server,
+ struct vs_service_device *service)
+{
+ struct pending_message *msg;
+
+ list_for_each_entry(msg, &server->message_queue, list) {
+ if (msg->type == VSERVICE_CORE_CORE_MSG_SERVER_READY &&
+ msg->service == service) {
+ vs_put_service(msg->service);
+ list_del(&msg->list);
+ kfree(msg);
+
+ /* there can only be one */
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static int
+vs_server_core_queue_service_reset_ready(struct core_server *server,
+ vservice_core_message_id_t type,
+ struct vs_service_device *service)
+{
+ bool is_reset = (type == VSERVICE_CORE_CORE_MSG_SERVICE_RESET);
+ struct pending_message *msg;
+
+ mutex_lock(&server->message_queue_lock);
+
+ /*
+ * If this is a reset, and there is an outgoing ready in the
+ * queue, we must cancel it so it can't be sent with invalid
+ * transport resources, and then return immediately so we
+ * don't send a redundant reset.
+ */
+ if (is_reset && cancel_pending_ready(server, service)) {
+ mutex_unlock(&server->message_queue_lock);
+ return VS_SERVICE_ALREADY_RESET;
+ }
+
+ msg = kzalloc(sizeof(*msg), GFP_KERNEL);
+ if (!msg) {
+ mutex_unlock(&server->message_queue_lock);
+ return -ENOMEM;
+ }
+
+ msg->type = type;
+ /* put by message_queue_work */
+ msg->service = vs_get_service(service);
+ list_add_tail(&msg->list, &server->message_queue);
+
+ mutex_unlock(&server->message_queue_lock);
+ queue_work(server->service->work_queue, &server->message_queue_work);
+
+ return 0;
+}
+
+static int vs_core_server_tx_ready(struct vs_server_core_state *state)
+{
+ struct core_server *server = to_core_server(state);
+ struct vs_session_device *session __maybe_unused =
+ vs_service_get_session(server->service);
+
+ vs_dev_debug(VS_DEBUG_SERVER, session, &session->dev, "tx_ready\n");
+
+ queue_work(server->service->work_queue, &server->message_queue_work);
+
+ return 0;
+}
+
+static void message_queue_work(struct work_struct *work)
+{
+ struct core_server *server = container_of(work, struct core_server,
+ message_queue_work);
+ struct pending_message *msg;
+ int err;
+
+ vs_service_state_lock(server->service);
+
+ if (!VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core)) {
+ vs_service_state_unlock(server->service);
+ return;
+ }
+
+ /*
+ * If any pending message fails we exit the loop immediately so that
+ * we preserve the message order.
+ */
+ mutex_lock(&server->message_queue_lock);
+ while (!list_empty(&server->message_queue)) {
+ msg = list_first_entry(&server->message_queue,
+ struct pending_message, list);
+
+ switch (msg->type) {
+ case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
+ err = vs_server_core_send_service_created(server,
+ msg->service);
+ break;
+
+ case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
+ err = vs_server_core_send_service_removed(server,
+ msg->service);
+ break;
+
+ case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+ case VSERVICE_CORE_CORE_MSG_SERVER_READY:
+ err = vs_server_core_send_service_reset_ready(
+ server, msg->type, msg->service);
+ break;
+
+ default:
+ dev_warn(&server->service->dev,
+ "Don't know how to handle pending message type %d\n",
+ msg->type);
+ err = 0;
+ break;
+ }
+
+ /*
+ * If we're out of quota we exit and wait for tx_ready to
+ * queue us again.
+ */
+ if (err == -ENOBUFS)
+ break;
+
+ /* Any other error is fatal */
+ if (err < 0) {
+ dev_err(&server->service->dev,
+ "Failed to send pending message type %d: %d - resetting session",
+ msg->type, err);
+ vs_service_reset_nosync(server->service);
+ break;
+ }
+
+ /*
+ * The message sent successfully - remove it from the
+ * queue. The corresponding vs_get_service() was done
+ * when the pending message was created.
+ */
+ vs_put_service(msg->service);
+ list_del(&msg->list);
+ kfree(msg);
+ }
+ mutex_unlock(&server->message_queue_lock);
+
+ vs_service_state_unlock(server->service);
+}
+
+/*
+ * Core server sysfs interface
+ */
+static ssize_t server_core_create_service_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = to_vs_session_device(dev->parent);
+ struct core_server *server = dev_to_core_server(&service->dev);
+ struct vs_service_device *new_service;
+ char *p;
+ ssize_t ret = count;
+
+ /* FIXME - Buffer sizes are not defined in generated headers */
+ /* discard leading whitespace */
+ while (count && isspace(*buf)) {
+ buf++;
+ count--;
+ }
+ if (!count) {
+ dev_info(dev, "empty service name");
+ return -EINVAL;
+ }
+ /* discard trailing whitespace */
+ while (count && isspace(buf[count - 1]))
+ count--;
+
+ if (count > VSERVICE_CORE_SERVICE_NAME_SIZE) {
+ dev_info(dev, "service name too long (max %d)\n", VSERVICE_CORE_SERVICE_NAME_SIZE);
+ return -EINVAL;
+ }
+
+ p = kstrndup(buf, count, GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ /*
+ * Writing a service name to this file creates a new service. The
+ * service is created without a protocol. It will appear in sysfs
+ * but will not be bound to a driver until a valid protocol name
+ * has been written to the created device's protocol sysfs attribute.
+ */
+ new_service = vs_server_core_create_service(server, session, service,
+ VS_SERVICE_AUTO_ALLOCATE_ID, p, NULL, NULL);
+ if (IS_ERR(new_service))
+ ret = PTR_ERR(new_service);
+
+ kfree(p);
+
+ return ret;
+}
+
+static ssize_t server_core_reset_service_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *core_service = to_vs_service_device(dev);
+ struct vs_session_device *session =
+ vs_service_get_session(core_service);
+ struct vs_service_device *target;
+ vs_service_id_t service_id;
+ unsigned long val;
+ int err;
+
+ /*
+ * Writing a valid service_id to this file does a reset of that service
+ */
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
+
+ service_id = val;
+ target = vs_session_get_service(session, service_id);
+ if (!target)
+ return -EINVAL;
+
+ err = vs_service_reset(target, core_service);
+
+ vs_put_service(target);
+ return err < 0 ? err : count;
+}
+
+static ssize_t server_core_remove_service_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct vs_service_device *target;
+ vs_service_id_t service_id;
+ unsigned long val;
+ int err;
+
+ err = kstrtoul(buf, 0, &val);
+ if (err)
+ return err;
+
+ service_id = val;
+ if (service_id == 0) {
+ /*
+ * We don't allow removing the core service this way. The
+ * core service will be removed when the session is removed.
+ */
+ return -EINVAL;
+ }
+
+ target = vs_session_get_service(session, service_id);
+ if (!target)
+ return -EINVAL;
+
+ err = vs_service_delete(target, service);
+
+ vs_put_service(target);
+ return err < 0 ? err : count;
+}
+
+static DEVICE_ATTR(create_service, S_IWUSR,
+ NULL, server_core_create_service_store);
+static DEVICE_ATTR(reset_service, S_IWUSR,
+ NULL, server_core_reset_service_store);
+static DEVICE_ATTR(remove_service, S_IWUSR,
+ NULL, server_core_remove_service_store);
+
+static struct attribute *server_core_dev_attrs[] = {
+ &dev_attr_create_service.attr,
+ &dev_attr_reset_service.attr,
+ &dev_attr_remove_service.attr,
+ NULL,
+};
+
+static const struct attribute_group server_core_attr_group = {
+ .attrs = server_core_dev_attrs,
+};
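+
+/*
+ * Illustrative workflow for the attributes above (paths and the protocol
+ * name are hypothetical; the core service is always service id 0):
+ *
+ *   # create a service named "serial0", then bind it by setting a protocol
+ *   echo serial0 > .../core/create_service
+ *   echo com.example.serial > .../serial0/protocol
+ *
+ *   # reset or remove a service by id
+ *   echo 3 > .../core/reset_service
+ *   echo 3 > .../core/remove_service
+ */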
+
+static int init_transport_resource_allocation(struct core_server *server)
+{
+ struct vs_session_device *session = vs_core_server_session(server);
+ struct vs_transport *transport = session->transport;
+ size_t size;
+ int err;
+
+ mutex_init(&server->alloc_lock);
+ mutex_lock(&server->alloc_lock);
+
+ transport->vt->get_quota_limits(transport, &server->out_quota_remaining,
+ &server->in_quota_remaining);
+
+ transport->vt->get_notify_bits(transport, &server->out_notify_map_bits,
+ &server->in_notify_map_bits);
+
+ size = BITS_TO_LONGS(server->in_notify_map_bits) *
+ sizeof(unsigned long);
+ server->in_notify_map = kzalloc(size, GFP_KERNEL);
+ if (server->in_notify_map_bits && !server->in_notify_map) {
+ err = -ENOMEM;
+ goto fail;
+ }
+
+ size = BITS_TO_LONGS(server->out_notify_map_bits) *
+ sizeof(unsigned long);
+ server->out_notify_map = kzalloc(size, GFP_KERNEL);
+ if (server->out_notify_map_bits && !server->out_notify_map) {
+ err = -ENOMEM;
+ goto fail_free_in_bits;
+ }
+
+ mutex_unlock(&server->alloc_lock);
+
+ return 0;
+
+fail_free_in_bits:
+ kfree(server->in_notify_map);
+fail:
+ mutex_unlock(&server->alloc_lock);
+ return err;
+}
+
+static int alloc_quota(unsigned minimum, unsigned best, unsigned set,
+ unsigned *remaining)
+{
+ unsigned quota;
+
+ if (set) {
+ quota = set;
+
+ if (quota > *remaining)
+ return -ENOSPC;
+ } else if (best) {
+ quota = min(best, *remaining);
+ } else {
+ quota = minimum;
+ }
+
+ if (quota < minimum)
+ return -ENOSPC;
+
+ *remaining -= quota;
+
+ return min_t(unsigned, quota, INT_MAX);
+}
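+
+/*
+ * For example, with 10 messages of session quota remaining, a driver
+ * requesting minimum 2 and best 8 (and no explicit "set" value) is granted
+ * 8; with only 5 remaining it would be granted 5; with only 1 remaining the
+ * allocation fails with -ENOSPC because the minimum cannot be met.
+ */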
+
+static int alloc_notify_bits(unsigned notify_count, unsigned long *map,
+ unsigned nr_bits)
+{
+ unsigned offset;
+
+ if (notify_count) {
+ offset = bitmap_find_next_zero_area(map, nr_bits, 0,
+ notify_count, 0);
+
+ if (offset >= nr_bits || offset > (unsigned)INT_MAX)
+ return -ENOSPC;
+
+ bitmap_set(map, offset, notify_count);
+ } else {
+ offset = 0;
+ }
+
+ return offset;
+}
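+
+/*
+ * For example, a request for two notification bits on a map whose first
+ * free run starts at bit 4 returns offset 4 and marks bits 4-5 used; a
+ * request for zero bits always succeeds with offset 0.
+ */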
+
+/*
+ * alloc_transport_resources - Allocates the quotas and notification bits for
+ * a service.
+ * @server: the core service state.
+ * @service: the service device to allocate resources for.
+ *
+ * This function allocates message quotas and notification bits. It is called
+ * for the core service in alloc(), and for every other service by the server
+ * bus probe() function.
+ */
+static int alloc_transport_resources(struct core_server *server,
+ struct vs_service_device *service)
+{
+ struct vs_session_device *session __maybe_unused =
+ vs_service_get_session(service);
+ unsigned in_bit_offset, out_bit_offset;
+ unsigned in_quota, out_quota;
+ int ret;
+ struct vs_service_driver *driver;
+
+ if (WARN_ON(!service->dev.driver))
+ return -ENODEV;
+
+ mutex_lock(&server->alloc_lock);
+
+ driver = to_vs_service_driver(service->dev.driver);
+
+ /* Quota allocations */
+ ret = alloc_quota(driver->in_quota_min, driver->in_quota_best,
+ service->in_quota_set, &server->in_quota_remaining);
+ if (ret < 0) {
+ dev_err(&service->dev, "cannot allocate in quota\n");
+ goto fail_in_quota;
+ }
+ in_quota = ret;
+
+ ret = alloc_quota(driver->out_quota_min, driver->out_quota_best,
+ service->out_quota_set, &server->out_quota_remaining);
+ if (ret < 0) {
+ dev_err(&service->dev, "cannot allocate out quota\n");
+ goto fail_out_quota;
+ }
+ out_quota = ret;
+
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+ "%d: quota in: %u out: %u; remaining in: %u out: %u\n",
+ service->id, in_quota, out_quota,
+ server->in_quota_remaining,
+ server->out_quota_remaining);
+
+ /* Notification bit allocations */
+ ret = alloc_notify_bits(service->notify_recv_bits,
+ server->in_notify_map, server->in_notify_map_bits);
+ if (ret < 0) {
+ dev_err(&service->dev, "cannot allocate in notify bits\n");
+ goto fail_in_notify;
+ }
+ in_bit_offset = ret;
+
+ ret = alloc_notify_bits(service->notify_send_bits,
+ server->out_notify_map, server->out_notify_map_bits);
+ if (ret < 0) {
+ dev_err(&service->dev, "cannot allocate out notify bits\n");
+ goto fail_out_notify;
+ }
+ out_bit_offset = ret;
+
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+ "notify bits in: %u/%u out: %u/%u\n",
+ in_bit_offset, service->notify_recv_bits,
+ out_bit_offset, service->notify_send_bits);
+
+ /* Fill in the device's allocations */
+ service->recv_quota = in_quota;
+ service->send_quota = out_quota;
+ service->notify_recv_offset = in_bit_offset;
+ service->notify_send_offset = out_bit_offset;
+
+ mutex_unlock(&server->alloc_lock);
+
+ return 0;
+
+fail_out_notify:
+ if (service->notify_recv_bits)
+ bitmap_clear(server->in_notify_map,
+ in_bit_offset, service->notify_recv_bits);
+fail_in_notify:
+ server->out_quota_remaining += out_quota;
+fail_out_quota:
+ server->in_quota_remaining += in_quota;
+fail_in_quota:
+
+ mutex_unlock(&server->alloc_lock);
+
+ service->recv_quota = 0;
+ service->send_quota = 0;
+ service->notify_recv_bits = 0;
+ service->notify_recv_offset = 0;
+ service->notify_send_bits = 0;
+ service->notify_send_offset = 0;
+
+ return ret;
+}
+
+/*
+ * free_transport_resources - Frees the quotas and notification bits for
+ * a non-core service.
+ * @server: the core service state.
+ * @service: the service device to free resources for.
+ *
+ * This function is called by the server to free message quotas and
+ * notification bits that were allocated by alloc_transport_resources. It must
+ * only be called when the target service is in reset, and must be called with
+ * the core service's state lock held.
+ */
+static int free_transport_resources(struct core_server *server,
+ struct vs_service_device *service)
+{
+ mutex_lock(&server->alloc_lock);
+
+ if (service->notify_recv_bits)
+ bitmap_clear(server->in_notify_map,
+ service->notify_recv_offset,
+ service->notify_recv_bits);
+
+ if (service->notify_send_bits)
+ bitmap_clear(server->out_notify_map,
+ service->notify_send_offset,
+ service->notify_send_bits);
+
+ server->in_quota_remaining += service->recv_quota;
+ server->out_quota_remaining += service->send_quota;
+
+ mutex_unlock(&server->alloc_lock);
+
+ service->recv_quota = 0;
+ service->send_quota = 0;
+ service->notify_recv_bits = 0;
+ service->notify_recv_offset = 0;
+ service->notify_send_bits = 0;
+ service->notify_send_offset = 0;
+
+ return 0;
+}
+
+static struct vs_server_core_state *
+vs_core_server_alloc(struct vs_service_device *service)
+{
+ struct core_server *server;
+ int err;
+
+ if (WARN_ON(service->id != 0))
+ goto fail;
+
+ server = kzalloc(sizeof(*server), GFP_KERNEL);
+ if (!server)
+ goto fail;
+
+ server->service = service;
+ INIT_LIST_HEAD(&server->message_queue);
+ INIT_WORK(&server->message_queue_work, message_queue_work);
+ mutex_init(&server->message_queue_lock);
+
+ err = init_transport_resource_allocation(server);
+ if (err)
+ goto fail_init_alloc;
+
+ err = alloc_transport_resources(server, service);
+ if (err)
+ goto fail_alloc_transport;
+
+ err = sysfs_create_group(&service->dev.kobj, &server_core_attr_group);
+ if (err)
+ goto fail_sysfs;
+
+ return &server->state;
+
+fail_sysfs:
+ free_transport_resources(server, service);
+fail_alloc_transport:
+ kfree(server->out_notify_map);
+ kfree(server->in_notify_map);
+fail_init_alloc:
+ kfree(server);
+fail:
+ return NULL;
+}
+
+static void vs_core_server_release(struct vs_server_core_state *state)
+{
+ struct core_server *server = to_core_server(state);
+ struct vs_session_device *session = vs_core_server_session(server);
+
+ /* Delete all the other services */
+ vs_session_delete_noncore(session);
+
+ sysfs_remove_group(&server->service->dev.kobj, &server_core_attr_group);
+ kfree(server->out_notify_map);
+ kfree(server->in_notify_map);
+ kfree(server);
+}
+
+/**
+ * vs_server_create_service - create and register a new vService server
+ * @session: the session to create the vService server on
+ * @parent: an existing server that is managing the new server
+ * @name: the name of the new service
+ * @protocol: the protocol for the new service
+ * @plat_data: value to be assigned to (struct device *)->platform_data
+ */
+struct vs_service_device *
+vs_server_create_service(struct vs_session_device *session,
+ struct vs_service_device *parent, const char *name,
+ const char *protocol, const void *plat_data)
+{
+ struct vs_service_device *core_service, *new_service;
+ struct core_server *server;
+
+ if (!session->is_server || !name || !protocol)
+ return NULL;
+
+ core_service = session->core_service;
+ if (!core_service)
+ return NULL;
+
+ device_lock(&core_service->dev);
+ if (!core_service->dev.driver) {
+ device_unlock(&core_service->dev);
+ return NULL;
+ }
+
+ server = dev_to_core_server(&core_service->dev);
+
+ if (!parent)
+ parent = core_service;
+
+ new_service = vs_server_core_create_service(server, session, parent,
+ VS_SERVICE_AUTO_ALLOCATE_ID, name, protocol, plat_data);
+
+ device_unlock(&core_service->dev);
+
+ if (IS_ERR(new_service))
+ return NULL;
+
+ return new_service;
+}
+EXPORT_SYMBOL(vs_server_create_service);
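+
+/*
+ * Minimal usage sketch (names are hypothetical):
+ *
+ *   struct vs_service_device *svc;
+ *
+ *   svc = vs_server_create_service(session, NULL, "serial0",
+ *                   "com.example.serial", NULL);
+ *   if (!svc)
+ *           return -ENODEV;
+ */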
+
+/**
+ * vs_server_destroy_service - destroy and unregister a vService server
+ * @service: the service to destroy
+ * @parent: the service requesting the removal; if NULL, the session's core
+ * service is used
+ *
+ * This function must _not_ be called from the target service's own workqueue.
+ */
+int vs_server_destroy_service(struct vs_service_device *service,
+ struct vs_service_device *parent)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ if (!session->is_server || service->id == 0)
+ return -EINVAL;
+
+ if (!parent)
+ parent = session->core_service;
+
+ return vs_service_delete(service, parent);
+}
+EXPORT_SYMBOL(vs_server_destroy_service);
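+
+/*
+ * The matching teardown for the sketch above; note it must not be called
+ * from svc's own workqueue:
+ *
+ *   vs_server_destroy_service(svc, NULL);
+ */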
+
+static void __queue_service_created(struct vs_service_device *service,
+ void *data)
+{
+ struct core_server *server = (struct core_server *)data;
+
+ vs_server_core_queue_service_created(server, service);
+}
+
+static int vs_server_core_handle_connect(struct vs_server_core_state *state)
+{
+ struct core_server *server = to_core_server(state);
+ struct vs_session_device *session = vs_core_server_session(server);
+ int err;
+
+ /* Tell the other end that we've finished connecting. */
+ err = vs_server_core_core_send_ack_connect(state, GFP_KERNEL);
+ if (err)
+ return err;
+
+ /* Queue a service-created message for each existing service. */
+ vs_session_for_each_service(session, __queue_service_created, server);
+
+ /* Re-enable all the services. */
+ vs_session_enable_noncore(session);
+
+ return 0;
+}
+
+static void vs_core_server_disable_services(struct core_server *server)
+{
+ struct vs_session_device *session = vs_core_server_session(server);
+ struct pending_message *msg;
+
+ /* Disable all the other services */
+ vs_session_disable_noncore(session);
+
+ /* Flush all the pending service-readiness messages */
+ mutex_lock(&server->message_queue_lock);
+ while (!list_empty(&server->message_queue)) {
+ msg = list_first_entry(&server->message_queue,
+ struct pending_message, list);
+ vs_put_service(msg->service);
+ list_del(&msg->list);
+ kfree(msg);
+ }
+ mutex_unlock(&server->message_queue_lock);
+}
+
+static int vs_server_core_handle_disconnect(struct vs_server_core_state *state)
+{
+ struct core_server *server = to_core_server(state);
+
+ vs_core_server_disable_services(server);
+
+ return vs_server_core_core_send_ack_disconnect(state, GFP_KERNEL);
+}
+
+static int
+vs_server_core_handle_service_reset(struct vs_server_core_state *state,
+ unsigned service_id)
+{
+ struct core_server *server = to_core_server(state);
+ struct vs_session_device *session = vs_core_server_session(server);
+
+ if (service_id == 0)
+ return -EPROTO;
+
+ return vs_service_handle_reset(session, service_id, false);
+}
+
+static void vs_core_server_start(struct vs_server_core_state *state)
+{
+ struct core_server *server = to_core_server(state);
+ struct vs_session_device *session = vs_core_server_session(server);
+ int err;
+
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
+ "Core server start\n");
+
+ err = vs_server_core_core_send_startup(&server->state,
+ server->service->recv_quota,
+ server->service->send_quota, GFP_KERNEL);
+
+ if (err)
+ dev_err(&session->dev, "Failed to start core protocol: %d\n",
+ err);
+}
+
+static void vs_core_server_reset(struct vs_server_core_state *state)
+{
+ struct core_server *server = to_core_server(state);
+ struct vs_session_device *session = vs_core_server_session(server);
+
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &server->service->dev,
+ "Core server reset\n");
+
+ vs_core_server_disable_services(server);
+}
+
+static struct vs_server_core vs_core_server_driver = {
+ .alloc = vs_core_server_alloc,
+ .release = vs_core_server_release,
+ .start = vs_core_server_start,
+ .reset = vs_core_server_reset,
+ .tx_ready = vs_core_server_tx_ready,
+ .core = {
+ .req_connect = vs_server_core_handle_connect,
+ .req_disconnect = vs_server_core_handle_disconnect,
+ .msg_service_reset = vs_server_core_handle_service_reset,
+ },
+};
+
+/*
+ * Server bus driver
+ */
+static int vs_server_bus_match(struct device *dev, struct device_driver *driver)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_service_driver *vsdrv = to_vs_service_driver(driver);
+
+ /* Don't match anything to the devio driver; it's bound manually */
+ if (!vsdrv->protocol)
+ return 0;
+
+ WARN_ON_ONCE(!service->is_server || !vsdrv->is_server);
+
+ /* Don't match anything that doesn't have a protocol set yet */
+ if (!service->protocol)
+ return 0;
+
+ if (strcmp(service->protocol, vsdrv->protocol) == 0)
+ return 1;
+
+ return 0;
+}
+
+static int vs_server_bus_probe(struct device *dev)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct core_server *server = vs_server_session_core_server(session);
+ int ret;
+
+ /*
+ * Set the notify counts for the service, unless the driver is the
+ * devio driver in which case it has already been done by the devio
+ * bind ioctl. The devio driver cannot be bound automatically.
+ */
+ struct vs_service_driver *driver =
+ to_vs_service_driver(service->dev.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ if (driver != &vs_devio_server_driver)
+#endif
+ {
+ service->notify_recv_bits = driver->in_notify_count;
+ service->notify_send_bits = driver->out_notify_count;
+ }
+
+ /*
+ * We can't allocate transport resources here for the core service
+ * because the resource pool doesn't exist yet. It's done in alloc()
+ * instead (which is called, indirectly, by vs_service_bus_probe()).
+ */
+ if (service->id == 0)
+ return vs_service_bus_probe(dev);
+
+ if (!server)
+ return -ENODEV;
+ ret = alloc_transport_resources(server, service);
+ if (ret < 0)
+ goto fail;
+
+ ret = vs_service_bus_probe(dev);
+ if (ret < 0)
+ goto fail_free_resources;
+
+ return 0;
+
+fail_free_resources:
+ free_transport_resources(server, service);
+fail:
+ return ret;
+}
+
+static int vs_server_bus_remove(struct device *dev)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct core_server *server = vs_server_session_core_server(session);
+
+ vs_service_bus_remove(dev);
+
+ /*
+ * We skip free_transport_resources for the core service because the
+ * resource pool has already been freed at this point. It's also
+ * possible that the core service has disappeared, in which case
+ * there's no work to do here.
+ */
+ if (server != NULL && service->id != 0)
+ free_transport_resources(server, service);
+
+ return 0;
+}
+
+static ssize_t is_server_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", service->is_server);
+}
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", service->id);
+}
+
+static ssize_t dev_protocol_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", service->protocol ?: "");
+}
+
+struct service_enable_work_struct {
+ struct vs_service_device *service;
+ struct work_struct work;
+};
+
+static void service_enable_work(struct work_struct *work)
+{
+ struct service_enable_work_struct *enable_work = container_of(work,
+ struct service_enable_work_struct, work);
+ struct vs_service_device *service = enable_work->service;
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct core_server *server = vs_server_session_core_server(session);
+ bool started;
+ int ret;
+
+ kfree(enable_work);
+
+ if (!server)
+ return;
+ /* Start and enable the service */
+ vs_service_state_lock(server->service);
+ started = vs_service_start(service);
+ if (!started) {
+ vs_service_state_unlock(server->service);
+ vs_put_service(service);
+ return;
+ }
+
+ if (VSERVICE_CORE_STATE_IS_CONNECTED(server->state.state.core))
+ vs_service_enable(service);
+ vs_service_state_unlock(server->service);
+
+ /* Tell the bus to search for a driver that supports the protocol */
+ ret = device_attach(&service->dev);
+ if (ret == 0)
+ dev_warn(&service->dev, "No driver found for protocol: %s\n",
+ service->protocol);
+ kobject_uevent(&service->dev.kobj, KOBJ_CHANGE);
+
+ /* The corresponding vs_get_service was done when the work was queued */
+ vs_put_service(service);
+}
+
+static ssize_t dev_protocol_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct service_enable_work_struct *enable_work;
+
+ /* The protocol can only be set once */
+ if (service->protocol)
+ return -EPERM;
+
+ if (strnlen(buf, VSERVICE_CORE_PROTOCOL_NAME_SIZE) + 1 >
+ VSERVICE_CORE_PROTOCOL_NAME_SIZE)
+ return -E2BIG;
+
+ enable_work = kmalloc(sizeof(*enable_work), GFP_KERNEL);
+ if (!enable_work)
+ return -ENOMEM;
+
+ /* Set the protocol and tell the client about it */
+ service->protocol = kstrdup(buf, GFP_KERNEL);
+ if (!service->protocol) {
+ kfree(enable_work);
+ return -ENOMEM;
+ }
+ strim(service->protocol);
+
+ /*
+ * Registering additional core servers is not allowed. Compare after
+ * trimming so that a write such as "core\n" cannot bypass the check.
+ */
+ if (strcmp(service->protocol, VSERVICE_CORE_PROTOCOL_NAME) == 0) {
+ kfree(service->protocol);
+ service->protocol = NULL;
+ kfree(enable_work);
+ return -EINVAL;
+ }
+
+ /*
+ * Schedule work to enable the service. We can't do it here because
+ * we need to take the core service lock, and doing that here makes
+ * it depend circularly on this sysfs attribute, which can be deleted
+ * with that lock held.
+ *
+ * The corresponding vs_put_service is called in the enable_work
+ * function.
+ */
+ INIT_WORK(&enable_work->work, service_enable_work);
+ enable_work->service = vs_get_service(service);
+ schedule_work(&enable_work->work);
+
+ return count;
+}
+
+static ssize_t service_name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", service->name);
+}
+
+static ssize_t quota_in_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct core_server *server = vs_server_session_core_server(session);
+ int ret;
+ unsigned long in_quota;
+
+ if (!server)
+ return -ENODEV;
+ /*
+ * Don't allow quota to be changed for services that have a driver
+ * bound. We take the alloc lock here because the device lock is held
+ * while creating and destroying this sysfs item. This means we can
+ * race with driver binding, but that doesn't matter: we actually just
+ * want to know that alloc_transport_resources() hasn't run yet, and
+ * that takes the alloc lock.
+ */
+ mutex_lock(&server->alloc_lock);
+ if (service->dev.driver) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ ret = kstrtoul(buf, 0, &in_quota);
+ if (ret < 0)
+ goto out;
+
+ service->in_quota_set = in_quota;
+ ret = count;
+
+out:
+ mutex_unlock(&server->alloc_lock);
+
+ return ret;
+}
+
+static ssize_t quota_in_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", service->recv_quota);
+}
+
+static ssize_t quota_out_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct core_server *server = vs_server_session_core_server(session);
+ int ret;
+ unsigned long out_quota;
+
+ if (!server)
+ return -ENODEV;
+ /* See comment in quota_in_store. */
+ mutex_lock(&server->alloc_lock);
+ if (service->dev.driver) {
+ ret = -EPERM;
+ goto out;
+ }
+
+ ret = kstrtoul(buf, 0, &out_quota);
+ if (ret < 0)
+ goto out;
+
+ service->out_quota_set = out_quota;
+ ret = count;
+
+out:
+ mutex_unlock(&server->alloc_lock);
+
+ return ret;
+}
+
+static ssize_t quota_out_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%u\n", service->send_quota);
+}
+
+static struct device_attribute vs_server_dev_attrs[] = {
+ __ATTR_RO(id),
+ __ATTR_RO(is_server),
+ __ATTR(protocol, S_IRUGO | S_IWUSR,
+ dev_protocol_show, dev_protocol_store),
+ __ATTR_RO(service_name),
+ __ATTR(quota_in, S_IRUGO | S_IWUSR,
+ quota_in_show, quota_in_store),
+ __ATTR(quota_out, S_IRUGO | S_IWUSR,
+ quota_out_show, quota_out_store),
+ __ATTR_NULL
+};
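+
+/*
+ * A server-side service with no protocol yet can be assigned one from
+ * userspace by writing to its protocol attribute, after which the
+ * service is enabled asynchronously. For example (path and protocol
+ * name illustrative only):
+ *
+ *	echo com.example.serial > \
+ *		/sys/bus/vservices-server/devices/<service>/protocol
+ *
+ * The write fails if the protocol is already set (-EPERM), names the
+ * core protocol (-EINVAL), or is too long (-E2BIG).
+ */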
+
+static ssize_t protocol_show(struct device_driver *drv, char *buf)
+{
+ struct vs_service_driver *vsdrv = to_vs_service_driver(drv);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", vsdrv->protocol);
+}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+static struct driver_attribute vs_server_drv_attrs[] = {
+ __ATTR_RO(protocol),
+ __ATTR_NULL
+};
+#else
+static DRIVER_ATTR_RO(protocol);
+
+static struct attribute *vs_server_drv_attrs[] = {
+ &driver_attr_protocol.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vs_server_drv);
+#endif
+
+struct bus_type vs_server_bus_type = {
+ .name = "vservices-server",
+ .dev_attrs = vs_server_dev_attrs,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 14, 0)
+ .drv_attrs = vs_server_drv_attrs,
+#else
+ .drv_groups = vs_server_drv_groups,
+#endif
+ .match = vs_server_bus_match,
+ .probe = vs_server_bus_probe,
+ .remove = vs_server_bus_remove,
+ .uevent = vs_service_bus_uevent,
+};
+EXPORT_SYMBOL(vs_server_bus_type);
+
+/*
+ * Server session driver
+ */
+static int vs_server_session_probe(struct device *dev)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+ struct vs_service_device *service;
+
+ service = __vs_server_core_register_service(session, 0, NULL,
+ VSERVICE_CORE_SERVICE_NAME,
+ VSERVICE_CORE_PROTOCOL_NAME, NULL);
+ if (IS_ERR(service))
+ return PTR_ERR(service);
+
+ return 0;
+}
+
+static int
+vs_server_session_service_added(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ struct core_server *server = vs_server_session_core_server(session);
+ int err;
+
+ if (WARN_ON(!server || !service->id))
+ return -EINVAL;
+
+ err = vs_server_core_queue_service_created(server, service);
+
+ if (err)
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+ "failed to send service_created: %d\n", err);
+
+ return err;
+}
+
+static int
+vs_server_session_service_start(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ struct core_server *server = vs_server_session_core_server(session);
+ int err;
+
+ if (WARN_ON(!server || !service->id))
+ return -EINVAL;
+
+ err = vs_server_core_queue_service_reset_ready(server,
+ VSERVICE_CORE_CORE_MSG_SERVER_READY, service);
+
+ if (err)
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+ "failed to send server_ready: %d\n", err);
+
+ return err;
+}
+
+static int
+vs_server_session_service_local_reset(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ struct core_server *server = vs_server_session_core_server(session);
+ int err;
+
+ if (WARN_ON(!server || !service->id))
+ return -EINVAL;
+
+ err = vs_server_core_queue_service_reset_ready(server,
+ VSERVICE_CORE_CORE_MSG_SERVICE_RESET, service);
+
+ if (err)
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+ "failed to send service_reset: %d\n", err);
+
+ return err;
+}
+
+static int
+vs_server_session_service_removed(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ struct core_server *server = vs_server_session_core_server(session);
+ int err;
+
+ /*
+ * It's possible for the core server to be forcibly removed before
+ * the other services, for example when the underlying transport
+ * vanishes. If that happens, we can end up here with a NULL core
+ * server pointer.
+ */
+ if (!server)
+ return 0;
+
+ if (WARN_ON(!service->id))
+ return -EINVAL;
+
+ err = vs_server_core_queue_service_removed(server, service);
+ if (err)
+ vs_dev_debug(VS_DEBUG_SERVER_CORE, session, &session->dev,
+ "failed to send service_removed: %d\n", err);
+
+ return err;
+}
+
+static struct vs_session_driver vs_server_session_driver = {
+ .driver = {
+ .name = "vservices-server-session",
+ .owner = THIS_MODULE,
+ .bus = &vs_session_bus_type,
+ .probe = vs_server_session_probe,
+ .suppress_bind_attrs = true,
+ },
+ .is_server = true,
+ .service_bus = &vs_server_bus_type,
+ .service_added = vs_server_session_service_added,
+ .service_start = vs_server_session_service_start,
+ .service_local_reset = vs_server_session_service_local_reset,
+ .service_removed = vs_server_session_service_removed,
+};
+
+static int __init vs_core_server_init(void)
+{
+ int ret;
+
+ ret = bus_register(&vs_server_bus_type);
+ if (ret)
+ goto fail_bus_register;
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ vs_devio_server_driver.driver.bus = &vs_server_bus_type;
+ vs_devio_server_driver.driver.owner = THIS_MODULE;
+ ret = driver_register(&vs_devio_server_driver.driver);
+ if (ret)
+ goto fail_devio_register;
+#endif
+
+ ret = driver_register(&vs_server_session_driver.driver);
+ if (ret)
+ goto fail_driver_register;
+
+ ret = vservice_core_server_register(&vs_core_server_driver,
+ "vs_core_server");
+ if (ret)
+ goto fail_core_register;
+
+ vservices_server_root = kobject_create_and_add("server-sessions",
+ vservices_root);
+ if (!vservices_server_root) {
+ ret = -ENOMEM;
+ goto fail_create_root;
+ }
+
+ return 0;
+
+fail_create_root:
+ vservice_core_server_unregister(&vs_core_server_driver);
+fail_core_register:
+ driver_unregister(&vs_server_session_driver.driver);
+fail_driver_register:
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ driver_unregister(&vs_devio_server_driver.driver);
+ vs_devio_server_driver.driver.bus = NULL;
+ vs_devio_server_driver.driver.owner = NULL;
+fail_devio_register:
+#endif
+ bus_unregister(&vs_server_bus_type);
+fail_bus_register:
+ return ret;
+}
+
+static void __exit vs_core_server_exit(void)
+{
+ kobject_put(vservices_server_root);
+ vservice_core_server_unregister(&vs_core_server_driver);
+ driver_unregister(&vs_server_session_driver.driver);
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ driver_unregister(&vs_devio_server_driver.driver);
+ vs_devio_server_driver.driver.bus = NULL;
+ vs_devio_server_driver.driver.owner = NULL;
+#endif
+ bus_unregister(&vs_server_bus_type);
+}
+
+subsys_initcall(vs_core_server_init);
+module_exit(vs_core_server_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Core Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/debug.h b/drivers/vservices/debug.h
new file mode 100644
index 0000000..b379b04
--- /dev/null
+++ b/drivers/vservices/debug.h
@@ -0,0 +1,74 @@
+/*
+ * drivers/vservices/debug.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * Debugging macros and support functions for Virtual Services.
+ */
+#ifndef _VSERVICES_DEBUG_H
+#define _VSERVICES_DEBUG_H
+
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 38)
+#include <linux/printk.h>
+#else
+#ifndef no_printk
+#define no_printk(format, args...) do { } while (0)
+#endif
+#endif
+
+#include <vservices/session.h>
+#include "transport.h"
+
+#define VS_DEBUG_TRANSPORT (1 << 0)
+#define VS_DEBUG_TRANSPORT_MESSAGES (1 << 1)
+#define VS_DEBUG_SESSION (1 << 2)
+#define VS_DEBUG_CLIENT (1 << 3)
+#define VS_DEBUG_CLIENT_CORE (1 << 4)
+#define VS_DEBUG_SERVER (1 << 5)
+#define VS_DEBUG_SERVER_CORE (1 << 6)
+#define VS_DEBUG_PROTOCOL (1 << 7)
+#define VS_DEBUG_ALL 0xff
+
+#ifdef CONFIG_VSERVICES_DEBUG
+
+#define vs_debug(type, session, format, args...) \
+ do { \
+ if ((session)->debug_mask & (type)) \
+ dev_dbg(&(session)->dev, format, ##args); \
+ } while (0)
+
+#define vs_dev_debug(type, session, dev, format, args...) \
+ do { \
+ if ((session)->debug_mask & (type)) \
+ dev_dbg(dev, format, ##args); \
+ } while (0)
+
+static inline void vs_debug_dump_mbuf(struct vs_session_device *session,
+ struct vs_mbuf *mbuf)
+{
+ if (session->debug_mask & VS_DEBUG_TRANSPORT_MESSAGES)
+ print_hex_dump_bytes("msg:", DUMP_PREFIX_OFFSET,
+ mbuf->data, mbuf->size);
+}
+
+#else
+
+/* Dummy versions: Use no_printk to retain type/format string checking */
+#define vs_debug(type, session, format, args...) \
+	do { (void)session; no_printk(format, ##args); } while (0)
+
+#define vs_dev_debug(type, session, dev, format, args...) \
+	do { (void)session; (void)dev; no_printk(format, ##args); } while (0)
+
+static inline void vs_debug_dump_mbuf(struct vs_session_device *session,
+ struct vs_mbuf *mbuf) {}
+
+#endif /* CONFIG_VSERVICES_DEBUG */
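+
+/*
+ * Typical usage, assuming a struct vs_session_device pointer is in
+ * scope and the corresponding bit is set in its debug_mask (the format
+ * string below is illustrative):
+ *
+ *	vs_debug(VS_DEBUG_SESSION, session, "state changed to %d\n", st);
+ *
+ * When CONFIG_VSERVICES_DEBUG is disabled this compiles down to a
+ * no_printk() call, keeping format-string checking without any output.
+ */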
+
+#endif /* _VSERVICES_DEBUG_H */
diff --git a/drivers/vservices/devio.c b/drivers/vservices/devio.c
new file mode 100644
index 0000000..b3ed4ab
--- /dev/null
+++ b/drivers/vservices/devio.c
@@ -0,0 +1,1059 @@
+/*
+ * devio.c - cdev I/O for service devices
+ *
+ * Copyright (c) 2016 Cog Systems Pty Ltd
+ * Author: Philip Derrin <philip@cog.systems>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/cdev.h>
+#include <linux/pagemap.h>
+#include <linux/fs.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/module.h>
+#include <linux/spinlock.h>
+#include <linux/uio.h>
+#include <linux/uaccess.h>
+#include <linux/poll.h>
+#include <linux/security.h>
+#include <linux/compat.h>
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+#include <vservices/ioctl.h>
+#include "session.h"
+
+#define VSERVICES_DEVICE_MAX (VS_MAX_SERVICES * VS_MAX_SESSIONS)
+
+struct vs_devio_priv {
+ struct kref kref;
+	/* running: set at start, cleared at reset; reset: latched until
+	 * the device is closed and reopened. */
+	bool running, reset;
+
+ /* Receive queue */
+ wait_queue_head_t recv_wq;
+ atomic_t notify_pending;
+ struct list_head recv_queue;
+};
+
+static void
+vs_devio_priv_free(struct kref *kref)
+{
+ struct vs_devio_priv *priv = container_of(kref, struct vs_devio_priv,
+ kref);
+
+ WARN_ON(priv->running);
+ WARN_ON(!list_empty_careful(&priv->recv_queue));
+ WARN_ON(waitqueue_active(&priv->recv_wq));
+
+ kfree(priv);
+}
+
+static void vs_devio_priv_put(struct vs_devio_priv *priv)
+{
+ kref_put(&priv->kref, vs_devio_priv_free);
+}
+
+static int
+vs_devio_service_probe(struct vs_service_device *service)
+{
+ struct vs_devio_priv *priv;
+
+ priv = kmalloc(sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ kref_init(&priv->kref);
+ priv->running = false;
+ priv->reset = false;
+ init_waitqueue_head(&priv->recv_wq);
+ atomic_set(&priv->notify_pending, 0);
+ INIT_LIST_HEAD(&priv->recv_queue);
+
+ dev_set_drvdata(&service->dev, priv);
+
+ wake_up(&service->quota_wq);
+
+ return 0;
+}
+
+static int
+vs_devio_service_remove(struct vs_service_device *service)
+{
+ struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+ WARN_ON(priv->running);
+ WARN_ON(!list_empty_careful(&priv->recv_queue));
+ WARN_ON(waitqueue_active(&priv->recv_wq));
+
+ vs_devio_priv_put(priv);
+
+ return 0;
+}
+
+static int
+vs_devio_service_receive(struct vs_service_device *service,
+ struct vs_mbuf *mbuf)
+{
+ struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+ WARN_ON(!priv->running);
+
+ spin_lock(&priv->recv_wq.lock);
+ list_add_tail(&mbuf->queue, &priv->recv_queue);
+ wake_up_locked(&priv->recv_wq);
+ spin_unlock(&priv->recv_wq.lock);
+
+ return 0;
+}
+
+static void
+vs_devio_service_notify(struct vs_service_device *service, u32 flags)
+{
+ struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+ int old, cur;
+
+ WARN_ON(!priv->running);
+
+ if (!flags)
+ return;
+
+	/*
+	 * Open-coded atomic_or(): OR the flags into notify_pending with
+	 * a cmpxchg loop, so concurrent notifiers can't lose each
+	 * other's bits.
+	 */
+ cur = atomic_read(&priv->notify_pending);
+ while ((old = atomic_cmpxchg(&priv->notify_pending,
+ cur, cur | flags)) != cur)
+ cur = old;
+
+ wake_up(&priv->recv_wq);
+}
+
+static void
+vs_devio_service_start(struct vs_service_device *service)
+{
+ struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+
+ if (!priv->reset) {
+ WARN_ON(priv->running);
+ priv->running = true;
+ wake_up(&service->quota_wq);
+ }
+}
+
+static void
+vs_devio_service_reset(struct vs_service_device *service)
+{
+ struct vs_devio_priv *priv = dev_get_drvdata(&service->dev);
+ struct vs_mbuf *mbuf, *tmp;
+
+ WARN_ON(!priv->running && !priv->reset);
+
+ /*
+ * Mark the service as being in reset. This flag can never be cleared
+ * on an open device; the user must acknowledge the reset by closing
+ * and reopening the device.
+ */
+ priv->reset = true;
+ priv->running = false;
+
+ spin_lock_irq(&priv->recv_wq.lock);
+ list_for_each_entry_safe(mbuf, tmp, &priv->recv_queue, queue)
+ vs_service_free_mbuf(service, mbuf);
+ INIT_LIST_HEAD(&priv->recv_queue);
+ spin_unlock_irq(&priv->recv_wq.lock);
+ wake_up_all(&priv->recv_wq);
+}
+
+/*
+ * This driver will be registered by the core server module, which must also
+ * set its bus and owner function pointers.
+ */
+struct vs_service_driver vs_devio_server_driver = {
+ /* No protocol, so the normal bus match will never bind this. */
+ .protocol = NULL,
+ .is_server = true,
+ .rx_atomic = true,
+
+ .probe = vs_devio_service_probe,
+ .remove = vs_devio_service_remove,
+ .receive = vs_devio_service_receive,
+ .notify = vs_devio_service_notify,
+ .start = vs_devio_service_start,
+ .reset = vs_devio_service_reset,
+
+ /*
+ * Set reasonable default quotas. These can be overridden by passing
+ * nonzero values to IOCTL_VS_BIND_SERVER, which will set the
+ * service's *_quota_set fields.
+ */
+ .in_quota_min = 1,
+ .in_quota_best = 8,
+ .out_quota_min = 1,
+ .out_quota_best = 8,
+
+	/*
+	 * Mark the notify counts as invalid; the service's own values
+	 * will be used instead.
+	 */
+ .in_notify_count = (unsigned)-1,
+ .out_notify_count = (unsigned)-1,
+
+ .driver = {
+ .name = "vservices-server-devio",
+ .owner = NULL, /* set by core server */
+ .bus = NULL, /* set by core server */
+ .suppress_bind_attrs = true, /* see vs_devio_poll */
+ },
+};
+EXPORT_SYMBOL_GPL(vs_devio_server_driver);
+
+static int
+vs_devio_bind_server(struct vs_service_device *service,
+ struct vs_ioctl_bind *bind)
+{
+ int ret = -ENODEV;
+
+ /* Ensure the server module is loaded and the driver is registered. */
+ if (!try_module_get(vs_devio_server_driver.driver.owner))
+ goto fail_module_get;
+
+ device_lock(&service->dev);
+ ret = -EBUSY;
+ if (service->dev.driver != NULL)
+ goto fail_device_unbound;
+
+ /* Set up the quota and notify counts. */
+ service->in_quota_set = bind->recv_quota;
+ service->out_quota_set = bind->send_quota;
+ service->notify_send_bits = bind->send_notify_bits;
+ service->notify_recv_bits = bind->recv_notify_bits;
+
+	/*
+	 * Manually probe the driver: it has a NULL protocol, so the bus
+	 * match function will never bind it for us.
+	 */
+ service->dev.driver = &vs_devio_server_driver.driver;
+ ret = service->dev.bus->probe(&service->dev);
+ if (ret < 0)
+ goto fail_probe_driver;
+
+ ret = device_bind_driver(&service->dev);
+ if (ret < 0)
+ goto fail_bind_driver;
+
+ /* Pass the allocated quotas back to the user. */
+ bind->recv_quota = service->recv_quota;
+ bind->send_quota = service->send_quota;
+ bind->msg_size = vs_service_max_mbuf_size(service);
+
+ device_unlock(&service->dev);
+ module_put(vs_devio_server_driver.driver.owner);
+
+ return 0;
+
+fail_bind_driver:
+	/* Don't clobber the bind error with remove()'s return value. */
+	service->dev.bus->remove(&service->dev);
+fail_probe_driver:
+ service->dev.driver = NULL;
+fail_device_unbound:
+ device_unlock(&service->dev);
+ module_put(vs_devio_server_driver.driver.owner);
+fail_module_get:
+ return ret;
+}
+
+/*
+ * This driver will be registered by the core client module, which must also
+ * set its bus and owner pointers.
+ */
+struct vs_service_driver vs_devio_client_driver = {
+ /* No protocol, so the normal bus match will never bind this. */
+ .protocol = NULL,
+ .is_server = false,
+ .rx_atomic = true,
+
+ .probe = vs_devio_service_probe,
+ .remove = vs_devio_service_remove,
+ .receive = vs_devio_service_receive,
+ .notify = vs_devio_service_notify,
+ .start = vs_devio_service_start,
+ .reset = vs_devio_service_reset,
+
+ .driver = {
+ .name = "vservices-client-devio",
+ .owner = NULL, /* set by core client */
+ .bus = NULL, /* set by core client */
+ .suppress_bind_attrs = true, /* see vs_devio_poll */
+ },
+};
+EXPORT_SYMBOL_GPL(vs_devio_client_driver);
+
+static int
+vs_devio_bind_client(struct vs_service_device *service,
+ struct vs_ioctl_bind *bind)
+{
+ int ret = -ENODEV;
+
+ /* Ensure the client module is loaded and the driver is registered. */
+ if (!try_module_get(vs_devio_client_driver.driver.owner))
+ goto fail_module_get;
+
+ device_lock(&service->dev);
+ ret = -EBUSY;
+ if (service->dev.driver != NULL)
+ goto fail_device_unbound;
+
+ /* Manually probe the driver. */
+ service->dev.driver = &vs_devio_client_driver.driver;
+ ret = service->dev.bus->probe(&service->dev);
+ if (ret < 0)
+ goto fail_probe_driver;
+
+ ret = device_bind_driver(&service->dev);
+ if (ret < 0)
+ goto fail_bind_driver;
+
+ /* Pass the allocated quotas back to the user. */
+ bind->recv_quota = service->recv_quota;
+ bind->send_quota = service->send_quota;
+ bind->msg_size = vs_service_max_mbuf_size(service);
+ bind->send_notify_bits = service->notify_send_bits;
+ bind->recv_notify_bits = service->notify_recv_bits;
+
+ device_unlock(&service->dev);
+ module_put(vs_devio_client_driver.driver.owner);
+
+ return 0;
+
+fail_bind_driver:
+	/* Don't clobber the bind error with remove()'s return value. */
+	service->dev.bus->remove(&service->dev);
+fail_probe_driver:
+ service->dev.driver = NULL;
+fail_device_unbound:
+ device_unlock(&service->dev);
+ module_put(vs_devio_client_driver.driver.owner);
+fail_module_get:
+ return ret;
+}
+
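+/*
+ * Look up the devio private data for a service and take a reference to
+ * it. The device lock keeps the devio driver bound while dev.driver is
+ * inspected; the state lock orders this against probe and remove
+ * updating the drvdata pointer.
+ */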
+static struct vs_devio_priv *
+vs_devio_priv_get_from_service(struct vs_service_device *service)
+{
+ struct vs_devio_priv *priv = NULL;
+ struct device_driver *drv;
+
+ if (!service)
+ return NULL;
+
+ device_lock(&service->dev);
+ drv = service->dev.driver;
+
+ if ((drv == &vs_devio_client_driver.driver) ||
+ (drv == &vs_devio_server_driver.driver)) {
+ vs_service_state_lock(service);
+ priv = dev_get_drvdata(&service->dev);
+ if (priv)
+ kref_get(&priv->kref);
+ vs_service_state_unlock(service);
+ }
+
+ device_unlock(&service->dev);
+
+ return priv;
+}
+
+static int
+vs_devio_open(struct inode *inode, struct file *file)
+{
+ struct vs_service_device *service;
+
+ if (imajor(inode) != vservices_cdev_major)
+ return -ENODEV;
+
+ service = vs_service_lookup_by_devt(inode->i_rdev);
+ if (!service)
+ return -ENODEV;
+
+ file->private_data = service;
+
+ return 0;
+}
+
+static int
+vs_devio_release(struct inode *inode, struct file *file)
+{
+ struct vs_service_device *service = file->private_data;
+
+ if (service) {
+ struct vs_devio_priv *priv =
+ vs_devio_priv_get_from_service(service);
+
+ if (priv) {
+ device_release_driver(&service->dev);
+ vs_devio_priv_put(priv);
+ }
+
+ file->private_data = NULL;
+ vs_put_service(service);
+ }
+
+ return 0;
+}
+
+static struct iovec *
+vs_devio_check_iov(struct vs_ioctl_iovec *io, bool is_send, ssize_t *total)
+{
+ struct iovec *iov;
+ unsigned i;
+ int ret;
+
+ if (io->iovcnt > UIO_MAXIOV)
+ return ERR_PTR(-EINVAL);
+
+ iov = kmalloc(sizeof(*iov) * io->iovcnt, GFP_KERNEL);
+ if (!iov)
+ return ERR_PTR(-ENOMEM);
+
+ if (copy_from_user(iov, io->iov, sizeof(*iov) * io->iovcnt)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ *total = 0;
+ for (i = 0; i < io->iovcnt; i++) {
+ ssize_t iov_len = (ssize_t)iov[i].iov_len;
+
+ if (iov_len > MAX_RW_COUNT - *total) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!access_ok(is_send ? VERIFY_READ : VERIFY_WRITE,
+ iov[i].iov_base, iov_len)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ *total += iov_len;
+ }
+
+ return iov;
+
+fail:
+ kfree(iov);
+ return ERR_PTR(ret);
+}
+
+static ssize_t
+vs_devio_send(struct vs_service_device *service, struct iovec *iov,
+ size_t iovcnt, ssize_t to_send, bool nonblocking)
+{
+ struct vs_mbuf *mbuf = NULL;
+ struct vs_devio_priv *priv;
+ unsigned i;
+ ssize_t offset = 0;
+ ssize_t ret;
+ DEFINE_WAIT(wait);
+
+ priv = vs_devio_priv_get_from_service(service);
+ ret = -ENODEV;
+ if (!priv)
+ goto fail_priv_get;
+
+ vs_service_state_lock(service);
+
+	/*
+	 * Waiting allocation: block until send quota is available, then
+	 * allocate the mbuf. This is open-coded because devio has no
+	 * real state structure or base state.
+	 */
+ ret = 0;
+ while (!vs_service_send_mbufs_available(service)) {
+ if (nonblocking) {
+ ret = -EAGAIN;
+ break;
+ }
+
+ if (signal_pending(current)) {
+ ret = -ERESTARTSYS;
+ break;
+ }
+
+ prepare_to_wait_exclusive(&service->quota_wq, &wait,
+ TASK_INTERRUPTIBLE);
+
+ vs_service_state_unlock(service);
+ schedule();
+ vs_service_state_lock(service);
+
+ if (priv->reset) {
+ ret = -ECONNRESET;
+ break;
+ }
+
+ if (!priv->running) {
+ ret = -ENOTCONN;
+ break;
+ }
+ }
+ finish_wait(&service->quota_wq, &wait);
+
+ if (ret)
+ goto fail_alloc;
+
+ mbuf = vs_service_alloc_mbuf(service, to_send, GFP_KERNEL);
+ if (IS_ERR(mbuf)) {
+ ret = PTR_ERR(mbuf);
+ goto fail_alloc;
+ }
+
+ /* Ready to send; copy data into the mbuf. */
+ ret = -EFAULT;
+ for (i = 0; i < iovcnt; i++) {
+ if (copy_from_user(mbuf->data + offset, iov[i].iov_base,
+ iov[i].iov_len))
+ goto fail_copy;
+ offset += iov[i].iov_len;
+ }
+ mbuf->size = to_send;
+
+ /* Send the message. */
+ ret = vs_service_send(service, mbuf);
+ if (ret < 0)
+ goto fail_send;
+
+ /* Wake the next waiter, if there's more quota available. */
+ if (waitqueue_active(&service->quota_wq) &&
+ vs_service_send_mbufs_available(service) > 0)
+ wake_up(&service->quota_wq);
+
+ vs_service_state_unlock(service);
+ vs_devio_priv_put(priv);
+
+ return to_send;
+
+fail_send:
+fail_copy:
+ vs_service_free_mbuf(service, mbuf);
+ wake_up(&service->quota_wq);
+fail_alloc:
+ vs_service_state_unlock(service);
+ vs_devio_priv_put(priv);
+fail_priv_get:
+ return ret;
+}
+
+static ssize_t
+vs_devio_recv(struct vs_service_device *service, struct iovec *iov,
+ size_t iovcnt, u32 *notify_bits, ssize_t recv_space,
+ bool nonblocking)
+{
+ struct vs_mbuf *mbuf = NULL;
+ struct vs_devio_priv *priv;
+ unsigned i;
+ ssize_t offset = 0;
+ ssize_t ret;
+ DEFINE_WAIT(wait);
+
+ priv = vs_devio_priv_get_from_service(service);
+ ret = -ENODEV;
+ if (!priv)
+ goto fail_priv_get;
+
+ /* Take the recv_wq lock, which also protects recv_queue. */
+ spin_lock_irq(&priv->recv_wq.lock);
+
+ /* Wait for a message, notification, or reset. */
+ ret = wait_event_interruptible_exclusive_locked_irq(priv->recv_wq,
+ !list_empty(&priv->recv_queue) || priv->reset ||
+ atomic_read(&priv->notify_pending) || nonblocking);
+
+ if (priv->reset)
+ ret = -ECONNRESET; /* Service reset */
+ else if (!ret && list_empty(&priv->recv_queue))
+ ret = -EAGAIN; /* Nonblocking, or notification */
+
+ if (ret < 0) {
+ spin_unlock_irq(&priv->recv_wq.lock);
+ goto no_mbuf;
+ }
+
+ /* Take the first mbuf from the list, and check its size. */
+ mbuf = list_first_entry(&priv->recv_queue, struct vs_mbuf, queue);
+ if (mbuf->size > recv_space) {
+ spin_unlock_irq(&priv->recv_wq.lock);
+ ret = -EMSGSIZE;
+ goto fail_msg_size;
+ }
+ list_del_init(&mbuf->queue);
+
+ spin_unlock_irq(&priv->recv_wq.lock);
+
+ /* Copy to user. */
+ ret = -EFAULT;
+ for (i = 0; (mbuf->size > offset) && (i < iovcnt); i++) {
+ size_t len = min(mbuf->size - offset, iov[i].iov_len);
+ if (copy_to_user(iov[i].iov_base, mbuf->data + offset, len))
+ goto fail_copy;
+ offset += len;
+ }
+ ret = offset;
+
+no_mbuf:
+ /*
+ * Read and clear the pending notification bits. If any notifications
+ * are received, don't return an error, even if we failed to receive a
+ * message.
+ */
+ *notify_bits = atomic_xchg(&priv->notify_pending, 0);
+ if ((ret < 0) && *notify_bits)
+ ret = 0;
+
+fail_copy:
+ if (mbuf)
+ vs_service_free_mbuf(service, mbuf);
+fail_msg_size:
+ vs_devio_priv_put(priv);
+fail_priv_get:
+ return ret;
+}
+
+static int
+vs_devio_check_perms(struct file *file, unsigned flags)
+{
+	if ((flags & MAY_READ) && !(file->f_mode & FMODE_READ))
+		return -EBADF;
+
+	if ((flags & MAY_WRITE) && !(file->f_mode & FMODE_WRITE))
+		return -EBADF;
+
+ return security_file_permission(file, flags);
+}
+
+static long
+vs_devio_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ void __user *ptr = (void __user *)arg;
+ struct vs_service_device *service = file->private_data;
+ struct vs_ioctl_bind bind;
+ struct vs_ioctl_iovec io;
+ u32 flags;
+ long ret;
+ ssize_t iov_total;
+ struct iovec *iov;
+
+ if (!service)
+ return -ENODEV;
+
+ switch (cmd) {
+ case IOCTL_VS_RESET_SERVICE:
+ ret = vs_devio_check_perms(file, MAY_WRITE);
+ if (ret < 0)
+ break;
+ ret = vs_service_reset(service, service);
+ break;
+ case IOCTL_VS_GET_NAME:
+ ret = vs_devio_check_perms(file, MAY_READ);
+ if (ret < 0)
+ break;
+ if (service->name != NULL) {
+ size_t len = strnlen(service->name,
+ _IOC_SIZE(IOCTL_VS_GET_NAME) - 1);
+ if (copy_to_user(ptr, service->name, len + 1))
+ ret = -EFAULT;
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case IOCTL_VS_GET_PROTOCOL:
+ ret = vs_devio_check_perms(file, MAY_READ);
+ if (ret < 0)
+ break;
+ if (service->protocol != NULL) {
+ size_t len = strnlen(service->protocol,
+ _IOC_SIZE(IOCTL_VS_GET_PROTOCOL) - 1);
+ if (copy_to_user(ptr, service->protocol, len + 1))
+ ret = -EFAULT;
+ } else {
+ ret = -EINVAL;
+ }
+ break;
+ case IOCTL_VS_BIND_CLIENT:
+ ret = vs_devio_check_perms(file, MAY_EXEC);
+ if (ret < 0)
+ break;
+ ret = vs_devio_bind_client(service, &bind);
+ if (!ret && copy_to_user(ptr, &bind, sizeof(bind)))
+ ret = -EFAULT;
+ break;
+ case IOCTL_VS_BIND_SERVER:
+ ret = vs_devio_check_perms(file, MAY_EXEC);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&bind, ptr, sizeof(bind))) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = vs_devio_bind_server(service, &bind);
+ if (!ret && copy_to_user(ptr, &bind, sizeof(bind)))
+ ret = -EFAULT;
+ break;
+ case IOCTL_VS_NOTIFY:
+ ret = vs_devio_check_perms(file, MAY_WRITE);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&flags, ptr, sizeof(flags))) {
+ ret = -EFAULT;
+ break;
+ }
+ ret = vs_service_notify(service, flags);
+ break;
+ case IOCTL_VS_SEND:
+ ret = vs_devio_check_perms(file, MAY_WRITE);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&io, ptr, sizeof(io))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ iov = vs_devio_check_iov(&io, true, &iov_total);
+ if (IS_ERR(iov)) {
+ ret = PTR_ERR(iov);
+ break;
+ }
+
+ ret = vs_devio_send(service, iov, io.iovcnt, iov_total,
+ file->f_flags & O_NONBLOCK);
+ kfree(iov);
+ break;
+ case IOCTL_VS_RECV:
+ ret = vs_devio_check_perms(file, MAY_READ);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&io, ptr, sizeof(io))) {
+ ret = -EFAULT;
+ break;
+ }
+
+		/* Receiving: the iov buffers are written to, not read. */
+		iov = vs_devio_check_iov(&io, false, &iov_total);
+ if (IS_ERR(iov)) {
+ ret = PTR_ERR(iov);
+ break;
+ }
+
+ ret = vs_devio_recv(service, iov, io.iovcnt,
+ &io.notify_bits, iov_total,
+ file->f_flags & O_NONBLOCK);
+ kfree(iov);
+
+ if (ret >= 0) {
+ u32 __user *notify_bits_ptr = ptr + offsetof(
+ struct vs_ioctl_iovec, notify_bits);
+ if (copy_to_user(notify_bits_ptr, &io.notify_bits,
+ sizeof(io.notify_bits)))
+ ret = -EFAULT;
+ }
+ break;
+ default:
+ dev_dbg(&service->dev, "Unknown ioctl %#x, arg: %lx\n", cmd,
+ arg);
+ ret = -ENOSYS;
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_COMPAT
+
+struct vs_compat_ioctl_bind {
+ __u32 send_quota;
+ __u32 recv_quota;
+ __u32 send_notify_bits;
+ __u32 recv_notify_bits;
+ compat_size_t msg_size;
+};
+
+#define compat_ioctl_bind_conv(dest, src) ({ \
+ dest.send_quota = src.send_quota; \
+ dest.recv_quota = src.recv_quota; \
+ dest.send_notify_bits = src.send_notify_bits; \
+ dest.recv_notify_bits = src.recv_notify_bits; \
+ dest.msg_size = (compat_size_t)src.msg_size; \
+})
+
+#define COMPAT_IOCTL_VS_BIND_CLIENT _IOR('4', 3, struct vs_compat_ioctl_bind)
+#define COMPAT_IOCTL_VS_BIND_SERVER _IOWR('4', 4, struct vs_compat_ioctl_bind)
+
+struct vs_compat_ioctl_iovec {
+ union {
+ __u32 iovcnt; /* input */
+ __u32 notify_bits; /* output (recv only) */
+ };
+ compat_uptr_t iov;
+};
+
+#define COMPAT_IOCTL_VS_SEND \
+ _IOW('4', 6, struct vs_compat_ioctl_iovec)
+#define COMPAT_IOCTL_VS_RECV \
+ _IOWR('4', 7, struct vs_compat_ioctl_iovec)
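+
+/*
+ * These command numbers differ from the native IOCTL_VS_SEND and
+ * IOCTL_VS_RECV because _IOW()/_IOWR() encode the argument size, and
+ * the compat structs are smaller for 32-bit userspace.
+ */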
+
+static struct iovec *
+vs_devio_check_compat_iov(struct vs_compat_ioctl_iovec *c_io,
+ bool is_send, ssize_t *total)
+{
+ struct iovec *iov;
+ struct compat_iovec *c_iov;
+
+ unsigned i;
+ int ret;
+
+ if (c_io->iovcnt > UIO_MAXIOV)
+ return ERR_PTR(-EINVAL);
+
+ c_iov = kzalloc(sizeof(*c_iov) * c_io->iovcnt, GFP_KERNEL);
+ if (!c_iov)
+ return ERR_PTR(-ENOMEM);
+
+ iov = kzalloc(sizeof(*iov) * c_io->iovcnt, GFP_KERNEL);
+ if (!iov) {
+ kfree(c_iov);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (copy_from_user(c_iov, (struct compat_iovec __user *)
+ compat_ptr(c_io->iov), sizeof(*c_iov) * c_io->iovcnt)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ *total = 0;
+ for (i = 0; i < c_io->iovcnt; i++) {
+ ssize_t iov_len;
+		iov[i].iov_base = compat_ptr(c_iov[i].iov_base);
+		iov[i].iov_len = (compat_size_t)c_iov[i].iov_len;
+
+ iov_len = (ssize_t)iov[i].iov_len;
+
+ if (iov_len > MAX_RW_COUNT - *total) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!access_ok(is_send ? VERIFY_READ : VERIFY_WRITE,
+ iov[i].iov_base, iov_len)) {
+ ret = -EFAULT;
+ goto fail;
+ }
+
+ *total += iov_len;
+ }
+
+	kfree(c_iov);
+ return iov;
+
+fail:
+ kfree(c_iov);
+ kfree(iov);
+ return ERR_PTR(ret);
+}
+
+static long
+vs_devio_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+ void __user *ptr = (void __user *)arg;
+ struct vs_service_device *service = file->private_data;
+ struct vs_ioctl_bind bind;
+ struct vs_compat_ioctl_bind compat_bind;
+ struct vs_compat_ioctl_iovec compat_io;
+ long ret;
+ ssize_t iov_total;
+ struct iovec *iov;
+
+ if (!service)
+ return -ENODEV;
+
+ switch (cmd) {
+ case IOCTL_VS_RESET_SERVICE:
+ case IOCTL_VS_GET_NAME:
+ case IOCTL_VS_GET_PROTOCOL:
+		return vs_devio_ioctl(file, cmd, arg);
+ case COMPAT_IOCTL_VS_SEND:
+ ret = vs_devio_check_perms(file, MAY_WRITE);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&compat_io, ptr, sizeof(compat_io))) {
+ ret = -EFAULT;
+ break;
+ }
+
+ iov = vs_devio_check_compat_iov(&compat_io, true, &iov_total);
+ if (IS_ERR(iov)) {
+ ret = PTR_ERR(iov);
+ break;
+ }
+
+ ret = vs_devio_send(service, iov, compat_io.iovcnt, iov_total,
+ file->f_flags & O_NONBLOCK);
+ kfree(iov);
+
+ break;
+ case COMPAT_IOCTL_VS_RECV:
+ ret = vs_devio_check_perms(file, MAY_READ);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&compat_io, ptr, sizeof(compat_io))) {
+ ret = -EFAULT;
+ break;
+ }
+
+		iov = vs_devio_check_compat_iov(&compat_io, false, &iov_total);
+ if (IS_ERR(iov)) {
+ ret = PTR_ERR(iov);
+ break;
+ }
+
+ ret = vs_devio_recv(service, iov, compat_io.iovcnt,
+ &compat_io.notify_bits, iov_total,
+ file->f_flags & O_NONBLOCK);
+ kfree(iov);
+
+ if (ret >= 0) {
+ u32 __user *notify_bits_ptr = ptr + offsetof(
+ struct vs_compat_ioctl_iovec, notify_bits);
+ if (copy_to_user(notify_bits_ptr, &compat_io.notify_bits,
+ sizeof(compat_io.notify_bits)))
+ ret = -EFAULT;
+ }
+ break;
+ case COMPAT_IOCTL_VS_BIND_CLIENT:
+ ret = vs_devio_check_perms(file, MAY_EXEC);
+ if (ret < 0)
+ break;
+ ret = vs_devio_bind_client(service, &bind);
+ compat_ioctl_bind_conv(compat_bind, bind);
+ if (!ret && copy_to_user(ptr, &compat_bind,
+ sizeof(compat_bind)))
+ ret = -EFAULT;
+ break;
+ case COMPAT_IOCTL_VS_BIND_SERVER:
+ ret = vs_devio_check_perms(file, MAY_EXEC);
+ if (ret < 0)
+ break;
+ if (copy_from_user(&compat_bind, ptr, sizeof(compat_bind))) {
+ ret = -EFAULT;
+ break;
+ }
+ compat_ioctl_bind_conv(bind, compat_bind);
+ ret = vs_devio_bind_server(service, &bind);
+ compat_ioctl_bind_conv(compat_bind, bind);
+ if (!ret && copy_to_user(ptr, &compat_bind,
+ sizeof(compat_bind)))
+ ret = -EFAULT;
+ break;
+ default:
+ dev_dbg(&service->dev, "Unknown ioctl %#x, arg: %lx\n", cmd,
+ arg);
+ ret = -ENOSYS;
+ break;
+ }
+
+ return ret;
+}
+
+#endif /* CONFIG_COMPAT */
+
+static unsigned int
+vs_devio_poll(struct file *file, struct poll_table_struct *wait)
+{
+ struct vs_service_device *service = file->private_data;
+ struct vs_devio_priv *priv = vs_devio_priv_get_from_service(service);
+ unsigned int flags = 0;
+
+ poll_wait(file, &service->quota_wq, wait);
+
+ if (priv) {
+ /*
+ * Note: there is no way for us to ensure that all poll
+		 * waiters on a given waitqueue have gone away, other than to
+ * actually close the file. So, this poll_wait() is only safe
+ * if we never release our claim on the service before the
+ * file is closed.
+ *
+ * We try to guarantee this by only unbinding the devio driver
+ * on close, and setting suppress_bind_attrs in the driver so
+ * root can't unbind us with sysfs.
+ */
+ poll_wait(file, &priv->recv_wq, wait);
+
+ if (priv->reset) {
+ /* Service reset; raise poll error. */
+ flags |= POLLERR | POLLHUP;
+ } else if (priv->running) {
+ if (!list_empty_careful(&priv->recv_queue))
+ flags |= POLLRDNORM | POLLIN;
+ if (atomic_read(&priv->notify_pending))
+ flags |= POLLRDNORM | POLLIN;
+ if (vs_service_send_mbufs_available(service) > 0)
+ flags |= POLLWRNORM | POLLOUT;
+ }
+
+ vs_devio_priv_put(priv);
+ } else {
+ /* No driver attached. Return error flags. */
+ flags |= POLLERR | POLLHUP;
+ }
+
+ return flags;
+}
+
+static const struct file_operations vs_fops = {
+ .owner = THIS_MODULE,
+ .open = vs_devio_open,
+ .release = vs_devio_release,
+ .unlocked_ioctl = vs_devio_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = vs_devio_compat_ioctl,
+#endif
+ .poll = vs_devio_poll,
+};
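+
+/*
+ * A minimal sketch of how userspace might drive this interface; the
+ * device path is illustrative and error handling is omitted:
+ *
+ *	int fd = open("/dev/vservices/mysvc", O_RDWR);
+ *	struct vs_ioctl_bind bind = { 0 };     (zero: driver defaults)
+ *	ioctl(fd, IOCTL_VS_BIND_SERVER, &bind);
+ *	struct iovec iov = { .iov_base = buf, .iov_len = bind.msg_size };
+ *	struct vs_ioctl_iovec io = { .iovcnt = 1, .iov = &iov };
+ *	ioctl(fd, IOCTL_VS_SEND, &io);
+ *	ioctl(fd, IOCTL_VS_RECV, &io);         (sets io.notify_bits)
+ */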
+
+int vservices_cdev_major;
+static struct cdev vs_cdev;
+
+int __init
+vs_devio_init(void)
+{
+ dev_t dev;
+ int r;
+
+ r = alloc_chrdev_region(&dev, 0, VSERVICES_DEVICE_MAX,
+ "vs_service");
+ if (r < 0)
+ goto fail_alloc_chrdev;
+ vservices_cdev_major = MAJOR(dev);
+
+ cdev_init(&vs_cdev, &vs_fops);
+ r = cdev_add(&vs_cdev, dev, VSERVICES_DEVICE_MAX);
+ if (r < 0)
+ goto fail_cdev_add;
+
+ return 0;
+
+fail_cdev_add:
+ unregister_chrdev_region(dev, VSERVICES_DEVICE_MAX);
+fail_alloc_chrdev:
+ return r;
+}
+
+void __exit
+vs_devio_exit(void)
+{
+ cdev_del(&vs_cdev);
+ unregister_chrdev_region(MKDEV(vservices_cdev_major, 0),
+ VSERVICES_DEVICE_MAX);
+}
diff --git a/drivers/vservices/protocol/Kconfig b/drivers/vservices/protocol/Kconfig
new file mode 100644
index 0000000..e0f2798c
--- /dev/null
+++ b/drivers/vservices/protocol/Kconfig
@@ -0,0 +1,44 @@
+#
+# vServices protocol drivers configuration
+#
+
+if VSERVICES_SERVER || VSERVICES_CLIENT
+
+menu "Protocol drivers"
+config VSERVICES_PROTOCOL_BLOCK
+ bool
+
+config VSERVICES_PROTOCOL_BLOCK_SERVER
+ tristate "Block server protocol"
+ depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+ select VSERVICES_PROTOCOL_BLOCK
+ help
+	  This option adds support for the Virtual Services block protocol
+	  server.
+
+config VSERVICES_PROTOCOL_BLOCK_CLIENT
+ tristate "Block client protocol"
+ depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+ select VSERVICES_PROTOCOL_BLOCK
+ help
+	  This option adds support for the Virtual Services block protocol
+	  client.
+
+config VSERVICES_PROTOCOL_SERIAL
+ bool
+
+config VSERVICES_PROTOCOL_SERIAL_SERVER
+ tristate "Serial server protocol"
+ depends on VSERVICES_SUPPORT && VSERVICES_SERVER
+ select VSERVICES_PROTOCOL_SERIAL
+ help
+	  This option adds support for the Virtual Services serial protocol
+	  server.
+
+config VSERVICES_PROTOCOL_SERIAL_CLIENT
+ tristate "Serial client protocol"
+ depends on VSERVICES_SUPPORT && VSERVICES_CLIENT
+ select VSERVICES_PROTOCOL_SERIAL
+ help
+	  This option adds support for the Virtual Services serial protocol
+	  client.
+
+endmenu
+
+endif # VSERVICES_SERVER || VSERVICES_CLIENT
diff --git a/drivers/vservices/protocol/Makefile b/drivers/vservices/protocol/Makefile
new file mode 100644
index 0000000..0c714e0
--- /dev/null
+++ b/drivers/vservices/protocol/Makefile
@@ -0,0 +1,5 @@
+# This is an autogenerated Makefile for vservice-linux-stacks
+
+obj-$(CONFIG_VSERVICES_SUPPORT) += core/
+obj-$(CONFIG_VSERVICES_PROTOCOL_BLOCK) += block/
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL) += serial/
diff --git a/drivers/vservices/protocol/block/Makefile b/drivers/vservices/protocol/block/Makefile
new file mode 100644
index 0000000..325b57e
--- /dev/null
+++ b/drivers/vservices/protocol/block/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_BLOCK_SERVER) += vservices_protocol_block_server.o
+vservices_protocol_block_server-objs = server.o
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_BLOCK_CLIENT) += vservices_protocol_block_client.o
+vservices_protocol_block_client-objs = client.o
diff --git a/drivers/vservices/protocol/block/client.c b/drivers/vservices/protocol/block/client.c
new file mode 100644
index 0000000..702a30a8
--- /dev/null
+++ b/drivers/vservices/protocol/block/client.c
@@ -0,0 +1,1186 @@
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This is the generated code for the block client protocol handling.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+static int _vs_client_block_req_open(struct vs_client_block_state *_state);
+
+/*** Linux driver model integration ***/
+struct vs_block_client_driver {
+ struct vs_client_block *client;
+ struct list_head list;
+ struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+ container_of(d, struct vs_block_client_driver, vsdrv)
+
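+/*
+ * Fail all outstanding I/O on reset: every pending slot's tag is
+ * returned to the client through nack_read()/nack_write() with
+ * VSERVICE_BLOCK_SERVICE_RESET before its bitmask bit is cleared.
+ */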
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ int i __maybe_unused;
+
+ /* Clear out pending read commands */
+ for_each_set_bit(i, state->state.io.read_bitmask,
+ VSERVICE_BLOCK_IO_READ_MAX_PENDING) {
+ void *tag = state->state.io.read_tags[i];
+
+ if (client->io.nack_read)
+ client->io.nack_read(state, tag,
+ VSERVICE_BLOCK_SERVICE_RESET);
+
+ __clear_bit(i, state->state.io.read_bitmask);
+ }
+
+ /* Clear out pending write commands */
+ for_each_set_bit(i, state->state.io.write_bitmask,
+ VSERVICE_BLOCK_IO_WRITE_MAX_PENDING) {
+ void *tag = state->state.io.write_tags[i];
+
+ if (client->io.nack_write)
+ client->io.nack_write(state, tag,
+ VSERVICE_BLOCK_SERVICE_RESET);
+
+ __clear_bit(i, state->state.io.write_bitmask);
+ }
+
+}
+
+static void block_handle_start(struct vs_service_device *service)
+{
+
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ _vs_client_block_req_open(state);
+
+ vs_service_state_unlock(service);
+}
+
+static void block_handle_reset(struct vs_service_device *service)
+{
+
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (client->closed)
+ client->closed(state);
+
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ vs_service_state_unlock(service);
+}
+
+static void block_handle_start_bh(struct vs_service_device *service)
+{
+
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ _vs_client_block_req_open(state);
+
+ vs_service_state_unlock_bh(service);
+}
+
+static void block_handle_reset_bh(struct vs_service_device *service)
+{
+
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock_bh(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock_bh(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (client->closed)
+ client->closed(state);
+
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ vs_service_state_unlock_bh(service);
+}
+
+static int block_client_probe(struct vs_service_device *service);
+static int block_client_remove(struct vs_service_device *service);
+static int block_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *_mbuf);
+static void block_handle_notify(struct vs_service_device *service,
+ uint32_t flags);
+static void block_handle_start(struct vs_service_device *service);
+static void block_handle_start_bh(struct vs_service_device *service);
+static void block_handle_reset(struct vs_service_device *service);
+static void block_handle_reset_bh(struct vs_service_device *service);
+static int block_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_block_client_register(struct vs_client_block *client,
+ const char *name, struct module *owner)
+{
+ int ret;
+ struct vs_block_client_driver *driver;
+
+ if (client->tx_atomic && !client->rx_atomic)
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver) {
+ ret = -ENOMEM;
+ goto fail_alloc_driver;
+ }
+
+ client->driver = &driver->vsdrv;
+ driver->client = client;
+
+ driver->vsdrv.protocol = VSERVICE_BLOCK_PROTOCOL_NAME;
+
+ driver->vsdrv.is_server = false;
+ driver->vsdrv.rx_atomic = client->rx_atomic;
+ driver->vsdrv.tx_atomic = client->tx_atomic;
+
+ driver->vsdrv.probe = block_client_probe;
+ driver->vsdrv.remove = block_client_remove;
+ driver->vsdrv.receive = block_handle_message;
+ driver->vsdrv.notify = block_handle_notify;
+ driver->vsdrv.start = client->tx_atomic ?
+ block_handle_start_bh : block_handle_start;
+ driver->vsdrv.reset = client->tx_atomic ?
+ block_handle_reset_bh : block_handle_reset;
+ driver->vsdrv.tx_ready = block_handle_tx_ready;
+ driver->vsdrv.out_notify_count = 0;
+ driver->vsdrv.in_notify_count = 0;
+ driver->vsdrv.driver.name = name;
+ driver->vsdrv.driver.owner = owner;
+ driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+ ret = driver_register(&driver->vsdrv.driver);
+
+ if (ret) {
+ goto fail_driver_register;
+ }
+
+ return 0;
+
+ fail_driver_register:
+ client->driver = NULL;
+ kfree(driver);
+ fail_alloc_driver:
+ return ret;
+}
+
+EXPORT_SYMBOL(__vservice_block_client_register);
+
+int vservice_block_client_unregister(struct vs_client_block *client)
+{
+ struct vs_block_client_driver *driver;
+
+ if (!client->driver)
+ return 0;
+
+ driver = to_client_driver(client->driver);
+ driver_unregister(&driver->vsdrv.driver);
+
+ client->driver = NULL;
+ kfree(driver);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vservice_block_client_unregister);
+
+static int block_client_probe(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client = to_client_driver(vsdrv)->client;
+ struct vs_client_block_state *state;
+
+ state = client->alloc(service);
+ if (!state)
+ return -ENOMEM;
+ else if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ state->service = vs_get_service(service);
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ dev_set_drvdata(&service->dev, state);
+
+ return 0;
+}
+
+static int block_client_remove(struct vs_service_device *service)
+{
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client = to_client_driver(vsdrv)->client;
+
+ state->released = true;
+ dev_set_drvdata(&service->dev, NULL);
+ client->release(state);
+
+ vs_put_service(service);
+
+ return 0;
+}
+
+static int block_handle_tx_ready(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_block *client = to_client_driver(vsdrv)->client;
+ struct vs_client_block_state *state = dev_get_drvdata(&service->dev);
+
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+ return 0;
+
+ if (client->tx_ready)
+ client->tx_ready(state);
+
+ return 0;
+}
+
+static int _vs_client_block_req_open(struct vs_client_block_state *_state)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_block *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+ (_state)) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_REQ_OPEN;
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_block_req_open);
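+
+/*
+ * Base-protocol state machine as driven by the req/ack pairs in this
+ * file: req_open moves CLOSED to CLOSED__OPEN and ack_open completes
+ * the transition to RUNNING; req_close and req_reopen move RUNNING to
+ * RUNNING__CLOSE or RUNNING__REOPEN, and the matching ack or nack
+ * settles the final state.
+ */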
+static int _vs_client_block_req_close(struct vs_client_block_state *_state)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_block *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+ (_state)) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_REQ_CLOSE;
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_block_req_close);
+static int _vs_client_block_req_reopen(struct vs_client_block_state *_state)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_block *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+ (_state)) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_REQ_REOPEN;
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_block_req_reopen);
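+
+/*
+ * The ack_open body decoded below is a 28-byte layout emitted by the
+ * protocol generator: readonly (bool, offset 0), sector_size (u32,
+ * offset 4), segment_size (u32, offset 8), device_sectors (u64,
+ * offset 12), flushable (bool, offset 20) and committable (bool,
+ * offset 24).
+ */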
+static int
+block_base_handle_ack_open(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 28UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+ _state->io.sector_size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ _state->io.segment_size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+ _state->readonly =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ _state->sector_size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ _state->segment_size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+ _state->device_sectors =
+ *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 12UL);
+ _state->flushable =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL);
+ _state->committable =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ _client->opened(_state);
+ return 0;
+
+}
+
+static int
+block_base_handle_nack_open(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ dev_err(&VS_STATE_SERVICE_PTR(_state)->dev,
+ "Open operation failed for device %s\n",
+ VS_STATE_SERVICE_PTR(_state)->name);
+
+ return 0;
+
+}
+
+EXPORT_SYMBOL(block_base_handle_ack_open);
+static int
+block_base_handle_ack_close(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ wake_up_all(&_state->service->quota_wq);
+ _client->closed(_state);
+ return 0;
+
+}
+
+static int
+block_base_handle_nack_close(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ wake_up_all(&_state->service->quota_wq);
+ _client->closed(_state);
+ return 0;
+
+}
+
+EXPORT_SYMBOL(block_base_handle_ack_close);
+static int
+block_base_handle_ack_reopen(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE__RESET;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->reopened) {
+ _client->reopened(_state);
+ return 0;
+ }
+ wake_up_all(&_state->service->quota_wq);
+ _client->closed(_state);
+ return _vs_client_block_req_open(_state);
+
+}
+
+static int
+block_base_handle_nack_reopen(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ return 0;
+
+}
+
+EXPORT_SYMBOL(block_base_handle_ack_reopen);
+int vs_client_block_io_getbufs_ack_read(struct vs_client_block_state *_state,
+ struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf)
+{
+ const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_ACK_READ;
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->io.segment_size + 8UL;
+ const size_t _min_size = _max_size - _state->io.segment_size;
+ size_t _exact_size;
+
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+ return -EINVAL;
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ data->size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ data->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL +
+ sizeof(uint32_t));
+ data->max_size = data->size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->io.segment_size - data->size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_getbufs_ack_read);
+int vs_client_block_io_free_ack_read(struct vs_client_block_state *_state,
+ struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_free_ack_read);
+struct vs_mbuf *vs_client_block_io_alloc_req_write(struct vs_client_block_state
+ *_state,
+ struct vs_pbuf *data,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+ const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_REQ_WRITE;
+ const uint32_t _msg_size =
+ sizeof(vs_message_id_t) + _state->io.segment_size + 32UL;
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return _mbuf;
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENOMEM);
+ }
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+ if (!data)
+ goto fail;
+ data->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 28UL + sizeof(uint32_t));
+ data->size = _state->io.segment_size;
+ data->max_size = data->size;
+ return _mbuf;
+
+ fail:
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return ERR_PTR(-EINVAL);
+}
+
+EXPORT_SYMBOL(vs_client_block_io_alloc_req_write);
+int vs_client_block_io_free_req_write(struct vs_client_block_state *_state,
+ struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_free_req_write);
+int
+vs_client_block_io_req_read(struct vs_client_block_state *_state, void *_opaque,
+ uint64_t sector_index, uint32_t num_sects,
+ bool nodelay, bool flush, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 24UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_block *_client =
+ to_client_driver(vsdrv)->client;
+ uint32_t _opaque_tmp;
+ if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+ return -EPROTO;
+ _opaque_tmp =
+ find_first_zero_bit(_state->state.io.read_bitmask,
+ VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+ if (_opaque_tmp >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+ return -EPROTO;
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = VSERVICE_BLOCK_IO_REQ_READ;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _opaque_tmp;
+ *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+ sector_index;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+ num_sects;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+ nodelay;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+ flush;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.io.read_tags[_opaque_tmp] = _opaque;
+ __set_bit(_opaque_tmp, _state->state.io.read_bitmask);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_req_read);
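+
+/*
+ * Note on the tagging scheme shared by req_read above and req_write
+ * below: each in-flight command claims the lowest clear bit in
+ * read_bitmask/write_bitmask, the caller's opaque cookie is parked in
+ * read_tags/write_tags under that index, and the index travels on the
+ * wire so the ack/nack handlers can recover the cookie and release the
+ * slot.
+ */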
+int
+vs_client_block_io_req_write(struct vs_client_block_state *_state,
+ void *_opaque, uint64_t sector_index,
+ uint32_t num_sects, bool nodelay, bool flush,
+ bool commit, struct vs_pbuf data,
+ struct vs_mbuf *_mbuf)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_block *_client =
+ to_client_driver(vsdrv)->client;
+ uint32_t _opaque_tmp;
+ if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+ return -EPROTO;
+ _opaque_tmp =
+ find_first_zero_bit(_state->state.io.write_bitmask,
+ VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+ if (_opaque_tmp >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+ return -EPROTO;
+
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+ VSERVICE_BLOCK_IO_REQ_WRITE)
+
+ return -EINVAL;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _opaque_tmp;
+ *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+ sector_index;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+ num_sects;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+ nodelay;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+ flush;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+ commit;
+ if ((data.size + sizeof(vs_message_id_t) + 28UL) > VS_MBUF_SIZE(_mbuf))
+ return -EINVAL;
+
+ if (data.size < data.max_size)
+ VS_MBUF_SIZE(_mbuf) -= (data.max_size - data.size);
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 28UL) =
+ data.size;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.io.write_tags[_opaque_tmp] = _opaque;
+ __set_bit(_opaque_tmp, _state->state.io.write_bitmask);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_block_io_req_write);
+static int
+block_io_handle_ack_read(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->io.segment_size + 8UL;
+ void *_opaque;
+ struct vs_pbuf data;
+ const size_t _min_size = _max_size - _state->io.segment_size;
+ size_t _exact_size;
+ uint32_t _opaque_tmp;
+
+ /* The first check is to ensure the message isn't complete garbage */
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+ _opaque_tmp =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ if (_opaque_tmp >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque_tmp, _state->state.io.read_bitmask))
+ return -EPROTO;
+ _opaque = _state->state.io.read_tags[_opaque_tmp];
+ __clear_bit(_opaque_tmp, _state->state.io.read_bitmask);
+
+ data.size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ data.data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL +
+ sizeof(uint32_t));
+ data.max_size = data.size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->io.segment_size - data.size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+ if (_client->io.ack_read)
+ return _client->io.ack_read(_state, _opaque, data, _mbuf);
+ return 0;
+}
+
+static int
+block_io_handle_nack_read(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+ void *_opaque;
+ vservice_block_block_io_error_t err;
+ uint32_t _opaque_tmp;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ _opaque_tmp =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ if (_opaque_tmp >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque_tmp, _state->state.io.read_bitmask))
+ return -EPROTO;
+ _opaque = _state->state.io.read_tags[_opaque_tmp];
+ __clear_bit(_opaque_tmp, _state->state.io.read_bitmask);
+ err =
+ *(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+ sizeof(vs_message_id_t) +
+ 4UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->io.nack_read)
+ return _client->io.nack_read(_state, _opaque, err);
+ return 0;
+}
+
+static int
+block_io_handle_ack_write(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+ void *_opaque;
+ uint32_t _opaque_tmp;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ _opaque_tmp =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ if (_opaque_tmp >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque_tmp, _state->state.io.write_bitmask))
+ return -EPROTO;
+ _opaque = _state->state.io.write_tags[_opaque_tmp];
+ __clear_bit(_opaque_tmp, _state->state.io.write_bitmask);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->io.ack_write)
+ return _client->io.ack_write(_state, _opaque);
+ return 0;
+}
+
+static int
+block_io_handle_nack_write(const struct vs_client_block *_client,
+ struct vs_client_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+ void *_opaque;
+ vservice_block_block_io_error_t err;
+ uint32_t _opaque_tmp;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ _opaque_tmp =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ if (_opaque_tmp >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque_tmp, _state->state.io.write_bitmask))
+ return -EPROTO;
+ _opaque = _state->state.io.write_tags[_opaque_tmp];
+ __clear_bit(_opaque_tmp, _state->state.io.write_bitmask);
+ err =
+ *(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+ sizeof(vs_message_id_t) +
+ 4UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->io.nack_write)
+ return _client->io.nack_write(_state, _opaque, err);
+ return 0;
+}
+
+static int
+block_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ vs_message_id_t message_id;
+ __maybe_unused struct vs_client_block_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_client_block *client =
+ to_client_driver(vsdrv)->client;
+
+ int ret;
+
+ /* Extract the message ID */
+ if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Invalid message size %zd\n",
+ __func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+ return -EBADMSG;
+ }
+
+ message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+ switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+ case VSERVICE_BLOCK_BASE_ACK_OPEN:
+ ret = block_base_handle_ack_open(client, state, _mbuf);
+ break;
+ case VSERVICE_BLOCK_BASE_NACK_OPEN:
+ ret = block_base_handle_nack_open(client, state, _mbuf);
+ break;
+
+/* command in sync close */
+ case VSERVICE_BLOCK_BASE_ACK_CLOSE:
+ ret = block_base_handle_ack_close(client, state, _mbuf);
+ break;
+ case VSERVICE_BLOCK_BASE_NACK_CLOSE:
+ ret = block_base_handle_nack_close(client, state, _mbuf);
+ break;
+
+/* command in sync reopen */
+ case VSERVICE_BLOCK_BASE_ACK_REOPEN:
+ ret = block_base_handle_ack_reopen(client, state, _mbuf);
+ break;
+ case VSERVICE_BLOCK_BASE_NACK_REOPEN:
+ ret = block_base_handle_nack_reopen(client, state, _mbuf);
+ break;
+
+/** interface block_io **/
+/* command in parallel read */
+ case VSERVICE_BLOCK_IO_ACK_READ:
+ ret = block_io_handle_ack_read(client, state, _mbuf);
+ break;
+ case VSERVICE_BLOCK_IO_NACK_READ:
+ ret = block_io_handle_nack_read(client, state, _mbuf);
+ break;
+
+/* command in parallel write */
+ case VSERVICE_BLOCK_IO_ACK_WRITE:
+ ret = block_io_handle_ack_write(client, state, _mbuf);
+ break;
+ case VSERVICE_BLOCK_IO_NACK_WRITE:
+ ret = block_io_handle_nack_write(client, state, _mbuf);
+ break;
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown message type %d\n",
+ __func__, __LINE__, (int)message_id);
+
+ ret = -EPROTO;
+ break;
+ }
+
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+ __func__, __LINE__, (int)message_id, ret);
+
+ }
+
+ return ret;
+}
+
+static void block_handle_notify(struct vs_service_device *service,
+ uint32_t notify_bits)
+{
+ __maybe_unused struct vs_client_block_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_client_block *client =
+ to_client_driver(vsdrv)->client;
+
+ uint32_t bits = notify_bits;
+ int ret;
+
+ while (bits) {
+ uint32_t not = __ffs(bits);
+ switch (not) {
+
+ /** interface block_io **/
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown notification %d\n",
+ __func__, __LINE__, (int)not);
+
+ ret = -EPROTO;
+ break;
+
+ }
+ bits &= ~(1 << not);
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+ __func__, __LINE__, (int)not, ret);
+
+ }
+ }
+}
+
+int vs_client_block_reopen(struct vs_client_block_state *_state)
+{
+ return _vs_client_block_req_reopen(_state);
+}
+
+EXPORT_SYMBOL(vs_client_block_reopen);
+
+int vs_client_block_close(struct vs_client_block_state *_state)
+{
+ return _vs_client_block_req_close(_state);
+}
+
+EXPORT_SYMBOL(vs_client_block_close);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Block Client Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
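For orientation before the server side below, here is a minimal sketch of a
driver consuming the generated client API above. Only the io.ack_read
callback signature and the vs_client_block_io_* helpers are taken from the
code; the my_* names, the handle_my_data() helper and the exact set of
vs_client_block fields are illustrative assumptions, not part of the
generated interface.

	static int my_ack_read(struct vs_client_block_state *state, void *opaque,
			       struct vs_pbuf data, struct vs_mbuf *mbuf)
	{
		/* data.data/data.size point into the mbuf payload; consume
		 * the sectors, then hand the buffer back to the transport. */
		handle_my_data(opaque, data.data, data.size);	/* hypothetical */
		return vs_client_block_io_free_ack_read(state, &data, mbuf);
	}

	static struct vs_client_block my_block_client = {
		/* .alloc/.release and the base session callbacks go here */
		.io = {
			.ack_read = my_ack_read,
		},
	};

A read would then be issued as vs_client_block_io_req_read(state, cookie,
sector, nsects, false, false, GFP_KERNEL); the cookie is handed back as the
opaque argument when the matching ack or nack arrives.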
diff --git a/drivers/vservices/protocol/block/server.c b/drivers/vservices/protocol/block/server.c
new file mode 100644
index 0000000..a4a7d1a
--- /dev/null
+++ b/drivers/vservices/protocol/block/server.c
@@ -0,0 +1,1371 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This is the generated code for the block server protocol handling.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/block/types.h>
+#include <vservices/protocol/block/common.h>
+#include <vservices/protocol/block/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_block_server_driver {
+ struct vs_server_block *server;
+ struct list_head list;
+ struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+ container_of(d, struct vs_block_server_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void block_handle_start(struct vs_service_device *service)
+{
+
+ struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ vs_service_state_unlock(service);
+}
+
+static void block_handle_reset(struct vs_service_device *service)
+{
+
+ struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (server->closed)
+ server->closed(state);
+
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ vs_service_state_unlock(service);
+}
+
+static void block_handle_start_bh(struct vs_service_device *service)
+{
+
+ struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ vs_service_state_unlock_bh(service);
+}
+
+static void block_handle_reset_bh(struct vs_service_device *service)
+{
+
+ struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock_bh(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock_bh(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (server->closed)
+ server->closed(state);
+
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ vs_service_state_unlock_bh(service);
+}
+
+static int block_server_probe(struct vs_service_device *service);
+static int block_server_remove(struct vs_service_device *service);
+static int block_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *_mbuf);
+static void block_handle_notify(struct vs_service_device *service,
+ uint32_t flags);
+static void block_handle_start(struct vs_service_device *service);
+static void block_handle_start_bh(struct vs_service_device *service);
+static void block_handle_reset(struct vs_service_device *service);
+static void block_handle_reset_bh(struct vs_service_device *service);
+static int block_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_block_server_register(struct vs_server_block *server,
+ const char *name, struct module *owner)
+{
+ int ret;
+ struct vs_block_server_driver *driver;
+
+ if (server->tx_atomic && !server->rx_atomic)
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver) {
+ ret = -ENOMEM;
+ goto fail_alloc_driver;
+ }
+
+ server->driver = &driver->vsdrv;
+ driver->server = server;
+
+ driver->vsdrv.protocol = VSERVICE_BLOCK_PROTOCOL_NAME;
+
+ driver->vsdrv.is_server = true;
+ driver->vsdrv.rx_atomic = server->rx_atomic;
+ driver->vsdrv.tx_atomic = server->tx_atomic;
+ /* FIXME Jira ticket SDK-2835 - philipd. */
+ driver->vsdrv.in_quota_min = 1;
+ driver->vsdrv.in_quota_best = server->in_quota_best ?
+ server->in_quota_best : driver->vsdrv.in_quota_min;
+ /* FIXME Jira ticket SDK-2835 - philipd. */
+ driver->vsdrv.out_quota_min = 1;
+ driver->vsdrv.out_quota_best = server->out_quota_best ?
+ server->out_quota_best : driver->vsdrv.out_quota_min;
+ driver->vsdrv.in_notify_count = VSERVICE_BLOCK_NBIT_IN__COUNT;
+ driver->vsdrv.out_notify_count = VSERVICE_BLOCK_NBIT_OUT__COUNT;
+
+ driver->vsdrv.probe = block_server_probe;
+ driver->vsdrv.remove = block_server_remove;
+ driver->vsdrv.receive = block_handle_message;
+ driver->vsdrv.notify = block_handle_notify;
+ driver->vsdrv.start = server->tx_atomic ?
+ block_handle_start_bh : block_handle_start;
+ driver->vsdrv.reset = server->tx_atomic ?
+ block_handle_reset_bh : block_handle_reset;
+ driver->vsdrv.tx_ready = block_handle_tx_ready;
+ driver->vsdrv.driver.name = name;
+ driver->vsdrv.driver.owner = owner;
+ driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+ ret = driver_register(&driver->vsdrv.driver);
+
+ if (ret) {
+ goto fail_driver_register;
+ }
+
+ return 0;
+
+ fail_driver_register:
+ server->driver = NULL;
+ kfree(driver);
+ fail_alloc_driver:
+ return ret;
+}
+
+EXPORT_SYMBOL(__vservice_block_server_register);
+
+int vservice_block_server_unregister(struct vs_server_block *server)
+{
+ struct vs_block_server_driver *driver;
+
+ if (!server->driver)
+ return 0;
+
+ driver = to_server_driver(server->driver);
+ driver_unregister(&driver->vsdrv.driver);
+
+ server->driver = NULL;
+ kfree(driver);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vservice_block_server_unregister);
+
+static int block_server_probe(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server = to_server_driver(vsdrv)->server;
+ struct vs_server_block_state *state;
+
+ state = server->alloc(service);
+ if (!state)
+ return -ENOMEM;
+ else if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ state->service = vs_get_service(service);
+ state->state = VSERVICE_BLOCK_RESET_STATE;
+
+ dev_set_drvdata(&service->dev, state);
+
+ return 0;
+}
+
+static int block_server_remove(struct vs_service_device *service)
+{
+ struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server = to_server_driver(vsdrv)->server;
+
+ state->released = true;
+ dev_set_drvdata(&service->dev, NULL);
+ server->release(state);
+
+ vs_put_service(service);
+
+ return 0;
+}
+
+static int block_handle_tx_ready(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_block *server = to_server_driver(vsdrv)->server;
+ struct vs_server_block_state *state = dev_get_drvdata(&service->dev);
+
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+ return 0;
+
+ if (server->tx_ready)
+ server->tx_ready(state);
+
+ return 0;
+}
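+
+/*
+ * Editorial note: tx_ready runs once the transport can accept more
+ * outgoing messages, and only while the session is RUNNING, so a server
+ * whose ack/nack send failed for lack of buffers can use the
+ * server->tx_ready hook to retry, e.g. (retry_deferred_acks is a
+ * hypothetical helper):
+ *
+ *	static void my_tx_ready(struct vs_server_block_state *state)
+ *	{
+ *		retry_deferred_acks(state);
+ *	}
+ */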
+
+static int
+vs_server_block_send_ack_open(struct vs_server_block_state *_state, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 28UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_ACK_OPEN;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _state->readonly;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+ _state->sector_size;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL) =
+ _state->segment_size;
+ *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+ _state->device_sectors;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+ _state->flushable;
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+ _state->committable;
+ _state->io.sector_size = _state->sector_size;
+ _state->io.segment_size = _state->segment_size;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+ return 0;
+}
+
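+
+/*
+ * Wire layout of the ack_open payload built above, as byte offsets past
+ * the message ID: 0 readonly (bool), 4 sector_size (u32), 8 segment_size
+ * (u32), 12 device_sectors (u64), 20 flushable (bool), 24 committable
+ * (bool) - 28 bytes in total, matching _msg_size.
+ */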
+static int
+vs_server_block_send_nack_open(struct vs_server_block_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_NACK_OPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+ return 0;
+}
+
+static int
+vs_server_block_send_ack_close(struct vs_server_block_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_ACK_CLOSE;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+ return 0;
+}
+
+static int
+vs_server_block_send_nack_close(struct vs_server_block_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_NACK_CLOSE;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+ return 0;
+}
+
+static int
+vs_server_block_send_ack_reopen(struct vs_server_block_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_ACK_REOPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE__RESET;
+
+ return 0;
+}
+
+static int
+vs_server_block_send_nack_reopen(struct vs_server_block_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_BASE_NACK_REOPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+ return 0;
+}
+
+static int
+vs_server_block_handle_req_open(const struct vs_server_block *_server,
+ struct vs_server_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->open)
+ return vs_server_block_open_complete(_state,
+ _server->open(_state));
+ return vs_server_block_open_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_block_open_complete(struct vs_server_block_state *_state,
+				  vs_server_response_type_t resp)
+{
+	gfp_t gfp = vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR(_state)) ?
+	    GFP_ATOMIC : GFP_KERNEL;
+	int ret = 0;
+
+	if (resp == VS_SERVER_RESP_SUCCESS)
+		ret = vs_server_block_send_ack_open(_state, gfp);
+	else if (resp == VS_SERVER_RESP_FAILURE)
+		ret = vs_server_block_send_nack_open(_state, gfp);
+
+	return ret;
+}
+
+EXPORT_SYMBOL(vs_server_block_open_complete);
+
+static int
+vs_server_block_handle_req_close(const struct vs_server_block *_server,
+ struct vs_server_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->close)
+ return vs_server_block_close_complete(_state,
+ _server->close(_state));
+ return vs_server_block_close_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_block_close_complete(struct vs_server_block_state *_state,
+				   vs_server_response_type_t resp)
+{
+	gfp_t gfp = vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR(_state)) ?
+	    GFP_ATOMIC : GFP_KERNEL;
+	int ret = 0;
+
+	if (resp == VS_SERVER_RESP_SUCCESS)
+		ret = vs_server_block_send_ack_close(_state, gfp);
+	else if (resp == VS_SERVER_RESP_FAILURE)
+		ret = vs_server_block_send_nack_close(_state, gfp);
+	if ((resp == VS_SERVER_RESP_SUCCESS) && (ret == 0))
+		wake_up_all(&_state->service->quota_wq);
+
+	return ret;
+}
+
+EXPORT_SYMBOL(vs_server_block_close_complete);
+
+static int
+vs_server_block_handle_req_reopen(const struct vs_server_block *_server,
+ struct vs_server_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	if (_server->reopen)
+		return vs_server_block_reopen_complete(_state,
+						       _server->reopen(_state));
+
+	return vs_server_block_send_nack_reopen(_state,
+						vs_service_has_atomic_rx
+						(VS_STATE_SERVICE_PTR(_state)) ?
+						GFP_ATOMIC : GFP_KERNEL);
+
+}
+
+int vs_server_block_reopen_complete(struct vs_server_block_state *_state,
+				    vs_server_response_type_t resp)
+{
+	gfp_t gfp = vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR(_state)) ?
+	    GFP_ATOMIC : GFP_KERNEL;
+	int ret = 0;
+
+	if (resp == VS_SERVER_RESP_SUCCESS) {
+		_state->io.sector_size = _state->sector_size;
+		_state->io.segment_size = _state->segment_size;
+		ret = vs_server_block_send_ack_reopen(_state, gfp);
+	} else if (resp == VS_SERVER_RESP_FAILURE) {
+		ret = vs_server_block_send_nack_reopen(_state, gfp);
+	}
+
+	return ret;
+}
+
+EXPORT_SYMBOL(vs_server_block_reopen_complete);
+
+struct vs_mbuf *vs_server_block_io_alloc_ack_read(struct vs_server_block_state
+ *_state, struct vs_pbuf *data,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+ const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_ACK_READ;
+ const uint32_t _msg_size =
+ sizeof(vs_message_id_t) + _state->io.segment_size + 8UL;
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return _mbuf;
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENOMEM);
+ }
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+ if (!data)
+ goto fail;
+ data->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL +
+ sizeof(uint32_t));
+ data->size = _state->io.segment_size;
+ data->max_size = data->size;
+ return _mbuf;
+
+ fail:
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+	return ERR_PTR(-EINVAL);
+}
+
+EXPORT_SYMBOL(vs_server_block_io_alloc_ack_read);
+int vs_server_block_io_free_ack_read(struct vs_server_block_state *_state,
+ struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_free_ack_read);
+int vs_server_block_io_getbufs_req_write(struct vs_server_block_state *_state,
+ struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf)
+{
+ const vs_message_id_t _msg_id = VSERVICE_BLOCK_IO_REQ_WRITE;
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->io.segment_size + 32UL;
+ const size_t _min_size = _max_size - _state->io.segment_size;
+ size_t _exact_size;
+
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+ return -EINVAL;
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ data->size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 28UL);
+ data->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 28UL + sizeof(uint32_t));
+ data->max_size = data->size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->io.segment_size - data->size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_getbufs_req_write);
+int vs_server_block_io_free_req_write(struct vs_server_block_state *_state,
+ struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_free_req_write);
+int
+vs_server_block_io_send_ack_read(struct vs_server_block_state *_state,
+ uint32_t _opaque, struct vs_pbuf data,
+ struct vs_mbuf *_mbuf)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ if (_opaque >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque, _state->state.io.read_bitmask))
+ return -EPROTO;
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+ VSERVICE_BLOCK_IO_ACK_READ)
+
+ return -EINVAL;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _opaque;
+ if ((data.size + sizeof(vs_message_id_t) + 4UL) > VS_MBUF_SIZE(_mbuf))
+ return -EINVAL;
+
+ if (data.size < data.max_size)
+ VS_MBUF_SIZE(_mbuf) -= (data.max_size - data.size);
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+ data.size;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ __clear_bit(_opaque, _state->state.io.read_bitmask);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_ack_read);
+int
+vs_server_block_io_send_nack_read(struct vs_server_block_state *_state,
+ uint32_t _opaque,
+ vservice_block_block_io_error_t err,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ if (_opaque >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque, _state->state.io.read_bitmask))
+ return -EPROTO;
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_IO_NACK_READ;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _opaque;
+ *(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+ sizeof(vs_message_id_t) + 4UL) =
+ err;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ __clear_bit(_opaque, _state->state.io.read_bitmask);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_nack_read);
+int
+vs_server_block_io_send_ack_write(struct vs_server_block_state *_state,
+ uint32_t _opaque, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ if (_opaque >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque, _state->state.io.write_bitmask))
+ return -EPROTO;
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_IO_ACK_WRITE;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _opaque;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ __clear_bit(_opaque, _state->state.io.write_bitmask);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_ack_write);
+int
+vs_server_block_io_send_nack_write(struct vs_server_block_state *_state,
+ uint32_t _opaque,
+ vservice_block_block_io_error_t err,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_block *_server =
+ to_server_driver(vsdrv)->server;
+
+ if (_opaque >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+ return -EPROTO;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+ if (!test_bit(_opaque, _state->state.io.write_bitmask))
+ return -EPROTO;
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_BLOCK_IO_NACK_WRITE;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _opaque;
+ *(vservice_block_block_io_error_t *) (VS_MBUF_DATA(_mbuf) +
+ sizeof(vs_message_id_t) + 4UL) =
+ err;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ __clear_bit(_opaque, _state->state.io.write_bitmask);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_block_io_send_nack_write);
+static int
+vs_server_block_io_handle_req_read(const struct vs_server_block *_server,
+ struct vs_server_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 24UL;
+ uint32_t _opaque;
+ uint64_t sector_index;
+ uint32_t num_sects;
+ bool nodelay;
+ bool flush;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+	_opaque =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	/* Bounds-check the remote-supplied tag before touching the bitmask */
+	if (_opaque >= VSERVICE_BLOCK_IO_READ_MAX_PENDING)
+		return -EPROTO;
+	if (test_bit(_opaque, _state->state.io.read_bitmask))
+		return -EPROTO;
+	__set_bit(_opaque, _state->state.io.read_bitmask);
+ sector_index =
+ *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ num_sects =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 12UL);
+ nodelay =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL);
+ flush =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->io.req_read)
+ return _server->io.req_read(_state, _opaque, sector_index,
+ num_sects, nodelay, flush);
+ else
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: No handler registered for _server->io.req_read, command will never be acknowledged\n",
+ __func__, __LINE__);
+ return 0;
+}
+
+static int
+vs_server_block_io_handle_req_write(const struct vs_server_block *_server,
+ struct vs_server_block_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->io.segment_size + 32UL;
+ uint32_t _opaque;
+ uint64_t sector_index;
+ uint32_t num_sects;
+ bool nodelay;
+ bool flush;
+ bool commit;
+ struct vs_pbuf data;
+ const size_t _min_size = _max_size - _state->io.segment_size;
+ size_t _exact_size;
+
+ /* The first check is to ensure the message isn't complete garbage */
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+	_opaque =
+	    *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+	if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+		return -EPROTO;
+	/* Bounds-check the remote-supplied tag before touching the bitmask */
+	if (_opaque >= VSERVICE_BLOCK_IO_WRITE_MAX_PENDING)
+		return -EPROTO;
+	if (test_bit(_opaque, _state->state.io.write_bitmask))
+		return -EPROTO;
+	__set_bit(_opaque, _state->state.io.write_bitmask);
+ sector_index =
+ *(uint64_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ num_sects =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 12UL);
+ nodelay =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL);
+ flush =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL);
+ commit =
+ *(bool *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL);
+ data.size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 28UL);
+ data.data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 28UL + sizeof(uint32_t));
+ data.max_size = data.size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->io.segment_size - data.size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+ if (_server->io.req_write)
+ return _server->io.req_write(_state, _opaque, sector_index,
+ num_sects, nodelay, flush, commit,
+ data, _mbuf);
+ else
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: No handler registered for _server->io.req_write, command will never be acknowledged\n",
+ __func__, __LINE__);
+ return 0;
+}
+
+static int
+block_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ vs_message_id_t message_id;
+ __maybe_unused struct vs_server_block_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_server_block *server =
+ to_server_driver(vsdrv)->server;
+
+ int ret;
+
+ /* Extract the message ID */
+ if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Invalid message size %zd\n",
+ __func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+ return -EBADMSG;
+ }
+
+ message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+ switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+ case VSERVICE_BLOCK_BASE_REQ_OPEN:
+ ret = vs_server_block_handle_req_open(server, state, _mbuf);
+ break;
+
+/* command in sync close */
+ case VSERVICE_BLOCK_BASE_REQ_CLOSE:
+ ret = vs_server_block_handle_req_close(server, state, _mbuf);
+ break;
+
+/* command in sync reopen */
+ case VSERVICE_BLOCK_BASE_REQ_REOPEN:
+ ret = vs_server_block_handle_req_reopen(server, state, _mbuf);
+ break;
+
+/** interface block_io **/
+/* command in parallel read */
+ case VSERVICE_BLOCK_IO_REQ_READ:
+ ret = vs_server_block_io_handle_req_read(server, state, _mbuf);
+ break;
+
+/* command in parallel write */
+ case VSERVICE_BLOCK_IO_REQ_WRITE:
+ ret = vs_server_block_io_handle_req_write(server, state, _mbuf);
+ break;
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown message type %d\n",
+ __func__, __LINE__, (int)message_id);
+
+ ret = -EPROTO;
+ break;
+ }
+
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+ __func__, __LINE__, (int)message_id, ret);
+
+ }
+
+ return ret;
+}
+
+static void block_handle_notify(struct vs_service_device *service,
+ uint32_t notify_bits)
+{
+ __maybe_unused struct vs_server_block_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_server_block *server =
+ to_server_driver(vsdrv)->server;
+
+ uint32_t bits = notify_bits;
+ int ret;
+
+ while (bits) {
+ uint32_t not = __ffs(bits);
+ switch (not) {
+
+ /** interface block_io **/
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown notification %d\n",
+ __func__, __LINE__, (int)not);
+
+ ret = -EPROTO;
+ break;
+
+ }
+ bits &= ~(1 << not);
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+ __func__, __LINE__, (int)not, ret);
+
+ }
+ }
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Block Server Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
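A corresponding server-side sketch, assuming a hypothetical backing store.
Only the io.req_read callback signature and the alloc/send/free helpers are
taken from the generated code above; my_req_read, read_backing_store() and
the nack error value are illustrative assumptions. Note that a req_write
handler receives the payload mbuf directly and is expected to release it
with vs_server_block_io_free_req_write() once the data has been consumed.

	static int my_req_read(struct vs_server_block_state *state, uint32_t tag,
			       uint64_t sector_index, uint32_t num_sects,
			       bool nodelay, bool flush)
	{
		struct vs_pbuf data;
		struct vs_mbuf *mbuf;

		mbuf = vs_server_block_io_alloc_ack_read(state, &data, GFP_KERNEL);
		if (IS_ERR(mbuf))
			return PTR_ERR(mbuf);

		/* Fill data.data, shrinking data.size to what was produced */
		data.size = num_sects * state->io.sector_size;
		if (read_backing_store(state, sector_index, num_sects,
				       data.data)) {	/* hypothetical */
			vs_server_block_io_free_ack_read(state, &data, mbuf);
			return vs_server_block_io_send_nack_read(state, tag,
					1 /* assumed protocol error value */,
					GFP_KERNEL);
		}

		return vs_server_block_io_send_ack_read(state, tag, data, mbuf);
	}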
diff --git a/drivers/vservices/protocol/core/Makefile b/drivers/vservices/protocol/core/Makefile
new file mode 100644
index 0000000..6bef7f5
--- /dev/null
+++ b/drivers/vservices/protocol/core/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_SERVER) += vservices_protocol_core_server.o
+vservices_protocol_core_server-objs = server.o
+
+obj-$(CONFIG_VSERVICES_CLIENT) += vservices_protocol_core_client.o
+vservices_protocol_core_client-objs = client.o
diff --git a/drivers/vservices/protocol/core/client.c b/drivers/vservices/protocol/core/client.c
new file mode 100644
index 0000000..2dd2136
--- /dev/null
+++ b/drivers/vservices/protocol/core/client.c
@@ -0,0 +1,1069 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/*
+ * This is the generated code for the core client protocol handling.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_core_client_driver {
+ struct vs_client_core *client;
+ struct list_head list;
+ struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+ container_of(d, struct vs_core_client_driver, vsdrv)
+
+static void core_handle_start(struct vs_service_device *service)
+{
+
+ struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (client->start)
+ client->start(state);
+ vs_service_state_unlock(service);
+}
+
+static void core_handle_reset(struct vs_service_device *service)
+{
+
+ struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (client->reset)
+ client->reset(state);
+ vs_service_state_unlock(service);
+}
+
+static void core_handle_start_bh(struct vs_service_device *service)
+{
+
+ struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (client->start)
+ client->start(state);
+ vs_service_state_unlock_bh(service);
+}
+
+static void core_handle_reset_bh(struct vs_service_device *service)
+{
+
+ struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (client->reset)
+ client->reset(state);
+ vs_service_state_unlock_bh(service);
+}
+
+static int core_client_probe(struct vs_service_device *service);
+static int core_client_remove(struct vs_service_device *service);
+static int core_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *_mbuf);
+static void core_handle_notify(struct vs_service_device *service,
+ uint32_t flags);
+static void core_handle_start(struct vs_service_device *service);
+static void core_handle_start_bh(struct vs_service_device *service);
+static void core_handle_reset(struct vs_service_device *service);
+static void core_handle_reset_bh(struct vs_service_device *service);
+static int core_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_core_client_register(struct vs_client_core *client,
+ const char *name, struct module *owner)
+{
+ int ret;
+ struct vs_core_client_driver *driver;
+
+ if (client->tx_atomic && !client->rx_atomic)
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver) {
+ ret = -ENOMEM;
+ goto fail_alloc_driver;
+ }
+
+ client->driver = &driver->vsdrv;
+ driver->client = client;
+
+ driver->vsdrv.protocol = VSERVICE_CORE_PROTOCOL_NAME;
+
+ driver->vsdrv.is_server = false;
+ driver->vsdrv.rx_atomic = client->rx_atomic;
+ driver->vsdrv.tx_atomic = client->tx_atomic;
+
+ driver->vsdrv.probe = core_client_probe;
+ driver->vsdrv.remove = core_client_remove;
+ driver->vsdrv.receive = core_handle_message;
+ driver->vsdrv.notify = core_handle_notify;
+ driver->vsdrv.start = client->tx_atomic ?
+ core_handle_start_bh : core_handle_start;
+ driver->vsdrv.reset = client->tx_atomic ?
+ core_handle_reset_bh : core_handle_reset;
+ driver->vsdrv.tx_ready = core_handle_tx_ready;
+ driver->vsdrv.out_notify_count = 0;
+ driver->vsdrv.in_notify_count = 0;
+ driver->vsdrv.driver.name = name;
+ driver->vsdrv.driver.owner = owner;
+ driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+ ret = driver_register(&driver->vsdrv.driver);
+
+ if (ret) {
+ goto fail_driver_register;
+ }
+
+ return 0;
+
+ fail_driver_register:
+ client->driver = NULL;
+ kfree(driver);
+ fail_alloc_driver:
+ return ret;
+}
+
+EXPORT_SYMBOL(__vservice_core_client_register);
+
+int vservice_core_client_unregister(struct vs_client_core *client)
+{
+ struct vs_core_client_driver *driver;
+
+ if (!client->driver)
+ return 0;
+
+ driver = to_client_driver(client->driver);
+ driver_unregister(&driver->vsdrv.driver);
+
+ client->driver = NULL;
+ kfree(driver);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vservice_core_client_unregister);
+
+static int core_client_probe(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client = to_client_driver(vsdrv)->client;
+ struct vs_client_core_state *state;
+
+ state = client->alloc(service);
+ if (!state)
+ return -ENOMEM;
+ else if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ state->service = vs_get_service(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+
+ dev_set_drvdata(&service->dev, state);
+
+ return 0;
+}
+
+static int core_client_remove(struct vs_service_device *service)
+{
+ struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client = to_client_driver(vsdrv)->client;
+
+ state->released = true;
+ dev_set_drvdata(&service->dev, NULL);
+ client->release(state);
+
+ vs_put_service(service);
+
+ return 0;
+}
+
+static int core_handle_tx_ready(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_core *client = to_client_driver(vsdrv)->client;
+ struct vs_client_core_state *state = dev_get_drvdata(&service->dev);
+
+ if (client->tx_ready)
+ client->tx_ready(state);
+
+ return 0;
+}
+
+int vs_client_core_core_getbufs_service_created(struct vs_client_core_state
+ *_state,
+ struct vs_string *service_name,
+ struct vs_string *protocol_name,
+ struct vs_mbuf *_mbuf)
+{
+ const vs_message_id_t _msg_id = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+ VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+ const size_t _min_size = _max_size - VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+ size_t _exact_size;
+
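+	/*
+	 * Wire layout implied by the offsets below: a vs_message_id_t
+	 * header, a 32-bit service id (not extracted here), a fixed-size
+	 * service name field, then a variable-length protocol name that
+	 * fills the rest of the buffer.  The mbuf stays owned by the
+	 * caller, which must release it with
+	 * vs_client_core_core_free_service_created().
+	 */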
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+ return -EINVAL;
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ service_name->ptr =
+ (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ service_name->max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+
+ protocol_name->ptr =
+ (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+ protocol_name->max_size =
+ VS_MBUF_SIZE(_mbuf) - (sizeof(vs_message_id_t) +
+ VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+
+ /* Now check the size received is the exact size expected */
+ _exact_size =
+ _max_size - (VSERVICE_CORE_PROTOCOL_NAME_SIZE -
+ protocol_name->max_size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_getbufs_service_created);
+int vs_client_core_core_free_service_created(struct vs_client_core_state
+ *_state,
+ struct vs_string *service_name,
+ struct vs_string *protocol_name,
+ struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_free_service_created);
+int
+vs_client_core_core_req_connect(struct vs_client_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_core *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_REQ_CONNECT;
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED__CONNECT;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT);
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_req_connect);
+int
+vs_client_core_core_req_disconnect(struct vs_client_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_core *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_REQ_DISCONNECT;
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED__DISCONNECT;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT);
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_req_disconnect);
+static int
+core_core_handle_ack_connect(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ _state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+ VSERVICE_CORE_STATE_CONNECTED);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.ack_connect)
+ return _client->core.ack_connect(_state);
+ return 0;
+}
+
+static int
+core_core_handle_nack_connect(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+ VSERVICE_CORE_STATE_DISCONNECTED);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.nack_connect)
+ return _client->core.nack_connect(_state);
+ return 0;
+}
+
+static int
+core_core_handle_ack_disconnect(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+ VSERVICE_CORE_STATE_DISCONNECTED);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.ack_disconnect)
+ return _client->core.ack_disconnect(_state);
+ return 0;
+}
+
+static int
+core_core_handle_nack_disconnect(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ _state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+ VSERVICE_CORE_STATE_CONNECTED);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.nack_disconnect)
+ return _client->core.nack_disconnect(_state);
+ return 0;
+}
+
+static int
+vs_client_core_core_handle_startup(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 8UL;
+ uint32_t core_in_quota;
+ uint32_t core_out_quota;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_OFFLINE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state, VSERVICE_CORE_STATE_OFFLINE,
+ VSERVICE_CORE_STATE_DISCONNECTED);
+ core_in_quota =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ core_out_quota =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.msg_startup)
+ return _client->core.msg_startup(_state, core_in_quota,
+ core_out_quota);
+	return 0;
+}
+
+static int
+vs_client_core_core_handle_shutdown(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED:
+ case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED:
+ _state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED,
+ VSERVICE_CORE_STATE_OFFLINE);
+ break;
+ case VSERVICE_CORE_STATE_CONNECTED:
+ _state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+ if (_client->core.state_change)
+ _client->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED,
+ VSERVICE_CORE_STATE_OFFLINE);
+ break;
+
+ default:
+ break;
+ }
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.msg_shutdown)
+ return _client->core.msg_shutdown(_state);
+	return 0;
+}
+
+static int
+vs_client_core_core_handle_service_created(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+ VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+ uint32_t service_id;
+ struct vs_string service_name;
+ struct vs_string protocol_name;
+ const size_t _min_size = _max_size - VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+ size_t _exact_size;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ /* The first check is to ensure the message isn't complete garbage */
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+ service_id =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ service_name.ptr =
+ (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ service_name.max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+
+ protocol_name.ptr =
+ (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+ protocol_name.max_size =
+ VS_MBUF_SIZE(_mbuf) - (sizeof(vs_message_id_t) +
+ VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+
+ /* Now check the size received is the exact size expected */
+ _exact_size =
+ _max_size - (VSERVICE_CORE_PROTOCOL_NAME_SIZE -
+ protocol_name.max_size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+ if (_client->core.msg_service_created)
+ return _client->core.msg_service_created(_state, service_id,
+ service_name,
+ protocol_name, _mbuf);
+	return 0;
+}
+
+static int
+vs_client_core_core_handle_service_removed(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+ uint32_t service_id;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ service_id =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.msg_service_removed)
+ return _client->core.msg_service_removed(_state, service_id);
+	return 0;
+}
+
+static int
+vs_client_core_core_handle_server_ready(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 28UL;
+ uint32_t service_id;
+ uint32_t in_quota;
+ uint32_t out_quota;
+ uint32_t in_bit_offset;
+ uint32_t in_num_bits;
+ uint32_t out_bit_offset;
+ uint32_t out_num_bits;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ service_id =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ in_quota =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ out_quota =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL);
+ in_bit_offset =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 12UL);
+ in_num_bits =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 16UL);
+ out_bit_offset =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 20UL);
+ out_num_bits =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ 24UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.msg_server_ready)
+ return _client->core.msg_server_ready(_state, service_id,
+ in_quota, out_quota,
+ in_bit_offset,
+ in_num_bits,
+ out_bit_offset,
+ out_num_bits);
+	return 0;
+}
+
+static int
+vs_client_core_core_handle_service_reset(const struct vs_client_core *_client,
+ struct vs_client_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+ uint32_t service_id;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ service_id =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->core.msg_service_reset)
+ return _client->core.msg_service_reset(_state, service_id);
+	return 0;
+}
+
+int
+vs_client_core_core_send_service_reset(struct vs_client_core_state *_state,
+ uint32_t service_id, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_core *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_MSG_SERVICE_RESET;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ service_id;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_core_core_send_service_reset);
+static int
+core_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ vs_message_id_t message_id;
+ __maybe_unused struct vs_client_core_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_client_core *client =
+ to_client_driver(vsdrv)->client;
+
+ int ret;
+
+ /* Extract the message ID */
+ if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Invalid message size %zd\n",
+ __func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+ return -EBADMSG;
+ }
+
+ message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
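+	/*
+	 * Dispatch on the message id; each handler below validates the
+	 * protocol state and message size before touching the payload.
+	 */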
+ switch (message_id) {
+
+/** interface core **/
+/* command in sync connect */
+ case VSERVICE_CORE_CORE_ACK_CONNECT:
+ ret = core_core_handle_ack_connect(client, state, _mbuf);
+ break;
+ case VSERVICE_CORE_CORE_NACK_CONNECT:
+ ret = core_core_handle_nack_connect(client, state, _mbuf);
+ break;
+
+/* command in sync disconnect */
+ case VSERVICE_CORE_CORE_ACK_DISCONNECT:
+ ret = core_core_handle_ack_disconnect(client, state, _mbuf);
+ break;
+ case VSERVICE_CORE_CORE_NACK_DISCONNECT:
+ ret = core_core_handle_nack_disconnect(client, state, _mbuf);
+ break;
+
+/* message startup */
+ case VSERVICE_CORE_CORE_MSG_STARTUP:
+ ret = vs_client_core_core_handle_startup(client, state, _mbuf);
+ break;
+
+/* message shutdown */
+ case VSERVICE_CORE_CORE_MSG_SHUTDOWN:
+ ret = vs_client_core_core_handle_shutdown(client, state, _mbuf);
+ break;
+
+/* message service_created */
+ case VSERVICE_CORE_CORE_MSG_SERVICE_CREATED:
+ ret =
+ vs_client_core_core_handle_service_created(client, state,
+ _mbuf);
+ break;
+
+/* message service_removed */
+ case VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED:
+ ret =
+ vs_client_core_core_handle_service_removed(client, state,
+ _mbuf);
+ break;
+
+/* message server_ready */
+ case VSERVICE_CORE_CORE_MSG_SERVER_READY:
+ ret =
+ vs_client_core_core_handle_server_ready(client, state,
+ _mbuf);
+ break;
+
+/* message service_reset */
+ case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+ ret =
+ vs_client_core_core_handle_service_reset(client, state,
+ _mbuf);
+ break;
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown message type %d\n",
+ __func__, __LINE__, (int)message_id);
+
+ ret = -EPROTO;
+ break;
+ }
+
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+ __func__, __LINE__, (int)message_id, ret);
+
+ }
+
+ return ret;
+}
+
+static void core_handle_notify(struct vs_service_device *service,
+ uint32_t notify_bits)
+{
+ __maybe_unused struct vs_client_core_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_client_core *client =
+ to_client_driver(vsdrv)->client;
+
+ uint32_t bits = notify_bits;
+ int ret;
+
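+	/*
+	 * Handle each asserted notification bit, lowest-numbered first.
+	 * The core protocol defines no notifications, so any bit that
+	 * arrives here is a protocol error.
+	 */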
+ while (bits) {
+ uint32_t not = __ffs(bits);
+ switch (not) {
+
+ /** interface core **/
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown notification %d\n",
+ __func__, __LINE__, (int)not);
+
+ ret = -EPROTO;
+ break;
+
+ }
+ bits &= ~(1 << not);
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+ __func__, __LINE__, (int)not, ret);
+
+ }
+ }
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services core Client Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/core/server.c b/drivers/vservices/protocol/core/server.c
new file mode 100644
index 0000000..c3f3686
--- /dev/null
+++ b/drivers/vservices/protocol/core/server.c
@@ -0,0 +1,1226 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+ * This is the generated code for the core server protocol handling.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/core/types.h>
+#include <vservices/protocol/core/common.h>
+#include <vservices/protocol/core/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
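+/*
+ * Convenience accessors for the transport-level message buffer fields
+ * and for the service pointer embedded in the generated state
+ * structure.
+ */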
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_core_server_driver {
+ struct vs_server_core *server;
+ struct list_head list;
+ struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+ container_of(d, struct vs_core_server_driver, vsdrv)
+
+static void core_handle_start(struct vs_service_device *service)
+{
+
+ struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (server->start)
+ server->start(state);
+ vs_service_state_unlock(service);
+}
+
+static void core_handle_reset(struct vs_service_device *service)
+{
+
+ struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (server->reset)
+ server->reset(state);
+ vs_service_state_unlock(service);
+}
+
+static void core_handle_start_bh(struct vs_service_device *service)
+{
+
+ struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (server->start)
+ server->start(state);
+ vs_service_state_unlock_bh(service);
+}
+
+static void core_handle_reset_bh(struct vs_service_device *service)
+{
+
+ struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+ if (server->reset)
+ server->reset(state);
+ vs_service_state_unlock_bh(service);
+}
+
+static int core_server_probe(struct vs_service_device *service);
+static int core_server_remove(struct vs_service_device *service);
+static int core_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *_mbuf);
+static void core_handle_notify(struct vs_service_device *service,
+ uint32_t flags);
+static void core_handle_start(struct vs_service_device *service);
+static void core_handle_start_bh(struct vs_service_device *service);
+static void core_handle_reset(struct vs_service_device *service);
+static void core_handle_reset_bh(struct vs_service_device *service);
+static int core_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_core_server_register(struct vs_server_core *server,
+ const char *name, struct module *owner)
+{
+ int ret;
+ struct vs_core_server_driver *driver;
+
+ if (server->tx_atomic && !server->rx_atomic)
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver) {
+ ret = -ENOMEM;
+ goto fail_alloc_driver;
+ }
+
+ server->driver = &driver->vsdrv;
+ driver->server = server;
+
+ driver->vsdrv.protocol = VSERVICE_CORE_PROTOCOL_NAME;
+
+ driver->vsdrv.is_server = true;
+ driver->vsdrv.rx_atomic = server->rx_atomic;
+ driver->vsdrv.tx_atomic = server->tx_atomic;
+ /* FIXME Jira ticket SDK-2835 - philipd. */
+ driver->vsdrv.in_quota_min = 1;
+ driver->vsdrv.in_quota_best = server->in_quota_best ?
+ server->in_quota_best : driver->vsdrv.in_quota_min;
+ /* FIXME Jira ticket SDK-2835 - philipd. */
+ driver->vsdrv.out_quota_min = 1;
+ driver->vsdrv.out_quota_best = server->out_quota_best ?
+ server->out_quota_best : driver->vsdrv.out_quota_min;
+ driver->vsdrv.in_notify_count = VSERVICE_CORE_NBIT_IN__COUNT;
+ driver->vsdrv.out_notify_count = VSERVICE_CORE_NBIT_OUT__COUNT;
+
+ driver->vsdrv.probe = core_server_probe;
+ driver->vsdrv.remove = core_server_remove;
+ driver->vsdrv.receive = core_handle_message;
+ driver->vsdrv.notify = core_handle_notify;
+ driver->vsdrv.start = server->tx_atomic ?
+ core_handle_start_bh : core_handle_start;
+ driver->vsdrv.reset = server->tx_atomic ?
+ core_handle_reset_bh : core_handle_reset;
+ driver->vsdrv.tx_ready = core_handle_tx_ready;
+ driver->vsdrv.driver.name = name;
+ driver->vsdrv.driver.owner = owner;
+ driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+ ret = driver_register(&driver->vsdrv.driver);
+
+ if (ret) {
+ goto fail_driver_register;
+ }
+
+ return 0;
+
+ fail_driver_register:
+ server->driver = NULL;
+ kfree(driver);
+ fail_alloc_driver:
+ return ret;
+}
+
+EXPORT_SYMBOL(__vservice_core_server_register);
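+
+/*
+ * Illustrative sketch only (not part of the generated file): a server
+ * implementation fills in struct vs_server_core, optionally providing
+ * in_quota_best/out_quota_best hints, and registers it.  A
+ * vservice_core_server_register() wrapper that passes THIS_MODULE is
+ * assumed to be provided by the server header.
+ *
+ *	static struct vs_server_core my_core_server = {
+ *		.alloc		= my_alloc,
+ *		.release	= my_release,
+ *		.in_quota_best	= 8,
+ *		.out_quota_best	= 8,
+ *	};
+ *
+ *	err = vservice_core_server_register(&my_core_server, "my_core");
+ */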
+
+int vservice_core_server_unregister(struct vs_server_core *server)
+{
+ struct vs_core_server_driver *driver;
+
+ if (!server->driver)
+ return 0;
+
+ driver = to_server_driver(server->driver);
+ driver_unregister(&driver->vsdrv.driver);
+
+ server->driver = NULL;
+ kfree(driver);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vservice_core_server_unregister);
+
+static int core_server_probe(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server = to_server_driver(vsdrv)->server;
+ struct vs_server_core_state *state;
+
+ state = server->alloc(service);
+ if (!state)
+ return -ENOMEM;
+ else if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ state->service = vs_get_service(service);
+ state->state = VSERVICE_CORE_PROTOCOL_RESET_STATE;
+
+ dev_set_drvdata(&service->dev, state);
+
+ return 0;
+}
+
+static int core_server_remove(struct vs_service_device *service)
+{
+ struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server = to_server_driver(vsdrv)->server;
+
+ state->released = true;
+ dev_set_drvdata(&service->dev, NULL);
+ server->release(state);
+
+ vs_put_service(service);
+
+ return 0;
+}
+
+static int core_handle_tx_ready(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_core *server = to_server_driver(vsdrv)->server;
+ struct vs_server_core_state *state = dev_get_drvdata(&service->dev);
+
+ if (server->tx_ready)
+ server->tx_ready(state);
+
+ return 0;
+}
+
+struct vs_mbuf *vs_server_core_core_alloc_service_created(struct
+ vs_server_core_state
+ *_state,
+ struct vs_string
+ *service_name,
+ struct vs_string
+ *protocol_name,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+ const vs_message_id_t _msg_id = VSERVICE_CORE_CORE_MSG_SERVICE_CREATED;
+ const uint32_t _msg_size =
+ sizeof(vs_message_id_t) + VSERVICE_CORE_SERVICE_NAME_SIZE +
+ VSERVICE_CORE_PROTOCOL_NAME_SIZE + 4UL;
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return _mbuf;
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENOMEM);
+ }
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+ if (!service_name)
+ goto fail;
+ service_name->ptr =
+ (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL);
+ service_name->max_size = VSERVICE_CORE_SERVICE_NAME_SIZE;
+ if (!protocol_name)
+ goto fail;
+ protocol_name->ptr =
+ (char *)(VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) +
+ VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL);
+ protocol_name->max_size = VSERVICE_CORE_PROTOCOL_NAME_SIZE;
+
+ return _mbuf;
+
+ fail:
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_alloc_service_created);
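+
+/*
+ * Expected alloc/fill/send sequence (illustrative sketch; the
+ * vs_string_copyin() helper is assumed to come from the vservices
+ * buffer API):
+ *
+ *	struct vs_string sname, pname;
+ *	struct vs_mbuf *mbuf;
+ *	int err;
+ *
+ *	mbuf = vs_server_core_core_alloc_service_created(state, &sname,
+ *			&pname, GFP_KERNEL);
+ *	if (IS_ERR(mbuf))
+ *		return PTR_ERR(mbuf);
+ *	vs_string_copyin(&sname, "my_service");
+ *	vs_string_copyin(&pname, "com.ok-labs.serial");
+ *	err = vs_server_core_core_send_service_created(state, service_id,
+ *			sname, pname, mbuf);
+ */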
+int vs_server_core_core_free_service_created(struct vs_server_core_state
+ *_state,
+ struct vs_string *service_name,
+ struct vs_string *protocol_name,
+ struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_free_service_created);
+int
+vs_server_core_core_send_ack_connect(struct vs_server_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_ACK_CONNECT;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+ VSERVICE_CORE_STATE_CONNECTED);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_ack_connect);
+int
+vs_server_core_core_send_nack_connect(struct vs_server_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_NACK_CONNECT;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+ VSERVICE_CORE_STATE_DISCONNECTED);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_nack_connect);
+int
+vs_server_core_core_send_ack_disconnect(struct vs_server_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_ACK_DISCONNECT;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+ VSERVICE_CORE_STATE_DISCONNECTED);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_ack_disconnect);
+int
+vs_server_core_core_send_nack_disconnect(struct vs_server_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_NACK_DISCONNECT;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+ VSERVICE_CORE_STATE_CONNECTED);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_nack_disconnect);
+static int
+vs_server_core_core_handle_req_connect(const struct vs_server_core *_server,
+ struct vs_server_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED__CONNECT;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->core.req_connect)
+ return _server->core.req_connect(_state);
+ else
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: No handler registered for _server->core.req_connect, command will never be acknowledged\n",
+ __func__, __LINE__);
+ return 0;
+}
+
+static int
+vs_server_core_core_handle_req_disconnect(const struct vs_server_core *_server,
+ struct vs_server_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ _state->state.core.statenum = VSERVICE_CORE_STATE_CONNECTED__DISCONNECT;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->core.req_disconnect)
+ return _server->core.req_disconnect(_state);
+ else
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: No handler registered for _server->core.req_disconnect, command will never be acknowledged\n",
+ __func__, __LINE__);
+ return 0;
+}
+
+int
+vs_server_core_core_send_startup(struct vs_server_core_state *_state,
+ uint32_t core_in_quota,
+ uint32_t core_out_quota, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 8UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_OFFLINE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_MSG_STARTUP;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ core_in_quota;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+ core_out_quota;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.core.statenum = VSERVICE_CORE_STATE_DISCONNECTED;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state, VSERVICE_CORE_STATE_OFFLINE,
+ VSERVICE_CORE_STATE_DISCONNECTED);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_startup);
+int
+vs_server_core_core_send_shutdown(struct vs_server_core_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED:
+ case VSERVICE_CORE_STATE_DISCONNECTED__CONNECT:
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_MSG_SHUTDOWN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_DISCONNECTED:
+ _state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_DISCONNECTED,
+ VSERVICE_CORE_STATE_OFFLINE);
+ break;
+ case VSERVICE_CORE_STATE_CONNECTED:
+ _state->state.core.statenum = VSERVICE_CORE_STATE_OFFLINE;
+
+ if (_server->core.state_change)
+ _server->core.state_change(_state,
+ VSERVICE_CORE_STATE_CONNECTED,
+ VSERVICE_CORE_STATE_OFFLINE);
+ break;
+
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_shutdown);
+int
+vs_server_core_core_send_service_created(struct vs_server_core_state *_state,
+ uint32_t service_id,
+ struct vs_string service_name,
+ struct vs_string protocol_name,
+ struct vs_mbuf *_mbuf)
+{
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+ VSERVICE_CORE_CORE_MSG_SERVICE_CREATED)
+
+ return -EINVAL;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ service_id;
+ {
+ size_t _size = strnlen(service_name.ptr, service_name.max_size);
+ if ((_size + sizeof(vs_message_id_t) + 4UL) >
+ VS_MBUF_SIZE(_mbuf))
+ return -EINVAL;
+
+ memset(service_name.ptr + _size, 0,
+ service_name.max_size - _size);
+ }
+ {
+ size_t _size =
+ strnlen(protocol_name.ptr, protocol_name.max_size);
+ if ((_size + sizeof(vs_message_id_t) +
+ VSERVICE_CORE_SERVICE_NAME_SIZE + 4UL) >
+ VS_MBUF_SIZE(_mbuf))
+ return -EINVAL;
+
+ if (_size < protocol_name.max_size)
+ VS_MBUF_SIZE(_mbuf) -= (protocol_name.max_size - _size);
+
+ }
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_created);
+int
+vs_server_core_core_send_service_removed(struct vs_server_core_state *_state,
+ uint32_t service_id, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ service_id;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_removed);
+int
+vs_server_core_core_send_server_ready(struct vs_server_core_state *_state,
+ uint32_t service_id, uint32_t in_quota,
+ uint32_t out_quota,
+ uint32_t in_bit_offset,
+ uint32_t in_num_bits,
+ uint32_t out_bit_offset,
+ uint32_t out_num_bits, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 28UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+ case VSERVICE_CORE_STATE_CONNECTED__DISCONNECT:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_MSG_SERVER_READY;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ service_id;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 4UL) =
+ in_quota;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 8UL) =
+ out_quota;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 12UL) =
+ in_bit_offset;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 16UL) =
+ in_num_bits;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 20UL) =
+ out_bit_offset;
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 24UL) =
+ out_num_bits;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_server_ready);
+int
+vs_server_core_core_send_service_reset(struct vs_server_core_state *_state,
+ uint32_t service_id, gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_core *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_CORE_CORE_MSG_SERVICE_RESET;
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ service_id;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_core_core_send_service_reset);
+static int
+vs_server_core_core_handle_service_reset(const struct vs_server_core *_server,
+ struct vs_server_core_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+ uint32_t service_id;
+
+ switch (_state->state.core.statenum) {
+ case VSERVICE_CORE_STATE_CONNECTED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.core.statenum,
+ vservice_core_get_state_string(_state->state.core));
+
+ return -EPROTO;
+
+ }
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ service_id =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->core.msg_service_reset)
+ return _server->core.msg_service_reset(_state, service_id);
+	return 0;
+}
+
+static int
+core_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ vs_message_id_t message_id;
+ __maybe_unused struct vs_server_core_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_server_core *server =
+ to_server_driver(vsdrv)->server;
+
+ int ret;
+
+ /* Extract the message ID */
+ if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Invalid message size %zd\n",
+ __func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+ return -EBADMSG;
+ }
+
+ message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+ switch (message_id) {
+
+/** interface core **/
+/* command in sync connect */
+ case VSERVICE_CORE_CORE_REQ_CONNECT:
+ ret =
+ vs_server_core_core_handle_req_connect(server, state,
+ _mbuf);
+ break;
+
+/* command in sync disconnect */
+ case VSERVICE_CORE_CORE_REQ_DISCONNECT:
+ ret =
+ vs_server_core_core_handle_req_disconnect(server, state,
+ _mbuf);
+ break;
+
+/* message service_reset */
+ case VSERVICE_CORE_CORE_MSG_SERVICE_RESET:
+ ret =
+ vs_server_core_core_handle_service_reset(server, state,
+ _mbuf);
+ break;
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown message type %d\n",
+ __func__, __LINE__, (int)message_id);
+
+ ret = -EPROTO;
+ break;
+ }
+
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+ __func__, __LINE__, (int)message_id, ret);
+
+ }
+
+ return ret;
+}
+
+static void core_handle_notify(struct vs_service_device *service,
+ uint32_t notify_bits)
+{
+ __maybe_unused struct vs_server_core_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_server_core *server =
+ to_server_driver(vsdrv)->server;
+
+ uint32_t bits = notify_bits;
+ int ret;
+
+ while (bits) {
+ uint32_t not = __ffs(bits);
+ switch (not) {
+
+ /** interface core **/
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown notification %d\n",
+ __func__, __LINE__, (int)not);
+
+ ret = -EPROTO;
+ break;
+
+ }
+ bits &= ~(1 << not);
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+ __func__, __LINE__, (int)not, ret);
+
+ }
+ }
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services core Server Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/serial/Makefile b/drivers/vservices/protocol/serial/Makefile
new file mode 100644
index 0000000..f5f29ed
--- /dev/null
+++ b/drivers/vservices/protocol/serial/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -Werror
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL_CLIENT) += vservices_protocol_serial_client.o
+vservices_protocol_serial_client-objs = client.o
+
+obj-$(CONFIG_VSERVICES_PROTOCOL_SERIAL_SERVER) += vservices_protocol_serial_server.o
+vservices_protocol_serial_server-objs = server.o
diff --git a/drivers/vservices/protocol/serial/client.c b/drivers/vservices/protocol/serial/client.c
new file mode 100644
index 0000000..1c37e72
--- /dev/null
+++ b/drivers/vservices/protocol/serial/client.c
@@ -0,0 +1,925 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+ * This is the generated code for the serial client protocol handling.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/client.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+static int _vs_client_serial_req_open(struct vs_client_serial_state *_state);
+
+/*** Linux driver model integration ***/
+struct vs_serial_client_driver {
+ struct vs_client_serial *client;
+ struct list_head list;
+ struct vs_service_driver vsdrv;
+};
+
+#define to_client_driver(d) \
+ container_of(d, struct vs_serial_client_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void serial_handle_start(struct vs_service_device *service)
+{
+
+ struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ _vs_client_serial_req_open(state);
+
+ vs_service_state_unlock(service);
+}
+
+static void serial_handle_reset(struct vs_service_device *service)
+{
+
+ struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (client->closed)
+ client->closed(state);
+
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ vs_service_state_unlock(service);
+}
+
+static void serial_handle_start_bh(struct vs_service_device *service)
+{
+
+ struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ _vs_client_serial_req_open(state);
+
+ vs_service_state_unlock_bh(service);
+}
+
+static void serial_handle_reset_bh(struct vs_service_device *service)
+{
+
+ struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client __maybe_unused =
+ to_client_driver(vsdrv)->client;
+
+ vs_service_state_lock_bh(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock_bh(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (client->closed)
+ client->closed(state);
+
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ vs_service_state_unlock_bh(service);
+}
+
+static int serial_client_probe(struct vs_service_device *service);
+static int serial_client_remove(struct vs_service_device *service);
+static int serial_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *_mbuf);
+static void serial_handle_notify(struct vs_service_device *service,
+ uint32_t flags);
+static void serial_handle_start(struct vs_service_device *service);
+static void serial_handle_start_bh(struct vs_service_device *service);
+static void serial_handle_reset(struct vs_service_device *service);
+static void serial_handle_reset_bh(struct vs_service_device *service);
+static int serial_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_serial_client_register(struct vs_client_serial *client,
+ const char *name, struct module *owner)
+{
+ int ret;
+ struct vs_serial_client_driver *driver;
+
+ if (client->tx_atomic && !client->rx_atomic)
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver) {
+ ret = -ENOMEM;
+ goto fail_alloc_driver;
+ }
+
+ client->driver = &driver->vsdrv;
+ driver->client = client;
+
+ driver->vsdrv.protocol = VSERVICE_SERIAL_PROTOCOL_NAME;
+
+ driver->vsdrv.is_server = false;
+ driver->vsdrv.rx_atomic = client->rx_atomic;
+ driver->vsdrv.tx_atomic = client->tx_atomic;
+
+ driver->vsdrv.probe = serial_client_probe;
+ driver->vsdrv.remove = serial_client_remove;
+ driver->vsdrv.receive = serial_handle_message;
+ driver->vsdrv.notify = serial_handle_notify;
+ driver->vsdrv.start = client->tx_atomic ?
+ serial_handle_start_bh : serial_handle_start;
+ driver->vsdrv.reset = client->tx_atomic ?
+ serial_handle_reset_bh : serial_handle_reset;
+ driver->vsdrv.tx_ready = serial_handle_tx_ready;
+ driver->vsdrv.out_notify_count = 0;
+ driver->vsdrv.in_notify_count = 0;
+ driver->vsdrv.driver.name = name;
+ driver->vsdrv.driver.owner = owner;
+ driver->vsdrv.driver.bus = &vs_client_bus_type;
+
+ ret = driver_register(&driver->vsdrv.driver);
+
+ if (ret) {
+ goto fail_driver_register;
+ }
+
+ return 0;
+
+ fail_driver_register:
+ client->driver = NULL;
+ kfree(driver);
+ fail_alloc_driver:
+ return ret;
+}
+
+EXPORT_SYMBOL(__vservice_serial_client_register);
+
+int vservice_serial_client_unregister(struct vs_client_serial *client)
+{
+ struct vs_serial_client_driver *driver;
+
+ if (!client->driver)
+ return 0;
+
+ driver = to_client_driver(client->driver);
+ driver_unregister(&driver->vsdrv.driver);
+
+ client->driver = NULL;
+ kfree(driver);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vservice_serial_client_unregister);
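+
+/*
+ * Illustrative usage sketch, not part of the generated protocol code: a
+ * minimal serial client fills in a struct vs_client_serial and registers
+ * it with __vservice_serial_client_register(). The my_serial_* names are
+ * hypothetical, and <linux/slab.h> is assumed for kzalloc()/kfree().
+ */
+static struct vs_client_serial_state *
+my_serial_alloc(struct vs_service_device *service)
+{
+	/* The state is often embedded in a larger driver-private struct. */
+	return kzalloc(sizeof(struct vs_client_serial_state), GFP_KERNEL);
+}
+
+static void my_serial_release(struct vs_client_serial_state *state)
+{
+	kfree(state);
+}
+
+static void my_serial_opened(struct vs_client_serial_state *state)
+{
+	/* state->serial.packet_size is valid from this point onwards. */
+}
+
+static void my_serial_closed(struct vs_client_serial_state *state)
+{
+	/* The service was closed or reset; stop transmitting. */
+}
+
+static struct vs_client_serial my_serial_client = {
+	.alloc = my_serial_alloc,
+	.release = my_serial_release,
+	.opened = my_serial_opened,
+	.closed = my_serial_closed,
+};
+
+static __maybe_unused int my_serial_example_init(void)
+{
+	/* Typically called from module_init(). */
+	return __vservice_serial_client_register(&my_serial_client,
+			"my_serial_example", THIS_MODULE);
+}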
+
+static int serial_client_probe(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+ struct vs_client_serial_state *state;
+
+ state = client->alloc(service);
+ if (!state)
+ return -ENOMEM;
+ else if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ state->service = vs_get_service(service);
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ dev_set_drvdata(&service->dev, state);
+
+ return 0;
+}
+
+static int serial_client_remove(struct vs_service_device *service)
+{
+ struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+
+ state->released = true;
+ dev_set_drvdata(&service->dev, NULL);
+ client->release(state);
+
+ vs_put_service(service);
+
+ return 0;
+}
+
+static int serial_handle_tx_ready(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_client_serial *client = to_client_driver(vsdrv)->client;
+ struct vs_client_serial_state *state = dev_get_drvdata(&service->dev);
+
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+ return 0;
+
+ if (client->tx_ready)
+ client->tx_ready(state);
+
+ return 0;
+}
+
+static int _vs_client_serial_req_open(struct vs_client_serial_state *_state)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_serial *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+ (_state)) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_REQ_OPEN;
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_open);
+static int _vs_client_serial_req_close(struct vs_client_serial_state *_state)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_serial *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+ (_state)) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_REQ_CLOSE;
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_close);
+static int _vs_client_serial_req_reopen(struct vs_client_serial_state *_state)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_serial *_client =
+ to_client_driver(vsdrv)->client;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ vs_service_has_atomic_rx(VS_STATE_SERVICE_PTR
+ (_state)) ?
+ GFP_ATOMIC : GFP_KERNEL);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_REQ_REOPEN;
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(_vs_client_serial_req_reopen);
+static int
+serial_base_handle_ack_open(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 4UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+ _state->serial.packet_size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ _state->packet_size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ _client->opened(_state);
+ return 0;
+
+}
+
+static int
+serial_base_handle_nack_open(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ dev_err(&VS_STATE_SERVICE_PTR(_state)->dev,
+ "Open operation failed for device %s\n",
+ VS_STATE_SERVICE_PTR(_state)->name);
+
+ return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_open);
+static int
+serial_base_handle_ack_close(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ wake_up_all(&_state->service->quota_wq);
+ _client->closed(_state);
+ return 0;
+
+}
+
+static int
+serial_base_handle_nack_close(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ wake_up_all(&_state->service->quota_wq);
+ _client->closed(_state);
+ return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_close);
+static int
+serial_base_handle_ack_reopen(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE__RESET;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_client->reopened) {
+ _client->reopened(_state);
+ return 0;
+ }
+ wake_up_all(&_state->service->quota_wq);
+ _client->closed(_state);
+ return _vs_client_serial_req_open(_state);
+
+}
+
+static int
+serial_base_handle_nack_reopen(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ return 0;
+
+}
+
+EXPORT_SYMBOL(serial_base_handle_ack_reopen);
+struct vs_mbuf *vs_client_serial_serial_alloc_msg(struct vs_client_serial_state
+ *_state, struct vs_pbuf *b,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+ const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+ const uint32_t _msg_size =
+ sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return _mbuf;
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENOMEM);
+ }
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+ if (!b)
+ goto fail;
+ b->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+ sizeof(uint32_t));
+ b->size = _state->serial.packet_size;
+ b->max_size = b->size;
+ return _mbuf;
+
+ fail:
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ return NULL;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_alloc_msg);
+int vs_client_serial_serial_getbufs_msg(struct vs_client_serial_state *_state,
+ struct vs_pbuf *b,
+ struct vs_mbuf *_mbuf)
+{
+ const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+ const size_t _min_size = _max_size - _state->serial.packet_size;
+ size_t _exact_size;
+
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+ return -EINVAL;
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ b->size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ b->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+ sizeof(uint32_t));
+ b->max_size = b->size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->serial.packet_size - b->size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_getbufs_msg);
+int vs_client_serial_serial_free_msg(struct vs_client_serial_state *_state,
+ struct vs_pbuf *b, struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_free_msg);
+static int
+vs_client_serial_serial_handle_msg(const struct vs_client_serial *_client,
+ struct vs_client_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+ struct vs_pbuf b;
+ const size_t _min_size = _max_size - _state->serial.packet_size;
+ size_t _exact_size;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+
+ /* The first check is to ensure the message isn't complete garbage */
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ b.size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ b.data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+ sizeof(uint32_t));
+ b.max_size = b.size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->serial.packet_size - b.size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+ if (_client->serial.msg_msg)
+ return _client->serial.msg_msg(_state, b, _mbuf);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_handle_msg);
+int
+vs_client_serial_serial_send_msg(struct vs_client_serial_state *_state,
+ struct vs_pbuf b, struct vs_mbuf *_mbuf)
+{
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_client_serial *_client =
+ to_client_driver(vsdrv)->client;
+ if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+ return -EPROTO;
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+ VSERVICE_SERIAL_SERIAL_MSG_MSG)
+
+ return -EINVAL;
+
+ if ((b.size + sizeof(vs_message_id_t) + 0UL) > VS_MBUF_SIZE(_mbuf))
+ return -EINVAL;
+
+ if (b.size < b.max_size)
+ VS_MBUF_SIZE(_mbuf) -= (b.max_size - b.size);
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ b.size;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_client_serial_serial_send_msg);
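+
+/*
+ * Illustrative TX sketch built on the alloc/send helpers above: allocate a
+ * message buffer, copy the payload into the pbuf, shrink it to the actual
+ * length and send it. The function name is hypothetical, <linux/string.h>
+ * is assumed for memcpy(), and GFP_KERNEL assumes a non-atomic context.
+ */
+static __maybe_unused int my_serial_write(struct vs_client_serial_state *state,
+		const void *data, size_t len)
+{
+	struct vs_pbuf pbuf;
+	struct vs_mbuf *mbuf;
+	int err;
+
+	if (len > state->serial.packet_size)
+		return -EMSGSIZE;
+
+	mbuf = vs_client_serial_serial_alloc_msg(state, &pbuf, GFP_KERNEL);
+	if (IS_ERR(mbuf))
+		return PTR_ERR(mbuf);
+
+	memcpy(pbuf.data, data, len);
+	pbuf.size = len;
+
+	err = vs_client_serial_serial_send_msg(state, pbuf, mbuf);
+	if (err)
+		vs_client_serial_serial_free_msg(state, &pbuf, mbuf);
+
+	return err;
+}
+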
+static int
+serial_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ vs_message_id_t message_id;
+ __maybe_unused struct vs_client_serial_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_client_serial *client =
+ to_client_driver(vsdrv)->client;
+
+ int ret;
+
+ /* Extract the message ID */
+ if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Invalid message size %zd\n",
+ __func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+ return -EBADMSG;
+ }
+
+ message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+ switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+ case VSERVICE_SERIAL_BASE_ACK_OPEN:
+ ret = serial_base_handle_ack_open(client, state, _mbuf);
+ break;
+ case VSERVICE_SERIAL_BASE_NACK_OPEN:
+ ret = serial_base_handle_nack_open(client, state, _mbuf);
+ break;
+
+/* command in sync close */
+ case VSERVICE_SERIAL_BASE_ACK_CLOSE:
+ ret = serial_base_handle_ack_close(client, state, _mbuf);
+ break;
+ case VSERVICE_SERIAL_BASE_NACK_CLOSE:
+ ret = serial_base_handle_nack_close(client, state, _mbuf);
+ break;
+
+/* command in sync reopen */
+ case VSERVICE_SERIAL_BASE_ACK_REOPEN:
+ ret = serial_base_handle_ack_reopen(client, state, _mbuf);
+ break;
+ case VSERVICE_SERIAL_BASE_NACK_REOPEN:
+ ret = serial_base_handle_nack_reopen(client, state, _mbuf);
+ break;
+
+/** interface serial **/
+/* message msg */
+ case VSERVICE_SERIAL_SERIAL_MSG_MSG:
+ ret = vs_client_serial_serial_handle_msg(client, state, _mbuf);
+ break;
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown message type %d\n",
+ __func__, __LINE__, (int)message_id);
+
+ ret = -EPROTO;
+ break;
+ }
+
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+ __func__, __LINE__, (int)message_id, ret);
+
+ }
+
+ return ret;
+}
+
+static void serial_handle_notify(struct vs_service_device *service,
+ uint32_t notify_bits)
+{
+ __maybe_unused struct vs_client_serial_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_client_serial *client =
+ to_client_driver(vsdrv)->client;
+
+ uint32_t bits = notify_bits;
+ int ret;
+
+ while (bits) {
+ uint32_t not = __ffs(bits);
+ switch (not) {
+
+ /** interface serial **/
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown notification %d\n",
+ __func__, __LINE__, (int)not);
+
+ ret = -EPROTO;
+ break;
+
+ }
+		bits &= ~(1U << not);
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+ __func__, __LINE__, (int)not, ret);
+
+ }
+ }
+}
+
+int vs_client_serial_reopen(struct vs_client_serial_state *_state)
+{
+ return _vs_client_serial_req_reopen(_state);
+}
+
+EXPORT_SYMBOL(vs_client_serial_reopen);
+
+int vs_client_serial_close(struct vs_client_serial_state *_state)
+{
+ return _vs_client_serial_req_close(_state);
+}
+
+EXPORT_SYMBOL(vs_client_serial_close);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services serialClient Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/protocol/serial/server.c b/drivers/vservices/protocol/serial/server.c
new file mode 100644
index 0000000..e5d1034
--- /dev/null
+++ b/drivers/vservices/protocol/serial/server.c
@@ -0,0 +1,1086 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+ /*
+ * This is the generated code for the serial server protocol handling.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 2, 0)
+#include <linux/export.h>
+#endif
+
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+#include <vservices/protocol/serial/types.h>
+#include <vservices/protocol/serial/common.h>
+#include <vservices/protocol/serial/server.h>
+#include <vservices/service.h>
+
+#include "../../transport.h"
+
+#define VS_MBUF_SIZE(mbuf) mbuf->size
+#define VS_MBUF_DATA(mbuf) mbuf->data
+#define VS_STATE_SERVICE_PTR(state) state->service
+
+/*** Linux driver model integration ***/
+struct vs_serial_server_driver {
+ struct vs_server_serial *server;
+ struct list_head list;
+ struct vs_service_driver vsdrv;
+};
+
+#define to_server_driver(d) \
+ container_of(d, struct vs_serial_server_driver, vsdrv)
+
+static void reset_nack_requests(struct vs_service_device *service)
+{
+
+}
+
+static void serial_handle_start(struct vs_service_device *service)
+{
+
+ struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock(service);
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ vs_service_state_unlock(service);
+}
+
+static void serial_handle_reset(struct vs_service_device *service)
+{
+
+ struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (server->closed)
+ server->closed(state);
+
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ vs_service_state_unlock(service);
+}
+
+static void serial_handle_start_bh(struct vs_service_device *service)
+{
+
+ struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock_bh(service);
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ vs_service_state_unlock_bh(service);
+}
+
+static void serial_handle_reset_bh(struct vs_service_device *service)
+{
+
+ struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server __maybe_unused =
+ to_server_driver(vsdrv)->server;
+
+ vs_service_state_lock_bh(service);
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base)) {
+ vs_service_state_unlock_bh(service);
+ return;
+ }
+ state->state.base = VSERVICE_BASE_RESET_STATE;
+ reset_nack_requests(service);
+ if (server->closed)
+ server->closed(state);
+
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ vs_service_state_unlock_bh(service);
+}
+
+static int serial_server_probe(struct vs_service_device *service);
+static int serial_server_remove(struct vs_service_device *service);
+static int serial_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *_mbuf);
+static void serial_handle_notify(struct vs_service_device *service,
+ uint32_t flags);
+static void serial_handle_start(struct vs_service_device *service);
+static void serial_handle_start_bh(struct vs_service_device *service);
+static void serial_handle_reset(struct vs_service_device *service);
+static void serial_handle_reset_bh(struct vs_service_device *service);
+static int serial_handle_tx_ready(struct vs_service_device *service);
+
+int __vservice_serial_server_register(struct vs_server_serial *server,
+ const char *name, struct module *owner)
+{
+ int ret;
+ struct vs_serial_server_driver *driver;
+
+ if (server->tx_atomic && !server->rx_atomic)
+ return -EINVAL;
+
+ driver = kzalloc(sizeof(*driver), GFP_KERNEL);
+ if (!driver) {
+ ret = -ENOMEM;
+ goto fail_alloc_driver;
+ }
+
+ server->driver = &driver->vsdrv;
+ driver->server = server;
+
+ driver->vsdrv.protocol = VSERVICE_SERIAL_PROTOCOL_NAME;
+
+ driver->vsdrv.is_server = true;
+ driver->vsdrv.rx_atomic = server->rx_atomic;
+ driver->vsdrv.tx_atomic = server->tx_atomic;
+ /* FIXME Jira ticket SDK-2835 - philipd. */
+ driver->vsdrv.in_quota_min = 1;
+ driver->vsdrv.in_quota_best = server->in_quota_best ?
+ server->in_quota_best : driver->vsdrv.in_quota_min;
+ /* FIXME Jira ticket SDK-2835 - philipd. */
+ driver->vsdrv.out_quota_min = 1;
+ driver->vsdrv.out_quota_best = server->out_quota_best ?
+ server->out_quota_best : driver->vsdrv.out_quota_min;
+ driver->vsdrv.in_notify_count = VSERVICE_SERIAL_NBIT_IN__COUNT;
+ driver->vsdrv.out_notify_count = VSERVICE_SERIAL_NBIT_OUT__COUNT;
+
+ driver->vsdrv.probe = serial_server_probe;
+ driver->vsdrv.remove = serial_server_remove;
+ driver->vsdrv.receive = serial_handle_message;
+ driver->vsdrv.notify = serial_handle_notify;
+ driver->vsdrv.start = server->tx_atomic ?
+ serial_handle_start_bh : serial_handle_start;
+ driver->vsdrv.reset = server->tx_atomic ?
+ serial_handle_reset_bh : serial_handle_reset;
+ driver->vsdrv.tx_ready = serial_handle_tx_ready;
+ driver->vsdrv.out_notify_count = 0;
+ driver->vsdrv.in_notify_count = 0;
+ driver->vsdrv.driver.name = name;
+ driver->vsdrv.driver.owner = owner;
+ driver->vsdrv.driver.bus = &vs_server_bus_type;
+
+ ret = driver_register(&driver->vsdrv.driver);
+
+ if (ret) {
+ goto fail_driver_register;
+ }
+
+ return 0;
+
+ fail_driver_register:
+ server->driver = NULL;
+ kfree(driver);
+ fail_alloc_driver:
+ return ret;
+}
+
+EXPORT_SYMBOL(__vservice_serial_server_register);
+
+int vservice_serial_server_unregister(struct vs_server_serial *server)
+{
+ struct vs_serial_server_driver *driver;
+
+ if (!server->driver)
+ return 0;
+
+ driver = to_server_driver(server->driver);
+ driver_unregister(&driver->vsdrv.driver);
+
+ server->driver = NULL;
+ kfree(driver);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vservice_serial_server_unregister);
+
+static int serial_server_probe(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+ struct vs_server_serial_state *state;
+
+ state = server->alloc(service);
+ if (!state)
+ return -ENOMEM;
+ else if (IS_ERR(state))
+ return PTR_ERR(state);
+
+ state->service = vs_get_service(service);
+ state->state = VSERVICE_SERIAL_PROTOCOL_RESET_STATE;
+
+ dev_set_drvdata(&service->dev, state);
+
+ return 0;
+}
+
+static int serial_server_remove(struct vs_service_device *service)
+{
+ struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+
+ state->released = true;
+ dev_set_drvdata(&service->dev, NULL);
+ server->release(state);
+
+ vs_put_service(service);
+
+ return 0;
+}
+
+static int serial_handle_tx_ready(struct vs_service_device *service)
+{
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_server_serial *server = to_server_driver(vsdrv)->server;
+ struct vs_server_serial_state *state = dev_get_drvdata(&service->dev);
+
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(state->state.base))
+ return 0;
+
+ if (server->tx_ready)
+ server->tx_ready(state);
+
+ return 0;
+}
+
+static int
+vs_server_serial_send_ack_open(struct vs_server_serial_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 4UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_ACK_OPEN;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ _state->packet_size;
+ _state->serial.packet_size = _state->packet_size;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_open);
+static int
+vs_server_serial_send_nack_open(struct vs_server_serial_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED__OPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_NACK_OPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_open);
+static int
+vs_server_serial_send_ack_close(struct vs_server_serial_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_ACK_CLOSE;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_close);
+static int
+vs_server_serial_send_nack_close(struct vs_server_serial_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__CLOSE:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_NACK_CLOSE;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_close);
+static int
+vs_server_serial_send_ack_reopen(struct vs_server_serial_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_ACK_REOPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE__RESET;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_ack_reopen);
+static int
+vs_server_serial_send_nack_reopen(struct vs_server_serial_state *_state,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+
+ const size_t _msg_size = sizeof(vs_message_id_t) + 0UL;
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING__REOPEN:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return PTR_ERR(_mbuf);
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+
+ return -ENOMEM;
+ }
+
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) =
+ VSERVICE_SERIAL_BASE_NACK_REOPEN;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_send_nack_reopen);
+static int
+vs_server_serial_handle_req_open(const struct vs_server_serial *_server,
+ struct vs_server_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_CLOSED:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_CLOSED__OPEN;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->open)
+ return vs_server_serial_open_complete(_state,
+ _server->open(_state));
+ return vs_server_serial_open_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_serial_open_complete(struct vs_server_serial_state *_state,
+ vs_server_response_type_t resp)
+{
+ int ret = 0;
+ if (resp == VS_SERVER_RESP_SUCCESS)
+ ret =
+ vs_server_serial_send_ack_open(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+ else if (resp == VS_SERVER_RESP_FAILURE)
+ ret =
+ vs_server_serial_send_nack_open(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+
+ return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_open_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_open);
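+
+/*
+ * Illustrative sketch of a server open callback: returning
+ * VS_SERVER_RESP_SUCCESS causes ack_open to be sent, which carries
+ * _state->packet_size to the client. The name and the packet size chosen
+ * here are hypothetical.
+ */
+static __maybe_unused vs_server_response_type_t
+my_serial_open(struct vs_server_serial_state *state)
+{
+	/* Advertise the per-message payload limit before acking. */
+	state->packet_size = 4096;
+	return VS_SERVER_RESP_SUCCESS;
+}
+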
+static int
+vs_server_serial_handle_req_close(const struct vs_server_serial *_server,
+ struct vs_server_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__CLOSE;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->close)
+ return vs_server_serial_close_complete(_state,
+ _server->close(_state));
+ return vs_server_serial_close_complete(_state, VS_SERVER_RESP_SUCCESS);
+
+}
+
+int vs_server_serial_close_complete(struct vs_server_serial_state *_state,
+ vs_server_response_type_t resp)
+{
+ int ret = 0;
+ if (resp == VS_SERVER_RESP_SUCCESS)
+ ret =
+ vs_server_serial_send_ack_close(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+ else if (resp == VS_SERVER_RESP_FAILURE)
+ ret =
+ vs_server_serial_send_nack_close(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+ if ((resp == VS_SERVER_RESP_SUCCESS) && (ret == 0)) {
+ wake_up_all(&_state->service->quota_wq);
+ }
+ return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_close_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_close);
+static int
+vs_server_serial_handle_req_reopen(const struct vs_server_serial *_server,
+ struct vs_server_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _expected_size = sizeof(vs_message_id_t) + 0UL;
+
+ if (VS_MBUF_SIZE(_mbuf) < _expected_size)
+ return -EBADMSG;
+
+ switch (_state->state.base.statenum) {
+ case VSERVICE_BASE_STATE_RUNNING:
+
+ break;
+
+ default:
+ dev_err(&_state->service->dev,
+ "[%s:%d] Protocol error: In wrong protocol state %d - %s\n",
+ __func__, __LINE__, _state->state.base.statenum,
+ vservice_base_get_state_string(_state->state.base));
+
+ return -EPROTO;
+
+ }
+ _state->state.base.statenum = VSERVICE_BASE_STATE_RUNNING__REOPEN;
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (_server->reopen)
+ return vs_server_serial_reopen_complete(_state,
+ _server->
+ reopen(_state));
+ else
+ return vs_server_serial_send_nack_reopen(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC
+ : GFP_KERNEL);
+
+}
+
+int vs_server_serial_reopen_complete(struct vs_server_serial_state *_state,
+ vs_server_response_type_t resp)
+{
+ int ret = 0;
+ if (resp == VS_SERVER_RESP_SUCCESS) {
+ ret =
+ vs_server_serial_send_ack_reopen(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+ } else if (resp == VS_SERVER_RESP_FAILURE) {
+ ret =
+ vs_server_serial_send_nack_reopen(_state,
+ vs_service_has_atomic_rx
+ (VS_STATE_SERVICE_PTR
+ (_state)) ? GFP_ATOMIC :
+ GFP_KERNEL);
+ }
+
+ return ret;
+
+}
+
+EXPORT_SYMBOL(vs_server_serial_reopen_complete);
+
+EXPORT_SYMBOL(vs_server_serial_handle_req_reopen);
+struct vs_mbuf *vs_server_serial_serial_alloc_msg(struct vs_server_serial_state
+ *_state, struct vs_pbuf *b,
+ gfp_t flags)
+{
+ struct vs_mbuf *_mbuf;
+ const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+ const uint32_t _msg_size =
+ sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+ _mbuf =
+ vs_service_alloc_mbuf(VS_STATE_SERVICE_PTR(_state), _msg_size,
+ flags);
+ if (IS_ERR(_mbuf))
+ return _mbuf;
+ if (!_mbuf) {
+
+ WARN_ON_ONCE(1);
+ return ERR_PTR(-ENOMEM);
+ }
+ *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) = _msg_id;
+
+ if (!b)
+ goto fail;
+ b->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+ sizeof(uint32_t));
+ b->size = _state->serial.packet_size;
+ b->max_size = b->size;
+ return _mbuf;
+
+ fail:
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ return NULL;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_alloc_msg);
+int vs_server_serial_serial_getbufs_msg(struct vs_server_serial_state *_state,
+ struct vs_pbuf *b,
+ struct vs_mbuf *_mbuf)
+{
+ const vs_message_id_t _msg_id = VSERVICE_SERIAL_SERIAL_MSG_MSG;
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+ const size_t _min_size = _max_size - _state->serial.packet_size;
+ size_t _exact_size;
+
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) != _msg_id)
+ return -EINVAL;
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ b->size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ b->data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+ sizeof(uint32_t));
+ b->max_size = b->size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->serial.packet_size - b->size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_getbufs_msg);
+int vs_server_serial_serial_free_msg(struct vs_server_serial_state *_state,
+ struct vs_pbuf *b, struct vs_mbuf *_mbuf)
+{
+ vs_service_free_mbuf(VS_STATE_SERVICE_PTR(_state), _mbuf);
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_free_msg);
+int
+vs_server_serial_serial_send_msg(struct vs_server_serial_state *_state,
+ struct vs_pbuf b, struct vs_mbuf *_mbuf)
+{
+
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(VS_STATE_SERVICE_PTR(_state)->dev.driver);
+ __maybe_unused struct vs_server_serial *_server =
+ to_server_driver(vsdrv)->server;
+ if (_state->state.base.statenum != VSERVICE_BASE_STATE_RUNNING)
+ return -EPROTO;
+ if (*(vs_message_id_t *) (VS_MBUF_DATA(_mbuf)) !=
+ VSERVICE_SERIAL_SERIAL_MSG_MSG)
+
+ return -EINVAL;
+
+ if ((b.size + sizeof(vs_message_id_t) + 0UL) > VS_MBUF_SIZE(_mbuf))
+ return -EINVAL;
+
+ if (b.size < b.max_size)
+ VS_MBUF_SIZE(_mbuf) -= (b.max_size - b.size);
+
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL) =
+ b.size;
+
+ {
+ int err = vs_service_send(VS_STATE_SERVICE_PTR(_state), _mbuf);
+ if (err) {
+ dev_warn(&_state->service->dev,
+ "[%s:%d] Protocol warning: Error %d sending message on transport.\n",
+ __func__, __LINE__, err);
+
+ return err;
+ }
+ }
+
+ return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_send_msg);
+static int
+vs_server_serial_serial_handle_msg(const struct vs_server_serial *_server,
+ struct vs_server_serial_state *_state,
+ struct vs_mbuf *_mbuf)
+{
+ const size_t _max_size =
+ sizeof(vs_message_id_t) + _state->serial.packet_size + 4UL;
+ struct vs_pbuf b;
+ const size_t _min_size = _max_size - _state->serial.packet_size;
+ size_t _exact_size;
+ if (!VSERVICE_BASE_STATE_IS_RUNNING(_state->state.base))
+ return -EPROTO;
+
+ /* The first check is to ensure the message isn't complete garbage */
+ if ((VS_MBUF_SIZE(_mbuf) > _max_size)
+ || (VS_MBUF_SIZE(_mbuf) < _min_size))
+ return -EBADMSG;
+
+ b.size =
+ *(uint32_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL);
+ b.data =
+ (uintptr_t *) (VS_MBUF_DATA(_mbuf) + sizeof(vs_message_id_t) + 0UL +
+ sizeof(uint32_t));
+ b.max_size = b.size;
+
+ /* Now check the size received is the exact size expected */
+ _exact_size = _max_size - (_state->serial.packet_size - b.size);
+ if (VS_MBUF_SIZE(_mbuf) != _exact_size)
+ return -EBADMSG;
+ if (_server->serial.msg_msg)
+ return _server->serial.msg_msg(_state, b, _mbuf);
+	return 0;
+}
+
+EXPORT_SYMBOL(vs_server_serial_serial_handle_msg);
+static int
+serial_handle_message(struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ vs_message_id_t message_id;
+ __maybe_unused struct vs_server_serial_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_server_serial *server =
+ to_server_driver(vsdrv)->server;
+
+ int ret;
+
+ /* Extract the message ID */
+ if (VS_MBUF_SIZE(_mbuf) < sizeof(message_id)) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Invalid message size %zd\n",
+ __func__, __LINE__, VS_MBUF_SIZE(_mbuf));
+
+ return -EBADMSG;
+ }
+
+ message_id = *(vs_message_id_t *) (VS_MBUF_DATA(_mbuf));
+
+ switch (message_id) {
+
+/** interface base **/
+/* command in sync open */
+ case VSERVICE_SERIAL_BASE_REQ_OPEN:
+ ret = vs_server_serial_handle_req_open(server, state, _mbuf);
+ break;
+
+/* command in sync close */
+ case VSERVICE_SERIAL_BASE_REQ_CLOSE:
+ ret = vs_server_serial_handle_req_close(server, state, _mbuf);
+ break;
+
+/* command in sync reopen */
+ case VSERVICE_SERIAL_BASE_REQ_REOPEN:
+ ret = vs_server_serial_handle_req_reopen(server, state, _mbuf);
+ break;
+
+/** interface serial **/
+/* message msg */
+ case VSERVICE_SERIAL_SERIAL_MSG_MSG:
+ ret = vs_server_serial_serial_handle_msg(server, state, _mbuf);
+ break;
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown message type %d\n",
+ __func__, __LINE__, (int)message_id);
+
+ ret = -EPROTO;
+ break;
+ }
+
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for message type %d returned %d\n",
+ __func__, __LINE__, (int)message_id, ret);
+
+ }
+
+ return ret;
+}
+
+static void serial_handle_notify(struct vs_service_device *service,
+ uint32_t notify_bits)
+{
+ __maybe_unused struct vs_server_serial_state *state =
+ dev_get_drvdata(&service->dev);
+ struct vs_service_driver *vsdrv =
+ to_vs_service_driver(service->dev.driver);
+ __maybe_unused struct vs_server_serial *server =
+ to_server_driver(vsdrv)->server;
+
+ uint32_t bits = notify_bits;
+ int ret;
+
+ while (bits) {
+ uint32_t not = __ffs(bits);
+ switch (not) {
+
+ /** interface serial **/
+
+ default:
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Unknown notification %d\n",
+ __func__, __LINE__, (int)not);
+
+ ret = -EPROTO;
+ break;
+
+ }
+		bits &= ~(1U << not);
+ if (ret) {
+ dev_err(&state->service->dev,
+ "[%s:%d] Protocol error: Handler for notification %d returned %d\n",
+ __func__, __LINE__, (int)not, ret);
+
+ }
+ }
+}
+
+MODULE_DESCRIPTION("OKL4 Virtual Services serialServer Protocol Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/session.c b/drivers/vservices/session.c
new file mode 100644
index 0000000..d695184
--- /dev/null
+++ b/drivers/vservices/session.c
@@ -0,0 +1,2913 @@
+/*
+ * drivers/vservices/session.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the generic session-management code for the vServices framework.
+ * It creates service and session devices on request from session and
+ * transport drivers, respectively; it also queues incoming messages from the
+ * transport and distributes them to the session's services.
+ */
+
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/jiffies.h>
+#include <linux/workqueue.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/mutex.h>
+#include <linux/bitops.h>
+#include <linux/slab.h>
+#include <linux/kdev_t.h>
+#include <linux/err.h>
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+
+#include "session.h"
+#include "transport.h"
+#include "compat.h"
+
+/* Minimum required time between resets to avoid throttling */
+#define RESET_THROTTLE_TIME msecs_to_jiffies(1000)
+
+/*
+ * Minimum/maximum reset throttling time. The reset throttle will start at
+ * the minimum and increase to the maximum exponentially.
+ */
+#define RESET_THROTTLE_MIN RESET_THROTTLE_TIME
+#define RESET_THROTTLE_MAX msecs_to_jiffies(8 * 1000)
+
+/*
+ * If the reset is being throttled and a sane reset (one that does not
+ * need throttling) is requested, throttling is disabled once a period
+ * equal to the service's reset delay multiplied by this value has
+ * elapsed.
+ */
+#define RESET_THROTTLE_COOL_OFF_MULT 2
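+
+/*
+ * For example, with the values above a continuously resetting service is
+ * delayed by roughly 1000ms, then 2000ms, then 4000ms, and then 8000ms for
+ * each subsequent reset until the cool-off period passes.
+ */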
+
+/* IDR of session ids to sessions */
+static DEFINE_IDR(session_idr);
+DEFINE_MUTEX(vs_session_lock);
+EXPORT_SYMBOL_GPL(vs_session_lock);
+
+/* Notifier list for vService session events */
+static BLOCKING_NOTIFIER_HEAD(vs_session_notifier_list);
+
+static unsigned long default_debug_mask;
+module_param(default_debug_mask, ulong, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(default_debug_mask, "Default vServices debug mask");
+
+/* vServices root in sysfs at /sys/vservices */
+struct kobject *vservices_root;
+EXPORT_SYMBOL_GPL(vservices_root);
+
+/* vServices server root in sysfs at /sys/vservices/server-sessions */
+struct kobject *vservices_server_root;
+EXPORT_SYMBOL_GPL(vservices_server_root);
+
+/* vServices client root in sysfs at /sys/vservices/client-sessions */
+struct kobject *vservices_client_root;
+EXPORT_SYMBOL_GPL(vservices_client_root);
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+struct vs_service_device *vs_service_lookup_by_devt(dev_t dev)
+{
+ struct vs_session_device *session;
+ struct vs_service_device *service;
+
+ mutex_lock(&vs_session_lock);
+	session = idr_find(&session_idr, MINOR(dev) / VS_MAX_SERVICES);
+	if (!session) {
+		mutex_unlock(&vs_session_lock);
+		return NULL;
+	}
+	get_device(&session->dev);
+	mutex_unlock(&vs_session_lock);
+
+ service = vs_session_get_service(session,
+ MINOR(dev) % VS_MAX_SERVICES);
+ put_device(&session->dev);
+
+ return service;
+}
+#endif
+
+struct vs_session_for_each_data {
+ int (*fn)(struct vs_session_device *session, void *data);
+ void *data;
+};
+
+int vs_session_for_each_from_idr(int id, void *session, void *_data)
+{
+ struct vs_session_for_each_data *data =
+ (struct vs_session_for_each_data *)_data;
+ return data->fn(session, data->data);
+}
+
+/**
+ * vs_session_for_each_locked - call a callback function for each session
+ * @fn: function to call
+ * @data: opaque pointer that is passed through to the function
+ */
+int vs_session_for_each_locked(
+ int (*fn)(struct vs_session_device *session, void *data),
+ void *data)
+{
+ struct vs_session_for_each_data priv = { .fn = fn, .data = data };
+
+ lockdep_assert_held(&vs_session_lock);
+
+ return idr_for_each(&session_idr, vs_session_for_each_from_idr,
+ &priv);
+}
+EXPORT_SYMBOL(vs_session_for_each_locked);
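+
+/*
+ * Illustrative usage sketch: counting the registered sessions via
+ * vs_session_for_each_locked(). The helper names are hypothetical.
+ */
+static int __maybe_unused vs_count_one_session(
+		struct vs_session_device *session, void *data)
+{
+	(*(int *)data)++;
+	return 0;
+}
+
+static int __maybe_unused vs_count_sessions(void)
+{
+	int count = 0;
+
+	mutex_lock(&vs_session_lock);
+	vs_session_for_each_locked(vs_count_one_session, &count);
+	mutex_unlock(&vs_session_lock);
+
+	return count;
+}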
+
+/**
+ * vs_register_notify - register a notifier callback for vServices events
+ * @nb: pointer to the notifier block for the callback events.
+ */
+void vs_session_register_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_register(&vs_session_notifier_list, nb);
+}
+EXPORT_SYMBOL(vs_session_register_notify);
+
+/**
+ * vs_unregister_notify - unregister a notifier callback for vServices events
+ * @nb: pointer to the notifier block for the callback events.
+ */
+void vs_session_unregister_notify(struct notifier_block *nb)
+{
+ blocking_notifier_chain_unregister(&vs_session_notifier_list, nb);
+}
+EXPORT_SYMBOL(vs_session_unregister_notify);
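+
+/*
+ * Illustrative sketch: subscribing to session events. The callback follows
+ * the standard notifier_block convention; the handler and its names are
+ * hypothetical, and the event codes are defined elsewhere.
+ */
+static int my_session_event(struct notifier_block *nb,
+		unsigned long event, void *data)
+{
+	/* Inspect 'event' and react to session creation/teardown here. */
+	return NOTIFY_OK;
+}
+
+static struct notifier_block my_session_nb __maybe_unused = {
+	.notifier_call = my_session_event,
+};
+
+/* ... vs_session_register_notify(&my_session_nb); ... */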
+
+/*
+ * Helper function for returning how long ago something happened.
+ * Marked __maybe_unused because its callers only exist when
+ * CONFIG_VSERVICES_DEBUG is enabled; the annotation suppresses the
+ * unused-function warning in non-debug builds, where removing the
+ * function would instead break the debug configuration.
+ */
+static __maybe_unused unsigned msecs_ago(unsigned long jiffy_value)
+{
+ return jiffies_to_msecs(jiffies - jiffy_value);
+}
+
+static void session_fatal_error_work(struct work_struct *work)
+{
+ struct vs_session_device *session = container_of(work,
+ struct vs_session_device, fatal_error_work);
+
+ session->transport->vt->reset(session->transport);
+}
+
+static void session_fatal_error(struct vs_session_device *session, gfp_t gfp)
+{
+ schedule_work(&session->fatal_error_work);
+}
+
+/*
+ * Service readiness state machine
+ *
+ * The states are:
+ *
+ * INIT: Initial state. Service may not be completely configured yet
+ * (typically because the protocol hasn't been set); call vs_service_start
+ * once configuration is complete. The disable count must be nonzero, and
+ * must never reach zero in this state.
+ * DISABLED: Service is not permitted to communicate. Non-core services are
+ * in this state whenever the core protocol and/or transport state does not
+ * allow them to be active; core services are only in this state transiently.
+ * The disable count must be nonzero; when it reaches zero, the service
+ * transitions to RESET state.
+ * RESET: Service drivers are inactive at both ends, but the core service
+ * state allows the service to become active. The session will schedule a
+ * future transition to READY state when entering this state, but the
+ * transition may be delayed to throttle the rate at which resets occur.
+ * READY: All core-service and session-layer policy allows the service to
+ * communicate; it will become active as soon as it has a protocol driver.
+ * ACTIVE: The driver is present and communicating.
+ * LOCAL_RESET: We have initiated a reset at this end, but the remote end has
+ * not yet acknowledged it. We will enter the RESET state on receiving
+ * acknowledgement, unless the disable count is nonzero in which case we
+ * will enter DISABLED state.
+ * LOCAL_DELETE: As for LOCAL_RESET, but we will enter the DELETED state
+ * instead of RESET or DISABLED.
+ * DELETED: The service is no longer present on the session; the service
+ * device structure may still exist because something is holding a reference
+ * to it.
+ *
+ * The permitted transitions are:
+ *
+ * From To Trigger
+ * INIT DISABLED vs_service_start
+ * DISABLED RESET vs_service_enable (disable_count -> 0)
+ * RESET READY End of throttle delay (may be 0)
+ * READY	ACTIVE		Later of probe() and entering READY
+ * {READY, ACTIVE}
+ * LOCAL_RESET vs_service_reset
+ * {READY, ACTIVE, LOCAL_RESET}
+ * RESET vs_service_handle_reset (server)
+ * RESET DISABLED vs_service_disable (server)
+ * {READY, ACTIVE, LOCAL_RESET}
+ * DISABLED vs_service_handle_reset (client)
+ * {INIT, RESET, READY, ACTIVE, LOCAL_RESET}
+ * DISABLED vs_service_disable_noncore
+ * {ACTIVE, LOCAL_RESET}
+ * LOCAL_DELETE vs_service_delete
+ * {INIT, DISABLED, RESET, READY}
+ * DELETED vs_service_delete
+ * LOCAL_DELETE DELETED vs_service_handle_reset
+ * vs_service_disable_noncore
+ *
+ * See the documentation for the triggers for details.
+ */
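+
+/*
+ * Illustrative lifecycle sketch (not part of this driver), assuming a newly
+ * registered server-side service whose protocol is already set:
+ *
+ *	vs_service_start(service);		// INIT -> DISABLED
+ *	vs_service_enable(service);		// DISABLED -> RESET; READY
+ *						// follows after any throttle
+ *						// delay, then ACTIVE once a
+ *						// driver has probed
+ *	...
+ *	vs_service_reset(service, service);	// ACTIVE -> LOCAL_RESET
+ *	vs_service_delete(service, owner);	// begin removal; DELETED once
+ *						// the partner acknowledges
+ */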
+
+enum vs_service_readiness {
+ VS_SERVICE_INIT,
+ VS_SERVICE_DISABLED,
+ VS_SERVICE_RESET,
+ VS_SERVICE_READY,
+ VS_SERVICE_ACTIVE,
+ VS_SERVICE_LOCAL_RESET,
+ VS_SERVICE_LOCAL_DELETE,
+ VS_SERVICE_DELETED,
+};
+
+/* Session activation states. */
+enum {
+ VS_SESSION_RESET,
+ VS_SESSION_ACTIVATE,
+ VS_SESSION_ACTIVE,
+};
+
+/**
+ * vs_service_start - Start a service by moving it from the init state to the
+ * disabled state.
+ *
+ * @service: The service to start.
+ *
+ * Returns true if the service was started, or false if it was not.
+ */
+bool vs_service_start(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct vs_session_driver *session_drv =
+ to_vs_session_driver(session->dev.driver);
+
+ WARN_ON(!service->protocol);
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ if (service->readiness != VS_SERVICE_INIT) {
+ if (service->readiness != VS_SERVICE_DELETED)
+ dev_err(&service->dev,
+ "start called from invalid state %d\n",
+ service->readiness);
+ mutex_unlock(&service->ready_lock);
+ return false;
+ }
+
+ if (service->id != 0 && session_drv->service_added) {
+ int err = session_drv->service_added(session, service);
+ if (err < 0) {
+ dev_err(&session->dev, "Failed to add service %d: %d\n",
+ service->id, err);
+ mutex_unlock(&service->ready_lock);
+ return false;
+ }
+ }
+
+ service->readiness = VS_SERVICE_DISABLED;
+ service->disable_count = 1;
+ service->last_reset_request = jiffies;
+
+ mutex_unlock(&service->ready_lock);
+
+ /* Tell userspace about the service. */
+ dev_set_uevent_suppress(&service->dev, false);
+ kobject_uevent(&service->dev.kobj, KOBJ_ADD);
+
+ return true;
+}
+EXPORT_SYMBOL_GPL(vs_service_start);
+
+static void cancel_pending_rx(struct vs_service_device *service);
+static void queue_ready_work(struct vs_service_device *service);
+
+static void __try_start_service(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct vs_session_driver *session_drv =
+ to_vs_session_driver(session->dev.driver);
+ struct vs_transport *transport;
+ int err;
+ struct vs_service_driver *driver;
+
+ lockdep_assert_held(&service->ready_lock);
+
+ /* We can't start if the service is not ready yet. */
+ if (service->readiness != VS_SERVICE_READY)
+ return;
+
+ /*
+ * There should never be anything in the RX queue at this point.
+ * If there is, it can seriously confuse the service drivers for
+ * no obvious reason, so we check.
+ */
+ if (WARN_ON(!list_empty(&service->rx_queue)))
+ cancel_pending_rx(service);
+
+ if (!service->driver_probed) {
+ vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+ "ready with no driver\n");
+ return;
+ }
+
+ /* Prepare the transport to support the service. */
+ transport = session->transport;
+ err = transport->vt->service_start(transport, service);
+
+ if (err < 0) {
+ /* fatal error attempting to start; reset and try again */
+ service->readiness = VS_SERVICE_RESET;
+ service->last_reset_request = jiffies;
+ service->last_reset = jiffies;
+ queue_ready_work(service);
+
+ return;
+ }
+
+ service->readiness = VS_SERVICE_ACTIVE;
+
+ driver = to_vs_service_driver(service->dev.driver);
+ if (driver->start)
+ driver->start(service);
+
+ if (service->id && session_drv->service_start) {
+ err = session_drv->service_start(session, service);
+ if (err < 0) {
+ dev_err(&session->dev, "Failed to start service %s (%d): %d\n",
+ dev_name(&service->dev),
+ service->id, err);
+ session_fatal_error(session, GFP_KERNEL);
+ }
+ }
+}
+
+static void try_start_service(struct vs_service_device *service)
+{
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ __try_start_service(service);
+
+ mutex_unlock(&service->ready_lock);
+}
+
+static void service_ready_work(struct work_struct *work)
+{
+ struct vs_service_device *service = container_of(work,
+ struct vs_service_device, ready_work.work);
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+ "ready work - last reset request was %u ms ago\n",
+ msecs_ago(service->last_reset_request));
+
+ /*
+ * Make sure there's no reset work pending from an earlier driver
+ * failure. We should already be inactive at this point, so it's safe
+ * to just cancel it.
+ */
+ cancel_work_sync(&service->reset_work);
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ if (service->readiness != VS_SERVICE_RESET) {
+ vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+ "ready work found readiness of %d, doing nothing\n",
+ service->readiness);
+ mutex_unlock(&service->ready_lock);
+ return;
+ }
+
+ service->readiness = VS_SERVICE_READY;
+ /* Record the time at which this happened, for throttling. */
+ service->last_ready = jiffies;
+
+ /* Tell userspace that the service is ready. */
+ kobject_uevent(&service->dev.kobj, KOBJ_ONLINE);
+
+ /* Start the service, if it has a driver attached. */
+ __try_start_service(service);
+
+ mutex_unlock(&service->ready_lock);
+}
+
+static int __enable_service(struct vs_service_device *service);
+
+/**
+ * __reset_service - make a service inactive, and tell its driver, the
+ * transport, and possibly the remote partner
+ * @service: The service to reset
+ * @notify_remote: If true, the partner is notified of the reset
+ *
+ * This routine is called to make an active service inactive. If the given
+ * service is currently active, it drops any queued messages for the service,
+ * and then informs the service driver and the transport layer that the
+ * service has reset. It sets the service readiness to VS_SERVICE_LOCAL_RESET
+ * to indicate that the driver is no longer active.
+ *
+ * This routine has no effect on services that are not active.
+ *
+ * The caller must hold the target service's ready lock.
+ */
+static void __reset_service(struct vs_service_device *service,
+ bool notify_remote)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct vs_session_driver *session_drv =
+ to_vs_session_driver(session->dev.driver);
+ struct vs_service_driver *driver = NULL;
+ struct vs_transport *transport;
+ int err;
+
+ lockdep_assert_held(&service->ready_lock);
+
+ /* If we're already inactive, there's nothing to do. */
+ if (service->readiness != VS_SERVICE_ACTIVE)
+ return;
+
+ service->last_reset = jiffies;
+ service->readiness = VS_SERVICE_LOCAL_RESET;
+
+ cancel_pending_rx(service);
+
+ if (!WARN_ON(!service->driver_probed))
+ driver = to_vs_service_driver(service->dev.driver);
+
+ if (driver && driver->reset)
+ driver->reset(service);
+
+ wake_up_all(&service->quota_wq);
+
+ transport = vs_service_get_session(service)->transport;
+
+ /*
+ * Ask the transport to reset the service. If this returns a positive
+ * value, we need to leave the service disabled, and the transport
+ * will re-enable it. To avoid allowing the disable count to go
+ * negative if that re-enable races with this callback returning, we
+ * disable the service beforehand and re-enable it if the callback
+ * returns zero.
+ */
+ service->disable_count++;
+ err = transport->vt->service_reset(transport, service);
+ if (err < 0) {
+ dev_err(&session->dev, "Failed to reset service %d: %d (transport)\n",
+ service->id, err);
+ session_fatal_error(session, GFP_KERNEL);
+ } else if (!err) {
+ err = __enable_service(service);
+ }
+
+ if (notify_remote) {
+ if (service->id) {
+ err = session_drv->service_local_reset(session,
+ service);
+ if (err == VS_SERVICE_ALREADY_RESET) {
+ service->readiness = VS_SERVICE_RESET;
+ service->last_reset = jiffies;
+ queue_ready_work(service);
+
+ } else if (err < 0) {
+ dev_err(&session->dev, "Failed to reset service %d: %d (session)\n",
+ service->id, err);
+ session_fatal_error(session, GFP_KERNEL);
+ }
+ } else {
+ session->transport->vt->reset(session->transport);
+ }
+ }
+
+ /* Tell userspace that the service is no longer active. */
+ kobject_uevent(&service->dev.kobj, KOBJ_OFFLINE);
+}
+
+/**
+ * reset_service - reset a service and inform the remote partner
+ * @service: The service to reset
+ *
+ * This routine is called when a reset is locally initiated (other than
+ * implicitly by a session / core service reset). It bumps the reset request
+ * timestamp, acquires the necessary locks, and calls __reset_service.
+ *
+ * This routine returns with the service ready lock held, to allow the caller
+ * to make any other state changes that must be atomic with the service
+ * reset.
+ */
+static void reset_service(struct vs_service_device *service)
+ __acquires(service->ready_lock)
+{
+ service->last_reset_request = jiffies;
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ __reset_service(service, true);
+}
+
+/**
+ * vs_service_reset - initiate a service reset
+ * @service: the service that is to be reset
+ * @caller: the service that is initiating the reset
+ *
+ * This routine informs the partner that the given service is being reset,
+ * then disables and flushes the service's receive queues and resets its
+ * driver. The service will be automatically re-enabled once the partner has
+ * acknowledged the reset (see vs_session_handle_service_reset, above).
+ *
+ * If the given service is the core service, this will perform a transport
+ * reset, which implicitly resets (on the server side) or destroys (on
+ * the client side) every other service on the session.
+ *
+ * If the given service is already being reset, this has no effect, other
+ * than to delay completion of the reset if it is being throttled.
+ *
+ * For lock safety reasons, a service can only be directly reset by itself,
+ * the core service, or the service that created it (which is typically also
+ * the core service).
+ *
+ * A service that wishes to reset itself must not do so while holding its state
+ * lock or while running on its own workqueue. In these circumstances, call
+ * vs_service_reset_nosync() instead. Note that returning an error code
+ * (any negative number) from a driver callback forces a call to
+ * vs_service_reset_nosync() and prints an error message.
+ */
+int vs_service_reset(struct vs_service_device *service,
+ struct vs_service_device *caller)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ if (caller != service && caller != service->owner) {
+ struct vs_service_device *core_service = session->core_service;
+
+ WARN_ON(!core_service);
+ if (caller != core_service)
+ return -EPERM;
+ }
+
+ reset_service(service);
+ /* reset_service returns with ready_lock held, but we don't need it */
+ mutex_unlock(&service->ready_lock);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_reset);
+
+/**
+ * vs_service_reset_nosync - asynchronously reset a service.
+ * @service: the service that is to be reset
+ *
+ * This routine triggers a reset for the nominated service. It may be called
+ * from any context, including interrupt context. It does not wait for the
+ * reset to occur, and provides no synchronisation guarantees when called from
+ * outside the target service.
+ *
+ * This is intended only for service drivers that need to reset themselves
+ * from a context that would not normally allow it. In other cases, use
+ * vs_service_reset.
+ */
+void vs_service_reset_nosync(struct vs_service_device *service)
+{
+ service->pending_reset = true;
+ schedule_work(&service->reset_work);
+}
+EXPORT_SYMBOL_GPL(vs_service_reset_nosync);
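+
+/*
+ * Illustrative sketch (not part of this driver): a service driver resetting
+ * itself where vs_service_reset() is not allowed, such as while holding its
+ * own state lock (fatal_condition is hypothetical):
+ *
+ *	vs_service_state_lock(service);
+ *	if (fatal_condition)
+ *		vs_service_reset_nosync(service);
+ *	vs_service_state_unlock(service);
+ */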
+
+static void
+vs_service_remove_sysfs_entries(struct vs_session_device *session,
+ struct vs_service_device *service)
+{
+ sysfs_remove_link(session->sysfs_entry, service->sysfs_name);
+ sysfs_remove_link(&service->dev.kobj, VS_SESSION_SYMLINK_NAME);
+}
+
+static void vs_session_release_service_id(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ mutex_lock(&session->service_idr_lock);
+ idr_remove(&session->service_idr, service->id);
+ mutex_unlock(&session->service_idr_lock);
+ vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev,
+ "service id deallocated\n");
+}
+
+static void destroy_service(struct vs_service_device *service,
+ bool notify_remote)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct vs_session_driver *session_drv =
+ to_vs_session_driver(session->dev.driver);
+ struct vs_service_device *core_service __maybe_unused =
+ session->core_service;
+ int err;
+
+ lockdep_assert_held(&service->ready_lock);
+ WARN_ON(service->readiness != VS_SERVICE_DELETED);
+
+ /* Notify the core service and transport that the service is gone */
+ session->transport->vt->service_remove(session->transport, service);
+ if (notify_remote && service->id && session_drv->service_removed) {
+ err = session_drv->service_removed(session, service);
+ if (err < 0) {
+ dev_err(&session->dev,
+ "Failed to remove service %d: %d\n",
+ service->id, err);
+ session_fatal_error(session, GFP_KERNEL);
+ }
+ }
+
+ /*
+ * At this point the service is guaranteed to be gone on the client
+ * side, so we can safely release the service ID.
+ */
+ if (session->is_server)
+ vs_session_release_service_id(service);
+
+ /*
+ * This guarantees that any concurrent vs_session_get_service() that
+ * found the service before we removed it from the IDR will take a
+ * reference before we release ours.
+ *
+ * This similarly protects for_each_[usable_]service().
+ */
+ synchronize_rcu();
+
+ /* Matches device_initialize() in vs_service_register() */
+ put_device(&service->dev);
+}
+
+/**
+ * disable_service - prevent a service becoming ready
+ * @service: the service that is to be disabled
+ * @force: true if the service is known to be in reset
+ *
+ * This routine may be called for any inactive service. Once disabled, the
+ * service cannot be made ready by the session, and thus cannot become active,
+ * until vs_service_enable() is called for it. If multiple calls are made to
+ * this function, they must be balanced by vs_service_enable() calls.
+ *
+ * If the force option is true, then any pending unacknowledged reset will be
+ * presumed to have been acknowledged. This is used when the core service is
+ * entering reset.
+ *
+ * This is used by the core service client to prevent the service restarting
+ * until the server is ready (i.e., a server_ready message is received); by
+ * the session layer to stop all communication while the core service itself
+ * is in reset; and by the transport layer when the transport was unable to
+ * complete reset of a service in its reset callback (typically because
+ * a service had passed message buffers to another Linux subsystem and could
+ * not free them immediately).
+ *
+ * In any case, there is no need for the operation to be signalled in any
+ * way, because the service is already in reset. It simply delays future
+ * signalling of service readiness.
+ */
+static void disable_service(struct vs_service_device *service, bool force)
+{
+ lockdep_assert_held(&service->ready_lock);
+
+	switch (service->readiness) {
+ case VS_SERVICE_INIT:
+ case VS_SERVICE_DELETED:
+ case VS_SERVICE_LOCAL_DELETE:
+ dev_err(&service->dev, "disabled while uninitialised\n");
+ break;
+ case VS_SERVICE_ACTIVE:
+ dev_err(&service->dev, "disabled while active\n");
+ break;
+ case VS_SERVICE_LOCAL_RESET:
+ /*
+ * Will go to DISABLED state when reset completes, unless
+ * it's being forced (i.e. we're moving to a core protocol
+ * state that implies everything else is reset).
+ */
+ if (force)
+ service->readiness = VS_SERVICE_DISABLED;
+ service->disable_count++;
+ break;
+ default:
+ service->readiness = VS_SERVICE_DISABLED;
+ service->disable_count++;
+ break;
+ }
+
+ cancel_delayed_work(&service->ready_work);
+}
+
+static int service_handle_reset(struct vs_session_device *session,
+ struct vs_service_device *target, bool disable)
+{
+ struct vs_session_driver *session_drv =
+ to_vs_session_driver(session->dev.driver);
+ int err = 0;
+
+ mutex_lock_nested(&target->ready_lock, target->lock_subclass);
+
+ switch (target->readiness) {
+ case VS_SERVICE_LOCAL_DELETE:
+ target->readiness = VS_SERVICE_DELETED;
+ destroy_service(target, true);
+ break;
+ case VS_SERVICE_ACTIVE:
+ /*
+ * Reset the service and send a reset notification.
+ *
+ * We only send notifications for non-core services. This is
+ * because core notifies by sending a transport reset, which
+ * is what brought us here in the first place. Note that we
+ * must already hold the core service state lock iff the
+ * target is non-core.
+ */
+ target->last_reset_request = jiffies;
+ __reset_service(target, target->id != 0);
+ /* fall through */
+ case VS_SERVICE_LOCAL_RESET:
+ target->readiness = target->disable_count ?
+ VS_SERVICE_DISABLED : VS_SERVICE_RESET;
+ if (disable)
+ disable_service(target, false);
+ if (target->readiness != VS_SERVICE_DISABLED)
+ queue_ready_work(target);
+ break;
+ case VS_SERVICE_READY:
+ /* Tell userspace that the service is no longer ready. */
+ kobject_uevent(&target->dev.kobj, KOBJ_OFFLINE);
+ /* fall through */
+ case VS_SERVICE_RESET:
+ /*
+ * This can happen for a non-core service if we get a reset
+ * request from the server on the client side, after the
+ * client has enabled the service but before it is active.
+ * Note that the service is already active on the server side
+ * at this point. The client's delay may be due to either
+ * reset throttling or the absence of a driver.
+ *
+ * We bump the reset request timestamp, disable the service
+ * again, and send back an acknowledgement.
+ */
+ if (disable && target->id) {
+ target->last_reset_request = jiffies;
+
+ err = session_drv->service_local_reset(
+ session, target);
+ if (err < 0) {
+ dev_err(&session->dev,
+					"Failed to reset service %d: %d\n",
+ target->id, err);
+ session_fatal_error(session,
+ GFP_KERNEL);
+ }
+
+ disable_service(target, false);
+ break;
+ }
+ /* fall through */
+ case VS_SERVICE_DISABLED:
+ /*
+ * This can happen for the core service if we get a reset
+ * before the transport has activated, or before the core
+ * service has become ready.
+ *
+ * We bump the reset request timestamp, and disable the
+ * service again if the transport had already activated and
+ * enabled it.
+ */
+ if (disable && !target->id) {
+ target->last_reset_request = jiffies;
+
+ if (target->readiness != VS_SERVICE_DISABLED)
+ disable_service(target, false);
+
+ break;
+ }
+ /* fall through */
+ default:
+ dev_warn(&target->dev, "remote reset while inactive (%d)\n",
+ target->readiness);
+ err = -EPROTO;
+ break;
+ }
+
+ mutex_unlock(&target->ready_lock);
+ return err;
+}
+
+/**
+ * vs_service_handle_reset - handle an incoming notification of a reset
+ * @session: the session that owns the service
+ * @service_id: the ID of the service that is to be reset
+ * @disable: if true, the service will not be automatically re-enabled
+ *
+ * This routine is called by the core service when the remote end notifies us
+ * of a non-core service reset. The service must be in ACTIVE, LOCAL_RESET or
+ * LOCAL_DELETE state. It must be called with the core service's state lock
+ * held.
+ *
+ * If the service was in ACTIVE state, the core service is called back to send
+ * a notification to the other end. If it was in LOCAL_DELETE state, it is
+ * unregistered.
+ */
+int vs_service_handle_reset(struct vs_session_device *session,
+ vs_service_id_t service_id, bool disable)
+{
+ struct vs_service_device *target;
+ int ret;
+
+ if (!service_id)
+ return -EINVAL;
+
+ target = vs_session_get_service(session, service_id);
+ if (!target)
+ return -ENODEV;
+
+ ret = service_handle_reset(session, target, disable);
+ vs_put_service(target);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vs_service_handle_reset);
+
+static int __enable_service(struct vs_service_device *service)
+{
+ if (WARN_ON(!service->disable_count))
+ return -EINVAL;
+
+ if (--service->disable_count > 0)
+ return 0;
+
+ /*
+ * If the service is still resetting, it can't become ready until the
+ * reset completes. If it has been deleted, it will never become
+ * ready. In either case, there's nothing more to do.
+ */
+ if ((service->readiness == VS_SERVICE_LOCAL_RESET) ||
+ (service->readiness == VS_SERVICE_LOCAL_DELETE) ||
+ (service->readiness == VS_SERVICE_DELETED))
+ return 0;
+
+ if (WARN_ON(service->readiness != VS_SERVICE_DISABLED))
+ return -EINVAL;
+
+ service->readiness = VS_SERVICE_RESET;
+ service->last_reset = jiffies;
+ queue_ready_work(service);
+
+ return 0;
+}
+
+/**
+ * vs_service_enable - allow a service to become ready
+ * @service: the service that is to be enabled
+ *
+ * Calling this routine for a service permits the session layer to make the
+ * service ready. It will do so as soon as any outstanding reset throttling
+ * is complete, and will then start the service once it has a driver attached.
+ *
+ * Services are disabled, requiring a call to this routine to re-enable them:
+ * - when first initialised (after vs_service_start),
+ * - when reset on the client side by vs_service_handle_reset,
+ * - when the transport has delayed completion of a reset, and
+ * - when the server-side core protocol is disconnected or reset by
+ * vs_session_disable_noncore.
+ */
+int vs_service_enable(struct vs_service_device *service)
+{
+ int ret;
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ ret = __enable_service(service);
+
+ mutex_unlock(&service->ready_lock);
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(vs_service_enable);
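+
+/*
+ * Illustrative note (not part of this driver): disables and enables must be
+ * balanced. For example, when the transport defers completion of a reset
+ * (its service_reset callback returning a positive value, which leaves one
+ * extra disable count held), it later re-enables the service with:
+ *
+ *	vs_service_enable(service);	// disable_count 1 -> 0, enters RESET
+ */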
+
+/*
+ * Service work functions
+ */
+static void queue_rx_work(struct vs_service_device *service)
+{
+ bool rx_atomic;
+
+ rx_atomic = vs_service_has_atomic_rx(service);
+ vs_dev_debug(VS_DEBUG_SESSION, vs_service_get_session(service),
+ &service->dev, "Queuing rx %s\n",
+ rx_atomic ? "tasklet (atomic)" : "work (cansleep)");
+
+ if (rx_atomic)
+ tasklet_schedule(&service->rx_tasklet);
+ else
+ queue_work(service->work_queue, &service->rx_work);
+}
+
+static void cancel_pending_rx(struct vs_service_device *service)
+{
+ struct vs_mbuf *mbuf;
+
+ lockdep_assert_held(&service->ready_lock);
+
+ cancel_work_sync(&service->rx_work);
+ tasklet_kill(&service->rx_tasklet);
+
+ spin_lock_irq(&service->rx_lock);
+ while (!list_empty(&service->rx_queue)) {
+ mbuf = list_first_entry(&service->rx_queue,
+ struct vs_mbuf, queue);
+ list_del_init(&mbuf->queue);
+ spin_unlock_irq(&service->rx_lock);
+ vs_service_free_mbuf(service, mbuf);
+ spin_lock_irq(&service->rx_lock);
+ }
+ service->tx_ready = false;
+ spin_unlock_irq(&service->rx_lock);
+}
+
+static bool reset_throttle_cooled_off(struct vs_service_device *service);
+static unsigned long reset_cool_off(struct vs_service_device *service);
+
+static void service_cooloff_work(struct work_struct *work)
+{
+ struct vs_service_device *service = container_of(work,
+ struct vs_service_device, cooloff_work.work);
+ struct vs_session_device *session = vs_service_get_session(service);
+ unsigned long current_time = jiffies, wake_time;
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ if (reset_throttle_cooled_off(service)) {
+ vs_debug(VS_DEBUG_SESSION, session,
+ "Reset thrashing cooled off (delay = %u ms, cool off = %u ms, last reset %u ms ago, last reset request was %u ms ago)\n",
+ jiffies_to_msecs(service->reset_delay),
+ jiffies_to_msecs(reset_cool_off(service)),
+ msecs_ago(service->last_reset),
+ msecs_ago(service->last_reset_request));
+
+ service->reset_delay = 0;
+
+ /*
+ * If the service is already in reset, then queue_ready_work
+ * has already run and has deferred queuing of the ready_work
+ * until cooloff. Schedule the ready work to run immediately.
+ */
+ if (service->readiness == VS_SERVICE_RESET)
+ schedule_delayed_work(&service->ready_work, 0);
+ } else {
+ /*
+ * This can happen if last_reset_request has been bumped
+ * since the cooloff work was first queued. We need to
+ * work out how long it is until the service cools off,
+ * then reschedule ourselves.
+ */
+ wake_time = reset_cool_off(service) +
+ service->last_reset_request;
+
+ WARN_ON(time_after(current_time, wake_time));
+
+ schedule_delayed_work(&service->cooloff_work,
+ wake_time - current_time);
+ }
+
+ mutex_unlock(&service->ready_lock);
+}
+
+static void
+service_reset_work(struct work_struct *work)
+{
+ struct vs_service_device *service = container_of(work,
+ struct vs_service_device, reset_work);
+
+ service->pending_reset = false;
+
+ vs_service_reset(service, service);
+}
+
+/* Returns true if there are more messages to handle */
+static bool
+dequeue_and_handle_received_message(struct vs_service_device *service)
+{
+ struct vs_service_driver *driver =
+ to_vs_service_driver(service->dev.driver);
+ struct vs_session_device *session = vs_service_get_session(service);
+ const struct vs_transport_vtable *vt = session->transport->vt;
+ struct vs_service_stats *stats = &service->stats;
+ struct vs_mbuf *mbuf;
+ size_t size;
+ int ret;
+
+ /* Don't do rx work unless the service is active */
+ if (service->readiness != VS_SERVICE_ACTIVE)
+ return false;
+
+ /* Atomically take an item from the queue */
+ spin_lock_irq(&service->rx_lock);
+ if (!list_empty(&service->rx_queue)) {
+ mbuf = list_first_entry(&service->rx_queue, struct vs_mbuf,
+ queue);
+ list_del_init(&mbuf->queue);
+ spin_unlock_irq(&service->rx_lock);
+ size = vt->mbuf_size(mbuf);
+
+ /*
+ * Call the message handler for the service. The service's
+ * message handler is responsible for freeing the mbuf when it
+ * is done with it.
+ */
+ ret = driver->receive(service, mbuf);
+ if (ret < 0) {
+ atomic_inc(&service->stats.recv_failures);
+ dev_err(&service->dev,
+ "receive returned %d; resetting service\n",
+ ret);
+ vs_service_reset_nosync(service);
+ return false;
+ } else {
+ atomic_add(size, &service->stats.recv_bytes);
+ atomic_inc(&service->stats.recv_mbufs);
+ }
+
+ } else if (service->tx_ready) {
+ service->tx_ready = false;
+ spin_unlock_irq(&service->rx_lock);
+
+ /*
+ * Update the tx_ready stats accounting and then call the
+ * service's tx_ready handler.
+ */
+ atomic_inc(&stats->nr_tx_ready);
+ if (atomic_read(&stats->nr_over_quota) > 0) {
+ int total;
+
+ total = atomic_add_return(jiffies_to_msecs(jiffies -
+ stats->over_quota_time),
+ &stats->over_quota_time_total);
+ atomic_set(&stats->over_quota_time_avg, total /
+ atomic_read(&stats->nr_over_quota));
+ }
+ atomic_set(&service->is_over_quota, 0);
+
+ /*
+ * Note that a service's quota may reduce at any point, even
+ * during the tx_ready handler. This is important if a service
+ * has an ordered list of pending messages to send. If a
+ * message fails to send from the tx_ready handler due to
+ * over-quota then subsequent messages in the same handler may
+ * send successfully. To avoid sending messages in the
+ * incorrect order the service's tx_ready handler should
+ * return immediately if a message fails to send.
+ */
+ ret = driver->tx_ready(service);
+ if (ret < 0) {
+ dev_err(&service->dev,
+ "tx_ready returned %d; resetting service\n",
+ ret);
+ vs_service_reset_nosync(service);
+ return false;
+ }
+ } else {
+ spin_unlock_irq(&service->rx_lock);
+ }
+
+ /*
+ * There's no need to lock for this list_empty: if we race
+ * with a msg enqueue, we'll be rescheduled by the other side,
+ * and if we race with a dequeue, we'll just do nothing when
+ * we run (or will be cancelled before we run).
+ */
+ return !list_empty(&service->rx_queue) || service->tx_ready;
+}
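+
+/*
+ * Illustrative sketch (not part of this driver): per the ordering note above,
+ * a tx_ready handler draining a pending-message list should stop at the first
+ * send failure rather than skipping over it (priv->pending and my_send_one()
+ * are hypothetical):
+ *
+ *	static int my_tx_ready(struct vs_service_device *service)
+ *	{
+ *		while (!list_empty(&priv->pending)) {
+ *			if (my_send_one(service) < 0)
+ *				return 0;	// retry on the next tx_ready
+ *		}
+ *		return 0;
+ *	}
+ */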
+
+static void service_rx_tasklet(unsigned long data)
+{
+ struct vs_service_device *service = (struct vs_service_device *)data;
+ bool resched;
+
+ /*
+ * There is no need to acquire the state spinlock or mutex here,
+ * because this tasklet is disabled when the lock is held. These
+ * are annotations for sparse and lockdep, respectively.
+ *
+ * We can't annotate the implicit mutex acquire because lockdep gets
+ * upset about inconsistent softirq states.
+ */
+ __acquire(service);
+ spin_acquire(&service->state_spinlock.dep_map, 0, 0, _THIS_IP_);
+
+ resched = dequeue_and_handle_received_message(service);
+
+ if (resched)
+ tasklet_schedule(&service->rx_tasklet);
+
+ spin_release(&service->state_spinlock.dep_map, 0, _THIS_IP_);
+ __release(service);
+}
+
+static void service_rx_work(struct work_struct *work)
+{
+ struct vs_service_device *service = container_of(work,
+ struct vs_service_device, rx_work);
+ bool requeue;
+
+ /*
+ * We must acquire the state mutex here to protect services that
+ * are using vs_service_state_lock().
+ *
+ * There is no need to acquire the spinlock, which is never used in
+ * drivers with task context receive handlers.
+ */
+ vs_service_state_lock(service);
+
+ requeue = dequeue_and_handle_received_message(service);
+
+ vs_service_state_unlock(service);
+
+ if (requeue)
+ queue_work(service->work_queue, work);
+}
+
+/*
+ * Service sysfs statistics counters. These files are all atomic_t, and
+ * read only, so we use a generator macro to avoid code duplication.
+ */
+#define service_stat_attr(__name) \
+ static ssize_t service_stat_##__name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+ { \
+ struct vs_service_device *service = \
+ to_vs_service_device(dev); \
+ \
+ return scnprintf(buf, PAGE_SIZE, "%u\n", \
+ atomic_read(&service->stats.__name)); \
+ } \
+ static DEVICE_ATTR(__name, S_IRUGO, \
+ service_stat_##__name##_show, NULL);
+
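+/*
+ * For example, service_stat_attr(recv_bytes) expands (roughly) to:
+ *
+ *	static ssize_t service_stat_recv_bytes_show(struct device *dev,
+ *			struct device_attribute *attr, char *buf)
+ *	{
+ *		struct vs_service_device *service = to_vs_service_device(dev);
+ *
+ *		return scnprintf(buf, PAGE_SIZE, "%u\n",
+ *				atomic_read(&service->stats.recv_bytes));
+ *	}
+ *	static DEVICE_ATTR(recv_bytes, S_IRUGO,
+ *			service_stat_recv_bytes_show, NULL);
+ */
+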
+service_stat_attr(sent_mbufs);
+service_stat_attr(sent_bytes);
+service_stat_attr(recv_mbufs);
+service_stat_attr(recv_bytes);
+service_stat_attr(nr_over_quota);
+service_stat_attr(nr_tx_ready);
+service_stat_attr(over_quota_time_total);
+service_stat_attr(over_quota_time_avg);
+
+static struct attribute *service_stat_dev_attrs[] = {
+ &dev_attr_sent_mbufs.attr,
+ &dev_attr_sent_bytes.attr,
+ &dev_attr_recv_mbufs.attr,
+ &dev_attr_recv_bytes.attr,
+ &dev_attr_nr_over_quota.attr,
+ &dev_attr_nr_tx_ready.attr,
+ &dev_attr_over_quota_time_total.attr,
+ &dev_attr_over_quota_time_avg.attr,
+ NULL,
+};
+
+static const struct attribute_group service_stat_attributes = {
+ .name = "stats",
+ .attrs = service_stat_dev_attrs,
+};
+
+static void delete_service(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ bool notify_on_destroy = true;
+
+ /* FIXME: Jira ticket SDK-3495 - philipd. */
+ /* This should be the caller's responsibility */
+ vs_get_service(service);
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ /*
+ * If we're on the client side, the service should already have been
+ * disabled at this point.
+ */
+ WARN_ON(service->id != 0 && !session->is_server &&
+ service->readiness != VS_SERVICE_DISABLED &&
+ service->readiness != VS_SERVICE_DELETED);
+
+ /*
+ * Make sure the service is not active, and notify the remote end if
+ * it needs to be reset. Note that we already hold the core service
+ * state lock iff this is a non-core service.
+ */
+ __reset_service(service, true);
+
+ /*
+ * If the remote end is aware that the service is inactive, we can
+ * delete right away; otherwise we need to wait for a notification
+ * that the service has reset.
+ */
+ switch (service->readiness) {
+ case VS_SERVICE_LOCAL_DELETE:
+ case VS_SERVICE_DELETED:
+ /* Nothing to do here */
+ mutex_unlock(&service->ready_lock);
+ vs_put_service(service);
+ return;
+ case VS_SERVICE_ACTIVE:
+ BUG();
+ break;
+ case VS_SERVICE_LOCAL_RESET:
+ service->readiness = VS_SERVICE_LOCAL_DELETE;
+ break;
+ case VS_SERVICE_INIT:
+ notify_on_destroy = false;
+ /* Fall through */
+ default:
+ service->readiness = VS_SERVICE_DELETED;
+ destroy_service(service, notify_on_destroy);
+ break;
+ }
+
+ mutex_unlock(&service->ready_lock);
+
+ /*
+	 * Remove the service symlink from the
+	 * /sys/vservices/(<server>|<client>)-sessions/ directory
+ */
+ vs_service_remove_sysfs_entries(session, service);
+
+ sysfs_remove_group(&service->dev.kobj, &service_stat_attributes);
+
+ /*
+ * On the client-side we need to release the service id as soon as
+ * the service is deleted. Otherwise the server may attempt to create
+ * a new service with this id.
+ */
+ if (!session->is_server)
+ vs_session_release_service_id(service);
+
+ device_del(&service->dev);
+ vs_put_service(service);
+}
+
+/**
+ * vs_service_delete - deactivate and start removing a service device
+ * @service: the service to delete
+ * @caller: the service initiating deletion
+ *
+ * Services may only be deleted by their owner (on the server side), or by the
+ * core service. This function must not be called for the core service.
+ */
+int vs_service_delete(struct vs_service_device *service,
+ struct vs_service_device *caller)
+{
+ struct vs_session_device *session =
+ vs_service_get_session(service);
+ struct vs_service_device *core_service = session->core_service;
+
+ if (WARN_ON(!core_service))
+ return -ENODEV;
+
+ if (!service->id)
+ return -EINVAL;
+
+ if (caller != service->owner && caller != core_service)
+ return -EPERM;
+
+ delete_service(service);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_delete);
+
+/**
+ * vs_service_handle_delete - deactivate and start removing a service device
+ * @service: the service to delete
+ *
+ * This is a variant of vs_service_delete which must only be called by the
+ * core service. It is used by the core service client when a service_removed
+ * message is received.
+ */
+int vs_service_handle_delete(struct vs_service_device *service)
+{
+ struct vs_session_device *session __maybe_unused =
+ vs_service_get_session(service);
+ struct vs_service_device *core_service __maybe_unused =
+ session->core_service;
+
+ lockdep_assert_held(&core_service->state_mutex);
+
+ delete_service(service);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_handle_delete);
+
+static void service_cleanup_work(struct work_struct *work)
+{
+ struct vs_service_device *service = container_of(work,
+ struct vs_service_device, cleanup_work);
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev, "cleanup\n");
+
+ if (service->owner)
+ vs_put_service(service->owner);
+
+ /* Put our reference to the session */
+ if (service->dev.parent)
+ put_device(service->dev.parent);
+
+ tasklet_kill(&service->rx_tasklet);
+ cancel_work_sync(&service->rx_work);
+ cancel_delayed_work_sync(&service->cooloff_work);
+ cancel_delayed_work_sync(&service->ready_work);
+ cancel_work_sync(&service->reset_work);
+
+ if (service->work_queue)
+ destroy_workqueue(service->work_queue);
+
+ kfree(service->sysfs_name);
+ kfree(service->name);
+ kfree(service->protocol);
+ kfree(service);
+}
+
+static void vs_service_release(struct device *dev)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+
+ vs_dev_debug(VS_DEBUG_SESSION, vs_service_get_session(service),
+ &service->dev, "release\n");
+
+ /*
+ * We need to defer cleanup to avoid a circular dependency between the
+ * core service's state lock (which can be held at this point, on the
+ * client side) and any non-core service's reset work (which we must
+ * cancel here, and which acquires the core service state lock).
+ */
+ schedule_work(&service->cleanup_work);
+}
+
+static int service_add_idr(struct vs_session_device *session,
+ struct vs_service_device *service, vs_service_id_t service_id)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+ int err, base_id, id;
+
+ if (service_id == VS_SERVICE_AUTO_ALLOCATE_ID)
+ base_id = 1;
+ else
+ base_id = service_id;
+
+retry:
+ if (!idr_pre_get(&session->service_idr, GFP_KERNEL))
+ return -ENOMEM;
+
+ mutex_lock(&session->service_idr_lock);
+ err = idr_get_new_above(&session->service_idr, service, base_id, &id);
+ if (err == 0) {
+ if (service_id != VS_SERVICE_AUTO_ALLOCATE_ID &&
+ id != service_id) {
+			/* Failed to allocate the requested service id */
+ idr_remove(&session->service_idr, id);
+ mutex_unlock(&session->service_idr_lock);
+ return -EBUSY;
+ }
+ if (id > VS_MAX_SERVICE_ID) {
+ /* We are out of service ids */
+ idr_remove(&session->service_idr, id);
+ mutex_unlock(&session->service_idr_lock);
+ return -ENOSPC;
+ }
+ }
+ mutex_unlock(&session->service_idr_lock);
+ if (err == -EAGAIN)
+ goto retry;
+ if (err < 0)
+ return err;
+#else
+ int start, end, id;
+
+ if (service_id == VS_SERVICE_AUTO_ALLOCATE_ID) {
+ start = 1;
+ end = VS_MAX_SERVICES;
+ } else {
+ start = service_id;
+ end = service_id + 1;
+ }
+
+ mutex_lock(&session->service_idr_lock);
+ id = idr_alloc(&session->service_idr, service, start, end,
+ GFP_KERNEL);
+ mutex_unlock(&session->service_idr_lock);
+
+ if (id == -ENOSPC)
+ return -EBUSY;
+ else if (id < 0)
+ return id;
+#endif
+
+ service->id = id;
+ return 0;
+}
+
+static int
+vs_service_create_sysfs_entries(struct vs_session_device *session,
+ struct vs_service_device *service, vs_service_id_t id)
+{
+ int ret;
+ char *sysfs_name, *c;
+
+ /* Add a symlink to session device inside service device sysfs */
+ ret = sysfs_create_link(&service->dev.kobj, &session->dev.kobj,
+ VS_SESSION_SYMLINK_NAME);
+ if (ret) {
+ dev_err(&service->dev, "Error %d creating session symlink\n",
+ ret);
+ goto fail;
+ }
+
+ /* Get the length of the string for sysfs dir */
+ sysfs_name = kasprintf(GFP_KERNEL, "%s:%d", service->name, id);
+ if (!sysfs_name) {
+ ret = -ENOMEM;
+ goto fail_session_link;
+ }
+
+ /*
+	 * We don't want to create symlinks containing '/', which would be
+	 * interpreted as a directory separator, so replace all '/'s with '!'s.
+ */
+ while ((c = strchr(sysfs_name, '/')))
+ *c = '!';
+ ret = sysfs_create_link(session->sysfs_entry, &service->dev.kobj,
+ sysfs_name);
+ if (ret)
+ goto fail_free_sysfs_name;
+
+ service->sysfs_name = sysfs_name;
+
+ return 0;
+
+fail_free_sysfs_name:
+ kfree(sysfs_name);
+fail_session_link:
+ sysfs_remove_link(&service->dev.kobj, VS_SESSION_SYMLINK_NAME);
+fail:
+ return ret;
+}
+
+/**
+ * vs_service_register - create and register a new vs_service_device
+ * @session: the session device that is the parent of the service
+ * @owner: the service responsible for managing the new service
+ * @service_id: the ID of the new service
+ * @name: the name of the new service
+ * @protocol: the protocol for the new service
+ * @plat_data: value to be assigned to (struct device *)->platform_data
+ *
+ * This function should only be called by a session driver that is bound to
+ * the given session.
+ *
+ * The given service_id must not have been passed to a prior successful
+ * vs_service_register call, unless the service ID has since been freed by a
+ * call to the session driver's service_removed callback.
+ *
+ * The core service state lock must not be held while calling this function.
+ */
+struct vs_service_device *vs_service_register(struct vs_session_device *session,
+ struct vs_service_device *owner, vs_service_id_t service_id,
+ const char *protocol, const char *name, const void *plat_data)
+{
+ struct vs_service_device *service;
+ struct vs_session_driver *session_drv;
+ int ret = -EIO;
+ char *c;
+
+ if (service_id && !owner) {
+ dev_err(&session->dev, "Non-core service must have an owner\n");
+ ret = -EINVAL;
+ goto fail;
+ } else if (!service_id && owner) {
+ dev_err(&session->dev, "Core service must not have an owner\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ if (!session->dev.driver)
+ goto fail;
+
+ session_drv = to_vs_session_driver(session->dev.driver);
+
+ service = kzalloc(sizeof(*service), GFP_KERNEL);
+ if (!service) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ INIT_LIST_HEAD(&service->rx_queue);
+ INIT_WORK(&service->rx_work, service_rx_work);
+ INIT_WORK(&service->reset_work, service_reset_work);
+ INIT_DELAYED_WORK(&service->ready_work, service_ready_work);
+ INIT_DELAYED_WORK(&service->cooloff_work, service_cooloff_work);
+ INIT_WORK(&service->cleanup_work, service_cleanup_work);
+ spin_lock_init(&service->rx_lock);
+ init_waitqueue_head(&service->quota_wq);
+
+ service->owner = vs_get_service(owner);
+
+ service->readiness = VS_SERVICE_INIT;
+ mutex_init(&service->ready_lock);
+ service->driver_probed = false;
+
+ /*
+ * Service state locks - A service is only allowed to use one of these
+ */
+ spin_lock_init(&service->state_spinlock);
+ mutex_init(&service->state_mutex);
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ service->state_spinlock_used = false;
+ service->state_mutex_used = false;
+#endif
+
+ /* Lock ordering
+ *
+ * The dependency order for the various service locks is as follows:
+ *
+ * cooloff_work
+ * reset_work
+ * ready_work
+ * ready_lock/0
+ * rx_work/0
+ * state_mutex/0
+ * ready_lock/1
+ * ...
+ * state_mutex/n
+ * state_spinlock
+ *
+ * The subclass is the service's rank in the hierarchy of
+ * service ownership. This results in core having subclass 0 on
+ * server-side and 1 on client-side. Services directly created
+ * by the core will have a lock subclass value of 2 for
+ * servers, 3 for clients. Services created by non-core
+ * services will have a lock subclass value of x + 1, where x
+	 * is the lock subclass of the creator service (e.g. servers
+	 * have even-numbered lock subclasses, clients have
+	 * odd-numbered lock subclasses).
+ *
+ * If a service driver has any additional locks for protecting
+ * internal state, they will generally fit between state_mutex/n and
+ * ready_lock/n+1 on this list. For the core service, this applies to
+ * the session lock.
+ */
+
+ if (owner)
+ service->lock_subclass = owner->lock_subclass + 2;
+ else
+ service->lock_subclass = session->is_server ? 0 : 1;
+
+#ifdef CONFIG_LOCKDEP
+ if (service->lock_subclass >= MAX_LOCKDEP_SUBCLASSES) {
+ dev_warn(&session->dev, "Owner hierarchy is too deep, lockdep will fail\n");
+ } else {
+ /*
+ * We need to set the default subclass for the rx work,
+ * because the workqueue API doesn't (and can't) provide
+ * anything like lock_nested() for it.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)
+ /*
+ * Lockdep allows a specific lock's subclass to be set with
+ * the subclass argument to lockdep_init_map(). However, prior
+ * to Linux 3.3, that only works the first time it is called
+ * for a given class and subclass. So we have to fake it,
+ * putting every subclass in a different class, so the only
+ * thing that breaks is printing the subclass in lockdep
+ * warnings.
+ */
+ static struct lock_class_key
+ rx_work_keys[MAX_LOCKDEP_SUBCLASSES];
+ struct lock_class_key *key =
+ &rx_work_keys[service->lock_subclass];
+#else
+ struct lock_class_key *key = service->rx_work.lockdep_map.key;
+#endif
+
+ /*
+ * We can't use the lockdep_set_class() macro because the
+ * work's lockdep map is called .lockdep_map instead of
+ * .dep_map.
+ */
+ lockdep_init_map(&service->rx_work.lockdep_map,
+ "&service->rx_work", key,
+ service->lock_subclass);
+ }
+#endif
+
+ /*
+ * Copy the protocol and name. Remove any leading or trailing
+ * whitespace characters (including newlines) since the strings
+ * may have been passed via sysfs files.
+ */
+ if (protocol) {
+ service->protocol = kstrdup(protocol, GFP_KERNEL);
+ if (!service->protocol) {
+ ret = -ENOMEM;
+ goto fail_copy_protocol;
+ }
+ c = strim(service->protocol);
+ if (c != service->protocol)
+ memmove(service->protocol, c,
+ strlen(service->protocol) + 1);
+ }
+
+ service->name = kstrdup(name, GFP_KERNEL);
+ if (!service->name) {
+ ret = -ENOMEM;
+ goto fail_copy_name;
+ }
+ c = strim(service->name);
+ if (c != service->name)
+ memmove(service->name, c, strlen(service->name) + 1);
+
+ service->is_server = session_drv->is_server;
+
+ /* Grab a reference to the session we are on */
+ service->dev.parent = get_device(&session->dev);
+ service->dev.bus = session_drv->service_bus;
+ service->dev.release = vs_service_release;
+
+ service->last_reset = 0;
+ service->last_reset_request = 0;
+ service->last_ready = 0;
+ service->reset_delay = 0;
+
+ device_initialize(&service->dev);
+ service->dev.platform_data = (void *)plat_data;
+
+ ret = service_add_idr(session, service, service_id);
+ if (ret)
+ goto fail_add_idr;
+
+#ifdef CONFIG_VSERVICES_NAMED_DEVICE
+ /* Integrate session and service names in vservice devnodes */
+ dev_set_name(&service->dev, "vservice-%s:%s:%s:%d:%d",
+ session->is_server ? "server" : "client",
+ session->name, service->name,
+ session->session_num, service->id);
+#else
+ dev_set_name(&service->dev, "%s:%d", dev_name(&session->dev),
+ service->id);
+#endif
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+ if (service->id > 0)
+ service->dev.devt = MKDEV(vservices_cdev_major,
+ (session->session_num * VS_MAX_SERVICES) +
+ service->id);
+#endif
+
+ service->work_queue = vs_create_workqueue(dev_name(&service->dev));
+ if (!service->work_queue) {
+ ret = -ENOMEM;
+ goto fail_create_workqueue;
+ }
+
+ tasklet_init(&service->rx_tasklet, service_rx_tasklet,
+ (unsigned long)service);
+
+ /*
+ * If this is the core service, set the core service pointer in the
+ * session.
+ */
+ if (service->id == 0) {
+ mutex_lock(&session->service_idr_lock);
+ if (session->core_service) {
+ ret = -EEXIST;
+ mutex_unlock(&session->service_idr_lock);
+ goto fail_become_core;
+ }
+
+ /* Put in vs_session_bus_remove() */
+ session->core_service = vs_get_service(service);
+ mutex_unlock(&session->service_idr_lock);
+ }
+
+ /* Notify the transport */
+ ret = session->transport->vt->service_add(session->transport, service);
+ if (ret) {
+ dev_err(&session->dev,
+ "Failed to add service %d (%s:%s) to transport: %d\n",
+ service->id, service->name,
+ service->protocol, ret);
+ goto fail_transport_add;
+ }
+
+ /* Delay uevent until vs_service_start(). */
+ dev_set_uevent_suppress(&service->dev, true);
+
+ ret = device_add(&service->dev);
+ if (ret)
+ goto fail_device_add;
+
+ /* Create the service statistics sysfs group */
+ ret = sysfs_create_group(&service->dev.kobj, &service_stat_attributes);
+ if (ret)
+ goto fail_sysfs_create_group;
+
+ /* Create additional sysfs files */
+ ret = vs_service_create_sysfs_entries(session, service, service->id);
+ if (ret)
+ goto fail_sysfs_add_entries;
+
+ return service;
+
+fail_sysfs_add_entries:
+ sysfs_remove_group(&service->dev.kobj, &service_stat_attributes);
+fail_sysfs_create_group:
+ device_del(&service->dev);
+fail_device_add:
+ session->transport->vt->service_remove(session->transport, service);
+fail_transport_add:
+ if (service->id == 0) {
+ session->core_service = NULL;
+ vs_put_service(service);
+ }
+fail_become_core:
+fail_create_workqueue:
+ vs_session_release_service_id(service);
+fail_add_idr:
+ /*
+ * device_initialize() has been called, so we must call put_device()
+ * and let vs_service_release() handle the rest of the cleanup.
+ */
+ put_device(&service->dev);
+ return ERR_PTR(ret);
+
+fail_copy_name:
+	kfree(service->protocol);
+fail_copy_protocol:
+ kfree(service);
+fail:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(vs_service_register);
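+
+/*
+ * Illustrative sketch (not part of this driver): a session driver creating
+ * and starting a non-core service (the protocol and name strings are
+ * hypothetical):
+ *
+ *	struct vs_service_device *svc;
+ *
+ *	svc = vs_service_register(session, core_service,
+ *			VS_SERVICE_AUTO_ALLOCATE_ID,
+ *			"com.example.proto", "example", NULL);
+ *	if (IS_ERR(svc))
+ *		return PTR_ERR(svc);
+ *	if (!vs_service_start(svc))
+ *		return -ENODEV;
+ */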
+
+/**
+ * vs_session_get_service - Look up a service by ID on a session and get
+ * a reference to it. The caller must call vs_put_service when it is finished
+ * with the service.
+ *
+ * @session: The session to search for the service on
+ * @service_id: ID of the service to find
+ */
+struct vs_service_device *
+vs_session_get_service(struct vs_session_device *session,
+ vs_service_id_t service_id)
+{
+ struct vs_service_device *service;
+
+ if (!session)
+ return NULL;
+
+ rcu_read_lock();
+ service = idr_find(&session->service_idr, service_id);
+ if (!service) {
+ rcu_read_unlock();
+ return NULL;
+ }
+ vs_get_service(service);
+ rcu_read_unlock();
+
+ return service;
+}
+EXPORT_SYMBOL_GPL(vs_session_get_service);
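+
+/*
+ * Illustrative sketch (not part of this driver): look up a service and drop
+ * the reference when finished with it:
+ *
+ *	struct vs_service_device *svc;
+ *
+ *	svc = vs_session_get_service(session, service_id);
+ *	if (!svc)
+ *		return -ENODEV;
+ *	...
+ *	vs_put_service(svc);
+ */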
+
+/**
+ * __for_each_service - Iterate over all non-core services on a session.
+ *
+ * @session: Session to iterate services on
+ * @func: Callback function for each iterated service
+ *
+ * Iterate over all services on a session, excluding the core service, and
+ * call a callback function on each.
+ */
+static void __for_each_service(struct vs_session_device *session,
+ void (*func)(struct vs_service_device *))
+{
+ struct vs_service_device *service;
+ int id;
+
+ for (id = 1; ; id++) {
+ rcu_read_lock();
+ service = idr_get_next(&session->service_idr, &id);
+ if (!service) {
+ rcu_read_unlock();
+ break;
+ }
+ vs_get_service(service);
+ rcu_read_unlock();
+
+ func(service);
+ vs_put_service(service);
+ }
+}
+
+/**
+ * vs_session_delete_noncore - immediately delete all non-core services
+ * @session: the session whose services are to be deleted
+ *
+ * This function disables and deletes all non-core services without notifying
+ * the core service. It must only be called by the core service, with its state
+ * lock held. It is used when the core service client disconnects or
+ * resets, and when the core service server has its driver removed.
+ */
+void vs_session_delete_noncore(struct vs_session_device *session)
+{
+ struct vs_service_device *core_service __maybe_unused =
+ session->core_service;
+
+ lockdep_assert_held(&core_service->state_mutex);
+
+ vs_session_disable_noncore(session);
+
+ __for_each_service(session, delete_service);
+}
+EXPORT_SYMBOL_GPL(vs_session_delete_noncore);
+
+/**
+ * vs_session_for_each_service - Iterate over all initialised and non-deleted
+ * non-core services on a session.
+ *
+ * @session: Session to iterate services on
+ * @func: Callback function for each iterated service
+ * @data: Extra data to pass to the callback
+ *
+ * Iterate over all services on a session, excluding the core service and any
+ * service that has been deleted or has not yet had vs_service_start() called,
+ * and call a callback function on each. The callback function is called with
+ * the service's ready lock held.
+ */
+void vs_session_for_each_service(struct vs_session_device *session,
+ void (*func)(struct vs_service_device *, void *), void *data)
+{
+ struct vs_service_device *service;
+ int id;
+
+ for (id = 1; ; id++) {
+ rcu_read_lock();
+ service = idr_get_next(&session->service_idr, &id);
+ if (!service) {
+ rcu_read_unlock();
+ break;
+ }
+ vs_get_service(service);
+ rcu_read_unlock();
+
+ mutex_lock_nested(&service->ready_lock, service->lock_subclass);
+
+ if (service->readiness != VS_SERVICE_LOCAL_DELETE &&
+ service->readiness != VS_SERVICE_DELETED &&
+ service->readiness != VS_SERVICE_INIT)
+ func(service, data);
+
+ mutex_unlock(&service->ready_lock);
+ vs_put_service(service);
+ }
+}
+
+static void force_disable_service(struct vs_service_device *service,
+ void *unused)
+{
+ lockdep_assert_held(&service->ready_lock);
+
+ if (service->readiness == VS_SERVICE_ACTIVE)
+ __reset_service(service, false);
+
+ disable_service(service, true);
+}
+
+/**
+ * vs_session_disable_noncore - immediately disable all non-core services
+ * @session: the session whose services are to be disabled
+ *
+ * This function must be called by the core service driver to disable all
+ * services, whenever it resets or is otherwise disconnected. It is called
+ * directly by the server-side core service, and by the client-side core
+ * service via vs_session_delete_noncore().
+ */
+void vs_session_disable_noncore(struct vs_session_device *session)
+{
+ vs_session_for_each_service(session, force_disable_service, NULL);
+}
+EXPORT_SYMBOL_GPL(vs_session_disable_noncore);
+
+static void try_enable_service(struct vs_service_device *service, void *unused)
+{
+ lockdep_assert_held(&service->ready_lock);
+
+ __enable_service(service);
+}
+
+/**
+ * vs_session_enable_noncore - enable all disabled non-core services
+ * @session: the session whose services are to be enabled
+ *
+ * This function is called by the core server driver to enable all services
+ * when the core client connects.
+ */
+void vs_session_enable_noncore(struct vs_session_device *session)
+{
+ vs_session_for_each_service(session, try_enable_service, NULL);
+}
+EXPORT_SYMBOL_GPL(vs_session_enable_noncore);
+
+/**
+ * vs_session_handle_message - process an incoming message from a transport
+ * @session: the session that is receiving the message
+ * @mbuf: a buffer containing the message payload
+ * @service_id: the id of the service that the message was addressed to
+ *
+ * This routine will return 0 if the buffer was accepted, or a negative value
+ * otherwise. In the latter case the caller should free the buffer. If the
+ * error is fatal, this routine will reset the service.
+ *
+ * This routine may be called from interrupt context.
+ *
+ * The caller must always serialise calls to this function relative to
+ * vs_session_handle_reset and vs_session_handle_activate. We don't do this
+ * internally, to avoid having to disable interrupts when called from task
+ * context.
+ */
+int vs_session_handle_message(struct vs_session_device *session,
+ struct vs_mbuf *mbuf, vs_service_id_t service_id)
+{
+ struct vs_service_device *service;
+ struct vs_transport *transport;
+ unsigned long flags;
+
+ transport = session->transport;
+
+ service = vs_session_get_service(session, service_id);
+ if (!service) {
+ dev_err(&session->dev, "message for unknown service %d\n",
+ service_id);
+ session_fatal_error(session, GFP_ATOMIC);
+ return -ENOTCONN;
+ }
+
+ /*
+ * Take the rx lock before checking service readiness. This guarantees
+ * that if __reset_service() has just made the service inactive, we
+ * either see it and don't enqueue the message, or else enqueue the
+ * message before cancel_pending_rx() runs (and removes it).
+ */
+ spin_lock_irqsave(&service->rx_lock, flags);
+
+ /* If the service is not active, drop the message. */
+ if (service->readiness != VS_SERVICE_ACTIVE) {
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+ vs_put_service(service);
+ return -ECONNRESET;
+ }
+
+ list_add_tail(&mbuf->queue, &service->rx_queue);
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+
+ /* Schedule processing of the message by the service's drivers. */
+ queue_rx_work(service);
+ vs_put_service(service);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_message);
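+
+/*
+ * Illustrative sketch (not part of this driver): a transport's receive path
+ * must free the buffer itself when this routine fails
+ * (my_transport_free_mbuf() is hypothetical):
+ *
+ *	err = vs_session_handle_message(session, mbuf, service_id);
+ *	if (err < 0)
+ *		my_transport_free_mbuf(transport, mbuf);
+ */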
+
+/**
+ * vs_session_quota_available - notify a service that it can transmit
+ * @session: the session owning the service that is ready
+ * @service_id: the id of the service that is ready
+ * @count: the number of buffers that just became ready
+ * @send_tx_ready: true if quota has just become nonzero due to a buffer being
+ * freed by the remote communication partner
+ *
+ * This routine is called by the transport driver when a send-direction
+ * message buffer becomes free. It wakes up any task that is waiting for
+ * send quota to become available.
+ *
+ * This routine may be called from interrupt context from the transport
+ * driver, and as such, it may not sleep.
+ *
+ * The caller must always serialise calls to this function relative to
+ * vs_session_handle_reset and vs_session_handle_activate. We don't do this
+ * internally, to avoid having to disable interrupts when called from task
+ * context.
+ *
+ * If the send_tx_ready argument is true, this function also schedules a
+ * call to the driver's tx_ready callback. Note that this never has priority
+ * over handling incoming messages; it will only be handled once the receive
+ * queue is empty. This is to increase batching of outgoing messages, and also
+ * to reduce the chance that an outgoing message will be dropped by the partner
+ * because an incoming message has already changed the state.
+ *
+ * In general, task context drivers should use the waitqueue, and softirq
+ * context drivers (with tx_atomic set) should use tx_ready.
+ */
+void vs_session_quota_available(struct vs_session_device *session,
+ vs_service_id_t service_id, unsigned count,
+ bool send_tx_ready)
+{
+ struct vs_service_device *service;
+ unsigned long flags;
+
+ service = vs_session_get_service(session, service_id);
+ if (!service) {
+ dev_err(&session->dev, "tx ready for unknown service %d\n",
+ service_id);
+ session_fatal_error(session, GFP_ATOMIC);
+ return;
+ }
+
+ wake_up_nr(&service->quota_wq, count);
+
+ if (send_tx_ready) {
+ /*
+ * Take the rx lock before checking service readiness. This
+ * guarantees that if __reset_service() has just made the
+ * service inactive, we either see it and don't set the tx_ready
+ * flag, or else set the flag before cancel_pending_rx() runs
+ * (and clears it).
+ */
+ spin_lock_irqsave(&service->rx_lock, flags);
+
+ /* If the service is not active, drop the tx_ready event */
+ if (service->readiness != VS_SERVICE_ACTIVE) {
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+ vs_put_service(service);
+ return;
+ }
+
+ service->tx_ready = true;
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+
+ /* Schedule RX processing by the service driver. */
+ queue_rx_work(service);
+ }
+
+ vs_put_service(service);
+}
+EXPORT_SYMBOL_GPL(vs_session_quota_available);
+
+/**
+ * vs_session_handle_notify - process an incoming notification from a transport
+ * @session: the session that is receiving the notification
+ * @bits: notification flags
+ * @service_id: the id of the service that the notification was addressed to
+ *
+ * This function may be called from interrupt context from the transport driver,
+ * and as such, it may not sleep.
+ */
+void vs_session_handle_notify(struct vs_session_device *session,
+ unsigned long bits, vs_service_id_t service_id)
+{
+ struct vs_service_device *service;
+ struct vs_service_driver *driver;
+ unsigned long flags;
+
+ service = vs_session_get_service(session, service_id);
+ if (!service) {
+ /* Ignore the notification since the service id doesn't exist */
+ dev_err(&session->dev, "notification for unknown service %d\n",
+ service_id);
+ return;
+ }
+
+ /*
+ * Take the rx lock before checking service readiness. This guarantees
+ * that if __reset_service() has just made the service inactive, we
+ * either see it and don't send the notification, or else send it
+ * before cancel_pending_rx() runs (and thus before the driver is
+ * deactivated).
+ */
+ spin_lock_irqsave(&service->rx_lock, flags);
+
+ /* If the service is not active, drop the notification. */
+ if (service->readiness != VS_SERVICE_ACTIVE) {
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+ vs_put_service(service);
+ return;
+ }
+
+ /* There should be a driver bound to the service */
+ if (WARN_ON(!service->dev.driver)) {
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+ vs_put_service(service);
+ return;
+ }
+
+ driver = to_vs_service_driver(service->dev.driver);
+ /* Call the driver's notify function */
+ driver->notify(service, bits);
+
+ spin_unlock_irqrestore(&service->rx_lock, flags);
+ vs_put_service(service);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_notify);
+
+static unsigned long reset_cool_off(struct vs_service_device *service)
+{
+ return service->reset_delay * RESET_THROTTLE_COOL_OFF_MULT;
+}
+
+static bool ready_needs_delay(struct vs_service_device *service)
+{
+ /*
+ * We throttle resets if too little time elapsed between the service
+ * last becoming ready, and the service last starting a reset.
+ *
+ * We do not use the current time here because it includes the time
+ * taken by the local service driver to actually process the reset.
+ */
+ return service->last_reset && service->last_ready && time_before(
+ service->last_reset,
+ service->last_ready + RESET_THROTTLE_TIME);
+}
+
+static bool reset_throttle_cooled_off(struct vs_service_device *service)
+{
+ /*
+ * Reset throttling cools off if enough time has elapsed since the
+ * last reset request.
+ *
+ * We check against the last requested reset, not the last serviced
+ * reset or ready. If we are throttling, a reset may not have been
+ * serviced for some time even though we are still receiving requests.
+ */
+ return service->reset_delay && service->last_reset_request &&
+ time_after(jiffies, service->last_reset_request +
+ reset_cool_off(service));
+}
+
+/*
+ * Queue up the ready work for a service. If a service is resetting too fast
+ * then it will be throttled using an exponentially increasing delay before
+ * marking it ready. If the reset speed backs off then the ready throttling
+ * will be cleared. If a service reaches the maximum throttling delay then all
+ * resets will be ignored until the cool off period has elapsed.
+ *
+ * The basic logic of the reset throttling is:
+ *
+ * - If a reset request is processed and the last ready was less than
+ * RESET_THROTTLE_TIME ago, then the ready needs to be delayed to
+ * throttle resets.
+ *
+ * - The ready delay increases exponentially on each throttled reset
+ * between RESET_THROTTLE_MIN and RESET_THROTTLE_MAX.
+ *
+ * - If RESET_THROTTLE_MAX is reached then no ready will be sent until the
+ * reset requests have cooled off.
+ *
+ * - Reset requests have cooled off when no reset requests have been
+ * received for RESET_THROTTLE_COOL_OFF_MULT * the service's current
+ * ready delay. Once cooled off, the service's reset throttling is
+ * disabled.
+ *
+ * Note: Be careful when adding print statements, including debugging, to
+ * this function. The ready throttling is intended to prevent DoSing of the
+ * vServices due to repeated resets (e.g. because of a persistent failure).
+ * Adding a printk on each reset, for example, would result in syslog
+ * spamming, which is a DoS attack in itself.
+ *
+ * The ready lock must be held by the caller.
+ */
+static void queue_ready_work(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ unsigned long delay;
+ bool wait_for_cooloff = false;
+
+ lockdep_assert_held(&service->ready_lock);
+
+ /* This should only be called when the service enters reset. */
+ WARN_ON(service->readiness != VS_SERVICE_RESET);
+
+ if (ready_needs_delay(service)) {
+ /* Reset delay increments exponentially */
+ if (!service->reset_delay) {
+ service->reset_delay = RESET_THROTTLE_MIN;
+ } else if (service->reset_delay < RESET_THROTTLE_MAX) {
+ service->reset_delay *= 2;
+ } else {
+ wait_for_cooloff = true;
+ }
+
+ delay = service->reset_delay;
+ } else {
+ /* The reset request appears to have been sane. */
+ delay = 0;
+ }
+
+ if (service->reset_delay > 0) {
+ /*
+ * Schedule cooloff work, to set the reset_delay to 0 if
+ * the reset requests stop for long enough.
+ */
+ schedule_delayed_work(&service->cooloff_work,
+ reset_cool_off(service));
+ }
+
+ if (wait_for_cooloff) {
+ /*
+ * We need to finish cooling off before we service resets
+ * again. Schedule cooloff_work to run after the current
+ * cooloff period ends; it may reschedule itself even later
+ * if any more requests arrive.
+ */
+ dev_err(&session->dev,
+ "Service %s is resetting too fast - must cool off for %u ms\n",
+ dev_name(&service->dev),
+ jiffies_to_msecs(reset_cool_off(service)));
+ return;
+ }
+
+ if (delay)
+ dev_err(&session->dev,
+ "Service %s is resetting too fast - delaying ready by %u ms\n",
+ dev_name(&service->dev),
+ jiffies_to_msecs(delay));
+
+ vs_debug(VS_DEBUG_SESSION, session,
+ "Service %s will become ready in %u ms\n",
+ dev_name(&service->dev),
+ jiffies_to_msecs(delay));
+
+ if (service->last_ready)
+ vs_debug(VS_DEBUG_SESSION, session,
+ "Last became ready %u ms ago\n",
+ msecs_ago(service->last_ready));
+ if (service->reset_delay >= RESET_THROTTLE_MAX)
+ dev_err(&session->dev, "Service %s hit max reset throttle\n",
+ dev_name(&service->dev));
+
+ schedule_delayed_work(&service->ready_work, delay);
+}
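+
+/*
+ * Worked example (timings illustrative): if RESET_THROTTLE_MIN is 1 second
+ * and RESET_THROTTLE_MAX is 8 seconds, a service that keeps resetting
+ * immediately after becoming ready has its ready delayed by 1s, then 2s,
+ * 4s and 8s. At 8s the throttle is saturated and further resets are
+ * ignored until no reset request has been seen for
+ * RESET_THROTTLE_COOL_OFF_MULT * 8s, at which point cooloff_work clears
+ * reset_delay and normal operation resumes.
+ */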
+
+static void session_activation_work(struct work_struct *work)
+{
+ struct vs_session_device *session = container_of(work,
+ struct vs_session_device, activation_work);
+ struct vs_service_device *core_service = session->core_service;
+ struct vs_session_driver *session_drv =
+ to_vs_session_driver(session->dev.driver);
+ int activation_state;
+ int ret;
+
+ if (WARN_ON(!core_service))
+ return;
+
+ if (WARN_ON(!session_drv))
+ return;
+
+ /*
+ * We use an atomic to prevent duplicate activations if we race with
+ * an activate after a reset. This is very unlikely, but possible if
+ * this work item is preempted.
+ */
+ activation_state = atomic_cmpxchg(&session->activation_state,
+ VS_SESSION_ACTIVATE, VS_SESSION_ACTIVE);
+
+ switch (activation_state) {
+ case VS_SESSION_ACTIVATE:
+ vs_debug(VS_DEBUG_SESSION, session,
+ "core service will be activated\n");
+ vs_service_enable(core_service);
+ break;
+
+ case VS_SESSION_RESET:
+ vs_debug(VS_DEBUG_SESSION, session,
+ "core service will be deactivated\n");
+
+ /* Handle the core service reset */
+ ret = service_handle_reset(session, core_service, true);
+
+ /* Tell the transport if the reset succeeded */
+ if (ret >= 0)
+ session->transport->vt->ready(session->transport);
+ else
+ dev_err(&session->dev, "core service reset unhandled: %d\n",
+ ret);
+
+ break;
+
+ default:
+ vs_debug(VS_DEBUG_SESSION, session,
+ "core service already active\n");
+ break;
+ }
+}
+
+/**
+ * vs_session_handle_reset - Handle a reset at the session layer.
+ * @session: Session to reset
+ *
+ * This function is called by the transport when it receives a transport-level
+ * reset notification.
+ *
+ * After a session is reset by calling this function, it will reset all of its
+ * attached services, and then call the transport's ready callback. The
+ * services will remain in reset until the session is re-activated by a call
+ * to vs_session_handle_activate().
+ *
+ * Calling this function on a session that is already reset is permitted, as
+ * long as the transport accepts the consequent duplicate ready callbacks.
+ *
+ * A newly created session is initially in the reset state, and will not call
+ * the transport's ready callback. The transport may choose to either act as
+ * if the ready callback had been called, or call this function again to
+ * trigger a new ready callback.
+ */
+void vs_session_handle_reset(struct vs_session_device *session)
+{
+ atomic_set(&session->activation_state, VS_SESSION_RESET);
+
+ schedule_work(&session->activation_work);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_reset);
+
+/**
+ * vs_session_handle_activate - Allow a session to leave the reset state.
+ * @session: Session to mark active.
+ *
+ * This function is called by the transport when a transport-level reset is
+ * completed; that is, after the session layer has reset its services and
+ * called the ready callback, at *both* ends of the connection.
+ */
+void vs_session_handle_activate(struct vs_session_device *session)
+{
+ atomic_set(&session->activation_state, VS_SESSION_ACTIVATE);
+
+ schedule_work(&session->activation_work);
+}
+EXPORT_SYMBOL_GPL(vs_session_handle_activate);
+
+static ssize_t id_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", session->session_num);
+}
+
+/*
+ * The vServices session device type
+ */
+static ssize_t is_server_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%d\n", session->is_server);
+}
+
+static ssize_t name_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%s\n", session->name);
+}
+
+#ifdef CONFIG_VSERVICES_DEBUG
+static ssize_t debug_mask_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ return scnprintf(buf, PAGE_SIZE, "%.8lx\n", session->debug_mask);
+}
+
+static ssize_t debug_mask_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t count)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+ int err;
+
+ err = kstrtoul(buf, 0, &session->debug_mask);
+ if (err)
+ return err;
+
+ /* Clear any bits we don't know about */
+ session->debug_mask &= VS_DEBUG_ALL;
+
+ return count;
+}
+#endif /* CONFIG_VSERVICES_DEBUG */
+
+static struct device_attribute vservices_session_dev_attrs[] = {
+ __ATTR_RO(id),
+ __ATTR_RO(is_server),
+ __ATTR_RO(name),
+#ifdef CONFIG_VSERVICES_DEBUG
+ __ATTR(debug_mask, S_IRUGO | S_IWUSR,
+ debug_mask_show, debug_mask_store),
+#endif
+ __ATTR_NULL,
+};
+
+static int vs_session_free_idr(struct vs_session_device *session)
+{
+ mutex_lock(&vs_session_lock);
+ idr_remove(&session_idr, session->session_num);
+ mutex_unlock(&vs_session_lock);
+ return 0;
+}
+
+static void vs_session_device_release(struct device *dev)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ vs_session_free_idr(session);
+
+ kfree(session->name);
+ kfree(session);
+}
+
+/*
+ * The vServices session bus
+ */
+static int vs_session_bus_match(struct device *dev,
+ struct device_driver *driver)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+ struct vs_session_driver *session_drv = to_vs_session_driver(driver);
+
+ return (session->is_server == session_drv->is_server);
+}
+
+static int vs_session_bus_remove(struct device *dev)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+ struct vs_service_device *core_service = session->core_service;
+
+ if (!core_service)
+ return 0;
+
+ /*
+ * Abort any pending session activation. We rely on the transport to
+ * not call vs_session_handle_activate after this point.
+ */
+ cancel_work_sync(&session->activation_work);
+
+ /* Abort any pending fatal error handling, which is redundant now. */
+ cancel_work_sync(&session->fatal_error_work);
+
+ /*
+ * Delete the core service. This will implicitly delete everything
+ * else (in reset on the client side, and in release on the server
+ * side). The session holds a reference, so this won't release the
+ * service struct.
+ */
+ delete_service(core_service);
+
+ /* Now clean up the core service. */
+ session->core_service = NULL;
+
+ /* Matches the get in vs_service_register() */
+ vs_put_service(core_service);
+
+ return 0;
+}
+
+static int vservices_session_uevent(struct device *dev,
+ struct kobj_uevent_env *env)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ dev_dbg(dev, "uevent\n");
+
+ if (add_uevent_var(env, "IS_SERVER=%d", session->is_server))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "SESSION_ID=%d", session->session_num))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static void vservices_session_shutdown(struct device *dev)
+{
+ struct vs_session_device *session = to_vs_session_device(dev);
+
+ dev_dbg(dev, "shutdown\n");
+
+ /* Do a transport reset */
+ session->transport->vt->reset(session->transport);
+}
+
+struct bus_type vs_session_bus_type = {
+ .name = "vservices-session",
+ .match = vs_session_bus_match,
+ .remove = vs_session_bus_remove,
+ .dev_attrs = vservices_session_dev_attrs,
+ .uevent = vservices_session_uevent,
+ .shutdown = vservices_session_shutdown,
+};
+EXPORT_SYMBOL_GPL(vs_session_bus_type);
+
+/*
+ * Common code for the vServices client and server buses
+ */
+int vs_service_bus_probe(struct device *dev)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_service_driver *vsdrv = to_vs_service_driver(dev->driver);
+ struct vs_session_device *session = vs_service_get_session(service);
+ int ret;
+
+ vs_dev_debug(VS_DEBUG_SESSION, session, &service->dev, "probe\n");
+
+ /*
+ * Increase the reference count on the service driver. We don't allow
+ * service driver modules to be removed if there are any device
+ * instances present. The devices must be explicitly removed first.
+ */
+ if (!try_module_get(vsdrv->driver.owner))
+ return -ENODEV;
+
+ ret = vsdrv->probe(service);
+ if (ret) {
+ module_put(vsdrv->driver.owner);
+ return ret;
+ }
+
+ service->driver_probed = true;
+
+ try_start_service(service);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_probe);
+
+int vs_service_bus_remove(struct device *dev)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_service_driver *vsdrv = to_vs_service_driver(dev->driver);
+ int err = 0;
+
+ reset_service(service);
+
+ /* Prevent reactivation of the driver */
+ service->driver_probed = false;
+
+ /* The driver has now had its reset() callback called; remove it */
+ vsdrv->remove(service);
+
+ /*
+ * Take the service's state mutex and spinlock. This ensures that any
+ * thread that is calling vs_state_lock_safe[_bh] will either complete
+ * now, or see the driver removal and fail, irrespective of which type
+ * of lock it is using.
+ */
+ mutex_lock_nested(&service->state_mutex, service->lock_subclass);
+ spin_lock_bh(&service->state_spinlock);
+
+ /* Release all the locks. */
+ spin_unlock_bh(&service->state_spinlock);
+ mutex_unlock(&service->state_mutex);
+ mutex_unlock(&service->ready_lock);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ service->state_spinlock_used = false;
+ service->state_mutex_used = false;
+#endif
+
+ module_put(vsdrv->driver.owner);
+
+ return err;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_remove);
+
+int vs_service_bus_uevent(struct device *dev, struct kobj_uevent_env *env)
+{
+ struct vs_service_device *service = to_vs_service_device(dev);
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ dev_dbg(dev, "uevent\n");
+
+ if (add_uevent_var(env, "IS_SERVER=%d", service->is_server))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "SERVICE_ID=%d", service->id))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "SESSION_ID=%d", session->session_num))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "SERVICE_NAME=%s", service->name))
+ return -ENOMEM;
+
+ if (add_uevent_var(env, "PROTOCOL=%s", service->protocol ?: ""))
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_service_bus_uevent);
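+
+/*
+ * For example (values illustrative), a server-side service with id 3 on
+ * session 0, named "serial0" and speaking protocol "com.ok-labs.serial",
+ * results in a uevent environment containing:
+ *
+ *	IS_SERVER=1
+ *	SERVICE_ID=3
+ *	SESSION_ID=0
+ *	SERVICE_NAME=serial0
+ *	PROTOCOL=com.ok-labs.serial
+ */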
+
+static int vs_session_create_sysfs_entry(struct vs_transport *transport,
+ struct vs_session_device *session, bool server,
+ const char *transport_name)
+{
+ char *sysfs_name;
+ struct kobject *sysfs_parent = vservices_client_root;
+
+ if (!transport_name)
+ return -EINVAL;
+
+ sysfs_name = kasprintf(GFP_KERNEL, "%s:%s", transport->type,
+ transport_name);
+ if (!sysfs_name)
+ return -ENOMEM;
+
+ if (server)
+ sysfs_parent = vservices_server_root;
+
+ session->sysfs_entry = kobject_create_and_add(sysfs_name, sysfs_parent);
+
+ kfree(sysfs_name);
+ if (!session->sysfs_entry)
+ return -ENOMEM;
+ return 0;
+}
+
+static int vs_session_alloc_idr(struct vs_session_device *session)
+{
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 9, 0)
+ int err, id;
+
+retry:
+ if (!idr_pre_get(&session_idr, GFP_KERNEL))
+ return -ENOMEM;
+
+ mutex_lock(&vs_session_lock);
+ err = idr_get_new_above(&session_idr, session, 0, &id);
+ if (err == 0) {
+ if (id >= VS_MAX_SESSIONS) {
+ /* We are out of session ids */
+ idr_remove(&session_idr, id);
+ mutex_unlock(&vs_session_lock);
+ return -EBUSY;
+ }
+ }
+ mutex_unlock(&vs_session_lock);
+ if (err == -EAGAIN)
+ goto retry;
+ if (err < 0)
+ return err;
+#else
+ int id;
+
+ mutex_lock(&vs_session_lock);
+ id = idr_alloc(&session_idr, session, 0, VS_MAX_SESSIONS, GFP_KERNEL);
+ mutex_unlock(&vs_session_lock);
+
+ if (id == -ENOSPC)
+ return -EBUSY;
+ else if (id < 0)
+ return id;
+#endif
+
+ session->session_num = id;
+ return 0;
+}
+
+/**
+ * vs_session_register - register a vservices session on a transport
+ * @transport: vservices transport that the session will attach to
+ * @parent: device that implements the transport (for sysfs)
+ * @server: true if the session is server-side
+ * @transport_name: name of the transport
+ *
+ * This function is intended to be called from the probe() function of a
+ * transport driver. It sets up a new session device, which then either
+ * performs automatic service discovery (for clients) or creates sysfs nodes
+ * that allow the user to create services (for servers).
+ *
+ * Note that the parent is only used by the driver framework; it is not
+ * directly accessed by the session drivers. Thus, a single transport device
+ * can support multiple sessions, as long as they each have a unique struct
+ * vs_transport.
+ *
+ * Note: This function may sleep, and therefore must not be called from
+ * interrupt context.
+ *
+ * Returns a pointer to the new device, or an error pointer.
+ */
+struct vs_session_device *vs_session_register(struct vs_transport *transport,
+ struct device *parent, bool server, const char *transport_name)
+{
+ struct device *dev;
+ struct vs_session_device *session;
+ int ret = -ENOMEM;
+
+ WARN_ON(!transport);
+
+ session = kzalloc(sizeof(*session), GFP_KERNEL);
+ if (!session)
+ goto fail_session_alloc;
+
+ session->transport = transport;
+ session->is_server = server;
+ session->name = kstrdup(transport_name, GFP_KERNEL);
+ if (!session->name)
+ goto fail_free_session;
+
+ INIT_WORK(&session->activation_work, session_activation_work);
+ INIT_WORK(&session->fatal_error_work, session_fatal_error_work);
+
+#ifdef CONFIG_VSERVICES_DEBUG
+ session->debug_mask = default_debug_mask & VS_DEBUG_ALL;
+#endif
+
+ idr_init(&session->service_idr);
+ mutex_init(&session->service_idr_lock);
+
+ /*
+ * We must create the session's sysfs entry before device_register()
+ * so that it is available while the core service is being
+ * registered.
+ */
+ ret = vs_session_create_sysfs_entry(transport, session, server,
+ transport_name);
+ if (ret)
+ goto fail_free_session;
+
+ ret = vs_session_alloc_idr(session);
+ if (ret)
+ goto fail_sysfs_entry;
+
+ dev = &session->dev;
+ dev->parent = parent;
+ dev->bus = &vs_session_bus_type;
+ dev->release = vs_session_device_release;
+ dev_set_name(dev, "vservice:%d", session->session_num);
+
+ ret = device_register(dev);
+ if (ret)
+ goto fail_session_map;
+
+ /* Add a symlink to transport device inside session device sysfs dir */
+ if (parent) {
+ ret = sysfs_create_link(&session->dev.kobj,
+ &parent->kobj, VS_TRANSPORT_SYMLINK_NAME);
+ if (ret) {
+ dev_err(&session->dev,
+ "Error %d creating transport symlink\n",
+ ret);
+ goto fail_session_device_unregister;
+ }
+ }
+
+ return session;
+
+fail_session_device_unregister:
+ device_unregister(&session->dev);
+ kobject_put(session->sysfs_entry);
+ /* Remaining cleanup will be done in vs_session_device_release() */
+ return ERR_PTR(ret);
+fail_session_map:
+ vs_session_free_idr(session);
+fail_sysfs_entry:
+ kobject_put(session->sysfs_entry);
+fail_free_session:
+ kfree(session->name);
+ kfree(session);
+fail_session_alloc:
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL(vs_session_register);
+
+void vs_session_start(struct vs_session_device *session)
+{
+ struct vs_service_device *core_service = session->core_service;
+
+ if (WARN_ON(!core_service))
+ return;
+
+ blocking_notifier_call_chain(&vs_session_notifier_list,
+ VS_SESSION_NOTIFY_ADD, session);
+
+ vs_service_start(core_service);
+}
+EXPORT_SYMBOL_GPL(vs_session_start);
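+
+/*
+ * Illustrative sketch (not part of this file): a transport driver's probe()
+ * would typically register and start a session as follows; the names here
+ * are hypothetical, and the core service must be registered before
+ * vs_session_start() is called.
+ *
+ *	session = vs_session_register(transport, &pdev->dev, is_server,
+ *			"my-transport0");
+ *	if (IS_ERR(session))
+ *		return PTR_ERR(session);
+ *
+ *	... register the core service ...
+ *
+ *	vs_session_start(session);
+ */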
+
+/**
+ * vs_session_unregister - unregister a session device
+ * @session: the session device to unregister
+ */
+void vs_session_unregister(struct vs_session_device *session)
+{
+ if (session->dev.parent)
+ sysfs_remove_link(&session->dev.kobj, VS_TRANSPORT_SYMLINK_NAME);
+ blocking_notifier_call_chain(&vs_session_notifier_list,
+ VS_SESSION_NOTIFY_REMOVE, session);
+
+ device_unregister(&session->dev);
+
+ kobject_put(session->sysfs_entry);
+}
+EXPORT_SYMBOL_GPL(vs_session_unregister);
+
+struct service_unbind_work_struct {
+ struct vs_service_device *service;
+ struct work_struct work;
+};
+
+static void service_unbind_work(struct work_struct *work)
+{
+ struct service_unbind_work_struct *unbind_work = container_of(work,
+ struct service_unbind_work_struct, work);
+
+ device_release_driver(&unbind_work->service->dev);
+
+ /* Matches vs_get_service() in vs_session_unbind_driver() */
+ vs_put_service(unbind_work->service);
+ kfree(unbind_work);
+}
+
+int vs_session_unbind_driver(struct vs_service_device *service)
+{
+ struct service_unbind_work_struct *unbind_work =
+ kmalloc(sizeof(*unbind_work), GFP_KERNEL);
+
+ if (!unbind_work)
+ return -ENOMEM;
+
+ INIT_WORK(&unbind_work->work, service_unbind_work);
+
+ /* Put in service_unbind_work() */
+ unbind_work->service = vs_get_service(service);
+ schedule_work(&unbind_work->work);
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(vs_session_unbind_driver);
+
+static int __init vservices_init(void)
+{
+ int r;
+
+ printk(KERN_INFO "vServices Framework 1.0\n");
+
+ vservices_root = kobject_create_and_add("vservices", NULL);
+ if (!vservices_root) {
+ r = -ENOMEM;
+ goto fail_create_root;
+ }
+
+ r = bus_register(&vs_session_bus_type);
+ if (r < 0)
+ goto fail_bus_register;
+
+ r = vs_devio_init();
+ if (r < 0)
+ goto fail_devio_init;
+
+ return 0;
+
+fail_devio_init:
+ bus_unregister(&vs_session_bus_type);
+fail_bus_register:
+ kobject_put(vservices_root);
+fail_create_root:
+ return r;
+}
+
+static void __exit vservices_exit(void)
+{
+ printk(KERN_INFO "vServices Framework exit\n");
+
+ vs_devio_exit();
+ bus_unregister(&vs_session_bus_type);
+ kobject_put(vservices_root);
+}
+
+subsys_initcall(vservices_init);
+module_exit(vservices_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Session");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/session.h b/drivers/vservices/session.h
new file mode 100644
index 0000000..f51d535
--- /dev/null
+++ b/drivers/vservices/session.h
@@ -0,0 +1,173 @@
+/*
+ * drivers/vservices/session.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Definitions related to the vservices session bus and its client and server
+ * session drivers. The interfaces in this file are implementation details of
+ * the vServices framework and should not be used by transport or service
+ * drivers.
+ */
+
+#ifndef _VSERVICES_SESSION_PRIV_H_
+#define _VSERVICES_SESSION_PRIV_H_
+
+/* Maximum number of sessions allowed */
+#define VS_MAX_SESSIONS 64
+
+#include "debug.h"
+
+/* For use by the core server */
+#define VS_SERVICE_AUTO_ALLOCATE_ID 0xffff
+#define VS_SERVICE_ALREADY_RESET 1
+
+/*
+ * The upper bits of the service id are reserved for transport driver specific
+ * use. The reserved bits are always zeroed out above the transport layer.
+ */
+#define VS_SERVICE_ID_TRANSPORT_BITS 4
+#define VS_SERVICE_ID_TRANSPORT_OFFSET 12
+#define VS_SERVICE_ID_TRANSPORT_MASK ((1 << VS_SERVICE_ID_TRANSPORT_BITS) - 1)
+#define VS_SERVICE_ID_MASK \
+ (~(VS_SERVICE_ID_TRANSPORT_MASK << VS_SERVICE_ID_TRANSPORT_OFFSET))
+
+/* Number of bits needed to represent the service id range as a bitmap. */
+#define VS_SERVICE_ID_BITMAP_BITS \
+ (1 << ((sizeof(vs_service_id_t) * 8) - VS_SERVICE_ID_TRANSPORT_BITS))
+
+/* High service ids are reserved for use by the transport drivers */
+#define VS_SERVICE_ID_RESERVED(x) \
+ ((1 << VS_SERVICE_ID_TRANSPORT_OFFSET) - (x))
+
+#define VS_SERVICE_ID_RESERVED_1 VS_SERVICE_ID_RESERVED(1)
+
+/* Name of the session device symlink in service device sysfs directory */
+#define VS_SESSION_SYMLINK_NAME "session"
+
+/* Name of the transport device symlink in session device sysfs directory */
+#define VS_TRANSPORT_SYMLINK_NAME "transport"
+
+static inline unsigned int
+vs_get_service_id_reserved_bits(vs_service_id_t service_id)
+{
+ return (service_id >> VS_SERVICE_ID_TRANSPORT_OFFSET) &
+ VS_SERVICE_ID_TRANSPORT_MASK;
+}
+
+static inline vs_service_id_t vs_get_real_service_id(vs_service_id_t service_id)
+{
+ return service_id & VS_SERVICE_ID_MASK;
+}
+
+static inline void vs_set_service_id_reserved_bits(vs_service_id_t *service_id,
+ unsigned int reserved_bits)
+{
+ *service_id &= ~(VS_SERVICE_ID_TRANSPORT_MASK <<
+ VS_SERVICE_ID_TRANSPORT_OFFSET);
+ *service_id |= (reserved_bits & VS_SERVICE_ID_TRANSPORT_MASK) <<
+ VS_SERVICE_ID_TRANSPORT_OFFSET;
+}
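+
+/*
+ * Example (values illustrative): with 4 transport bits at offset 12, a raw
+ * id of 0x3004 carries reserved bits 0x3 and real service id 0x004:
+ *
+ *	vs_get_service_id_reserved_bits(0x3004) == 0x3
+ *	vs_get_real_service_id(0x3004) == 0x004
+ */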
+
+extern struct bus_type vs_session_bus_type;
+extern struct kobject *vservices_root;
+extern struct kobject *vservices_server_root;
+extern struct kobject *vservices_client_root;
+
+/**
+ * struct vs_session_driver - Session driver
+ * @driver: Linux device model driver structure
+ * @service_bus: Pointer to either the server or client bus type
+ * @is_server: True if this driver is for a server session, false if it is for
+ * a client session
+ * @service_added: Called when a non-core service is added.
+ * @service_start: Called when a non-core service is started.
+ * @service_local_reset: Called when an active non-core service driver becomes
+ * inactive.
+ * @service_removed: Called when a non-core service is removed.
+ */
+struct vs_session_driver {
+ struct device_driver driver;
+ struct bus_type *service_bus;
+ bool is_server;
+
+ /* These are all called with the core service state lock held. */
+ int (*service_added)(struct vs_session_device *session,
+ struct vs_service_device *service);
+ int (*service_start)(struct vs_session_device *session,
+ struct vs_service_device *service);
+ int (*service_local_reset)(struct vs_session_device *session,
+ struct vs_service_device *service);
+ int (*service_removed)(struct vs_session_device *session,
+ struct vs_service_device *service);
+};
+
+#define to_vs_session_driver(drv) \
+ container_of(drv, struct vs_session_driver, driver)
+
+/* Service lookup */
+extern struct vs_service_device *vs_session_get_service(
+ struct vs_session_device *session,
+ vs_service_id_t service_id);
+
+/* Service creation & destruction */
+extern struct vs_service_device *
+vs_service_register(struct vs_session_device *session,
+ struct vs_service_device *parent,
+ vs_service_id_t service_id,
+ const char *protocol,
+ const char *name,
+ const void *plat_data);
+
+extern bool vs_service_start(struct vs_service_device *service);
+
+extern int vs_service_delete(struct vs_service_device *service,
+ struct vs_service_device *caller);
+
+extern int vs_service_handle_delete(struct vs_service_device *service);
+
+/* Service reset handling */
+extern int vs_service_handle_reset(struct vs_session_device *session,
+ vs_service_id_t service_id, bool disable);
+extern int vs_service_enable(struct vs_service_device *service);
+
+extern void vs_session_enable_noncore(struct vs_session_device *session);
+extern void vs_session_disable_noncore(struct vs_session_device *session);
+extern void vs_session_delete_noncore(struct vs_session_device *session);
+
+/* Service bus driver management */
+extern int vs_service_bus_probe(struct device *dev);
+extern int vs_service_bus_remove(struct device *dev);
+extern int vs_service_bus_uevent(struct device *dev,
+ struct kobj_uevent_env *env);
+
+#ifdef CONFIG_VSERVICES_CHAR_DEV
+
+extern int vs_devio_init(void);
+extern void vs_devio_exit(void);
+
+extern struct vs_service_device *vs_service_lookup_by_devt(dev_t dev);
+
+extern struct vs_service_driver vs_devio_server_driver;
+extern struct vs_service_driver vs_devio_client_driver;
+
+extern int vservices_cdev_major;
+
+#else /* !CONFIG_VSERVICES_CHAR_DEV */
+
+static inline int vs_devio_init(void)
+{
+ return 0;
+}
+
+static inline void vs_devio_exit(void)
+{
+}
+
+#endif /* !CONFIG_VSERVICES_CHAR_DEV */
+
+#endif /* _VSERVICES_SESSION_PRIV_H_ */
diff --git a/drivers/vservices/skeleton_driver.c b/drivers/vservices/skeleton_driver.c
new file mode 100644
index 0000000..cfbc5df
--- /dev/null
+++ b/drivers/vservices/skeleton_driver.c
@@ -0,0 +1,133 @@
+/*
+ * drivers/vservices/skeleton_driver.c
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Skeleton testing driver for templating vService client/server drivers
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+
+#include <vservices/session.h>
+#include <vservices/buffer.h>
+#include <vservices/service.h>
+
+struct skeleton_info {
+ unsigned dummy;
+};
+
+static void vs_skeleton_handle_start(struct vs_service_device *service)
+{
+ /* NOTE: Do not change this message - it is used for system testing */
+ dev_info(&service->dev, "skeleton handle_start\n");
+}
+
+static int vs_skeleton_handle_message(struct vs_service_device *service,
+ struct vs_mbuf *mbuf)
+{
+ dev_info(&service->dev, "skeleton handle_messasge\n");
+ return -EBADMSG;
+}
+
+static void vs_skeleton_handle_notify(struct vs_service_device *service,
+ u32 flags)
+{
+ dev_info(&service->dev, "skeleton handle_notify\n");
+}
+
+static void vs_skeleton_handle_reset(struct vs_service_device *service)
+{
+ dev_info(&service->dev, "skeleton handle_reset %s service %d\n",
+ service->is_server ? "server" : "client", service->id);
+}
+
+static int vs_skeleton_probe(struct vs_service_device *service)
+{
+ struct skeleton_info *info;
+ int err = -ENOMEM;
+
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
+ if (!info)
+ goto fail;
+
+ dev_set_drvdata(&service->dev, info);
+ return 0;
+
+fail:
+ return err;
+}
+
+static int vs_skeleton_remove(struct vs_service_device *service)
+{
+ struct skeleton_info *info = dev_get_drvdata(&service->dev);
+
+ dev_info(&service->dev, "skeleton remove\n");
+ kfree(info);
+ return 0;
+}
+
+static struct vs_service_driver server_skeleton_driver = {
+ .protocol = "com.ok-labs.skeleton",
+ .is_server = true,
+ .probe = vs_skeleton_probe,
+ .remove = vs_skeleton_remove,
+ .start = vs_skeleton_handle_start,
+ .receive = vs_skeleton_handle_message,
+ .notify = vs_skeleton_handle_notify,
+ .reset = vs_skeleton_handle_reset,
+ .driver = {
+ .name = "vs-server-skeleton",
+ .owner = THIS_MODULE,
+ .bus = &vs_server_bus_type,
+ },
+};
+
+static struct vs_service_driver client_skeleton_driver = {
+ .protocol = "com.ok-labs.skeleton",
+ .is_server = false,
+ .probe = vs_skeleton_probe,
+ .remove = vs_skeleton_remove,
+ .start = vs_skeleton_handle_start,
+ .receive = vs_skeleton_handle_message,
+ .notify = vs_skeleton_handle_notify,
+ .reset = vs_skeleton_handle_reset,
+ .driver = {
+ .name = "vs-client-skeleton",
+ .owner = THIS_MODULE,
+ .bus = &vs_client_bus_type,
+ },
+};
+
+static int __init vs_skeleton_init(void)
+{
+ int ret;
+
+ ret = driver_register(&server_skeleton_driver.driver);
+ if (ret)
+ return ret;
+
+ ret = driver_register(&client_skeleton_driver.driver);
+ if (ret)
+ driver_unregister(&server_skeleton_driver.driver);
+
+ return ret;
+}
+
+static void __exit vs_skeleton_exit(void)
+{
+ driver_unregister(&server_skeleton_driver.driver);
+ driver_unregister(&client_skeleton_driver.driver);
+}
+
+module_init(vs_skeleton_init);
+module_exit(vs_skeleton_exit);
+
+MODULE_DESCRIPTION("OKL4 Virtual Services Skeleton Client/Server Driver");
+MODULE_AUTHOR("Open Kernel Labs, Inc");
diff --git a/drivers/vservices/transport.h b/drivers/vservices/transport.h
new file mode 100644
index 0000000..8e5055c
--- /dev/null
+++ b/drivers/vservices/transport.h
@@ -0,0 +1,40 @@
+/*
+ * drivers/vservices/transport.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the private interface that vServices transport drivers
+ * must provide to the vservices session and protocol layers. The transport,
+ * transport vtable, and message buffer structures are defined in the public
+ * <vservices/transport.h> header.
+ */
+
+#ifndef _VSERVICES_TRANSPORT_PRIV_H_
+#define _VSERVICES_TRANSPORT_PRIV_H_
+
+#include <linux/types.h>
+#include <linux/list.h>
+
+#include <vservices/transport.h>
+#include <vservices/types.h>
+#include <vservices/buffer.h>
+
+/**
+ * struct vs_notify_info - Notification information stored in the transport
+ * @service_id: Service id for this notification info
+ * @offset: Offset into the notification mapping
+ */
+struct vs_notify_info {
+ vs_service_id_t service_id;
+ unsigned offset;
+};
+
+#define VS_MAX_SERVICES 128
+#define VS_MAX_SERVICE_ID (VS_MAX_SERVICES - 1)
+
+#endif /* _VSERVICES_TRANSPORT_PRIV_H_ */
diff --git a/drivers/vservices/transport/Kconfig b/drivers/vservices/transport/Kconfig
new file mode 100644
index 0000000..37e84c4
--- /dev/null
+++ b/drivers/vservices/transport/Kconfig
@@ -0,0 +1,20 @@
+#
+# vServices Transport driver configuration
+#
+
+menu "Transport drivers"
+
+config VSERVICES_OKL4_AXON
+ tristate "OKL4 Microvisor Axon driver"
+ depends on VSERVICES_SUPPORT && OKL4_GUEST
+ default y
+ help
+ This option adds support for Virtual Services sessions using an OKL4
+ Microvisor Axon object as a transport.
+
+ If this driver is to be used in a Cell that has multiple
+ discontiguous regions in its physical memory pool, the
+ CONFIG_DMA_CMA option must also be selected (or CONFIG_CMA
+ in older kernels that do not have CONFIG_DMA_CMA).
+
+endmenu
diff --git a/drivers/vservices/transport/Makefile b/drivers/vservices/transport/Makefile
new file mode 100644
index 0000000..222fb51
--- /dev/null
+++ b/drivers/vservices/transport/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -Werror
+ccflags-$(CONFIG_VSERVICES_DEBUG) += -DDEBUG
+
+obj-$(CONFIG_VSERVICES_OKL4_AXON) += vtransport_axon.o
+vtransport_axon-objs = axon.o
diff --git a/drivers/vservices/transport/axon.c b/drivers/vservices/transport/axon.c
new file mode 100644
index 0000000..a140b4a
--- /dev/null
+++ b/drivers/vservices/transport/axon.c
@@ -0,0 +1,3573 @@
+/*
+ * drivers/vservices/transport/axon.c
+ *
+ * Copyright (c) 2015-2018 General Dynamics
+ * Copyright (c) 2015 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This is the OKL4 Virtual Services transport driver for OKL4 Microvisor
+ * Axons (virtual inter-Cell DMA engines).
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/platform_device.h>
+#include <linux/workqueue.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/string.h>
+#include <linux/io.h>
+#include <linux/list.h>
+#include <linux/jiffies.h>
+#include <linux/log2.h>
+#include <linux/version.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/dma-mapping.h>
+#include <linux/dmapool.h>
+#include <linux/dma-contiguous.h>
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0)
+#include <asm/dma-contiguous.h>
+#endif
+#include <linux/vmalloc.h>
+#include <linux/mmzone.h>
+#include <asm-generic/okl4_virq.h>
+#include <asm/byteorder.h>
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/service.h>
+
+#include <microvisor/microvisor.h>
+
+#include "../transport.h"
+#include "../session.h"
+#include "../debug.h"
+
+#define DRIVER_AUTHOR "Cog Systems Pty Ltd"
+#define DRIVER_DESC "OKL4 vServices Axon Transport Driver"
+#define DRIVER_NAME "vtransport_axon"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 18, 0) || \
+ defined(CONFIG_NO_DEPRECATED_MEMORY_BARRIERS)
+#define smp_mb__before_atomic_dec smp_mb__before_atomic
+#define smp_mb__before_atomic_inc smp_mb__before_atomic
+#define smp_mb__after_atomic_dec smp_mb__after_atomic
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,8,0)
+#define DMA_ATTRS unsigned long
+#else
+#define DMA_ATTRS struct dma_attrs *
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 12, 0) && \
+ !defined(CONFIG_CMA)
+static inline struct cma *dev_get_cma_area(struct device *dev)
+{
+ return NULL;
+}
+#endif
+
+static struct kmem_cache *mbuf_cache;
+
+struct child_device {
+ struct device *dev;
+ struct list_head list;
+};
+
+/* Number of services in the transport array to allocate at a time */
+#define SERVICES_ALLOC_CHUNK 16
+#define MSG_SEND_FREE_BUFS VS_SERVICE_ID_RESERVED_1
+
+/* The maximum value we allow for the free_bufs_balance counter */
+#define MAX_BALANCE 1
+
+/*
+ * The free bufs quota must be enough to take free_bufs_balance from its
+ * minimum to its maximum.
+ */
+#define FREE_BUFS_QUOTA (MAX_BALANCE * 2)
+
+/*
+ * The free bufs retry delay is the period in jiffies that we delay retrying
+ * after an out-of-memory condition when trying to send a free bufs message.
+ */
+#define FREE_BUFS_RETRY_DELAY 2
+
+/* The minimum values we permit for queue and message size. */
+#define MIN_QUEUE_SIZE ((size_t)4)
+#define MIN_MSG_SIZE (32 - sizeof(vs_service_id_t))
+
+/*
+ * The maximum size for a batched receive. This should be larger than the
+ * maximum message size, and large enough to avoid excessive context switching
+ * overheads, yet small enough to avoid blocking the tasklet queue for too
+ * long.
+ */
+#define MAX_TRANSFER_CHUNK 65536
+
+#define INC_MOD(x, m) do { \
+ (x)++; \
+ if ((x) == (m)) \
+ (x) = 0; \
+} while (0)
+
+/* Local Axon cleanup workqueue */
+static struct workqueue_struct *work_queue;
+
+/*
+ * True if there is only one physical segment being used for kernel memory
+ * allocations. If this is false, the device must have a usable CMA region.
+ */
+static bool okl4_single_physical_segment;
+
+/* OKL4 MMU capability. */
+static okl4_kcap_t okl4_mmu_cap;
+
+/*
+ * Per-service TX buffer allocation pool.
+ *
+ * We cannot use a normal DMA pool for TX buffers, because alloc_mbuf can be
+ * called with GFP_ATOMIC, and a normal DMA pool alloc will take pages from
+ * a global emergency pool if __GFP_WAIT is not set. The emergency pool is not
+ * guaranteed to be in the same physical segment as this device's DMA region,
+ * so it might not be usable by the axon.
+ *
+ * Using a very simple allocator with preallocated memory also speeds up the
+ * TX path.
+ *
+ * RX buffers use a standard Linux DMA pool, shared between all services,
+ * rather than this struct. They are preallocated by definition, so the speed
+ * of the allocator doesn't matter much for them. Also, they're always
+ * allocated with GFP_KERNEL (which includes __GFP_WAIT) so the normal DMA pool
+ * will use memory from the axon's contiguous region.
+ */
+struct vs_axon_tx_pool {
+ struct vs_transport_axon *transport;
+ struct kref kref;
+
+ void *base_vaddr;
+ dma_addr_t base_laddr;
+
+ unsigned alloc_order;
+ unsigned count;
+
+ struct work_struct free_work;
+ unsigned long alloc_bitmap[];
+};
+
+struct vs_axon_rx_freelist_entry {
+ struct list_head list;
+ dma_addr_t laddr;
+};
+
+/* Service info */
+struct vs_mv_service_info {
+ struct vs_service_device *service;
+
+ /* True if the session has started the service */
+ bool ready;
+
+ /* Number of send buffers we have allocated, in total. */
+ atomic_t send_inflight;
+
+ /*
+ * Number of send buffers we have allocated but not yet sent.
+ * This should always be zero if ready is false.
+ */
+ atomic_t send_alloc;
+
+ /*
+ * Number of receive buffers we have received and not yet freed.
+ * This should always be zero if ready is false.
+ */
+ atomic_t recv_inflight;
+
+ /*
+ * Number of receive buffers we have freed, but not told the other end
+ * about yet.
+ *
+ * The watermark is the maximum number of freed buffers we can
+ * accumulate before we send a dummy message to the remote end to ack
+ * them. This is used in situations where the protocol allows the remote
+ * end to reach its send quota without guaranteeing a reply; the dummy
+ * message lets it make progress even if our service driver doesn't send
+ * an answer that we can piggy-back the acks on.
+ */
+ atomic_t recv_freed;
+ unsigned int recv_freed_watermark;
+
+ /*
+ * Number of buffers that have been left allocated after a reset. If
+ * this count is nonzero, then the service has been disabled by the
+ * session layer, and needs to be re-enabled when it reaches zero.
+ */
+ atomic_t outstanding_frees;
+
+ /* TX allocation pool */
+ struct vs_axon_tx_pool *tx_pool;
+
+ /* RX allocation count */
+ unsigned rx_allocated;
+
+ /* Reference count for this info struct. */
+ struct kref kref;
+
+ /* RCU head for cleanup */
+ struct rcu_head rcu_head;
+};
+
+/*
+ * Transport readiness state machine
+ *
+ * This is similar to the service readiness state machine, but simpler,
+ * because there are fewer transition triggers.
+ *
+ * The states are:
+ * INIT: Initial state. This occurs transiently during probe.
+ * LOCAL_RESET: We have initiated a reset at this end, but the remote end has
+ * not yet acknowledged it. We will enter the RESET state on receiving
+ * acknowledgement.
+ * RESET: The transport is inactive at both ends, and the session layer has
+ * not yet told us to start activating.
+ * LOCAL_READY: The session layer has told us to start activating, and we
+ * have notified the remote end that we're ready.
+ * REMOTE_READY: The remote end has notified us that it is ready, but the
+ * local session layer hasn't decided to become ready yet.
+ * ACTIVE: Both ends are ready to communicate.
+ * SHUTDOWN: The transport is shutting down and should not become ready.
+ */
+enum vs_transport_readiness {
+ VS_TRANSPORT_INIT = 0,
+ VS_TRANSPORT_LOCAL_RESET,
+ VS_TRANSPORT_RESET,
+ VS_TRANSPORT_LOCAL_READY,
+ VS_TRANSPORT_REMOTE_READY,
+ VS_TRANSPORT_ACTIVE,
+ VS_TRANSPORT_SHUTDOWN,
+};
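+
+/*
+ * Typical activation sequence, as a worked example of the states above
+ * (signalling uses the reset/ready VIRQ payload bits defined below):
+ *
+ *	RESET        -> LOCAL_READY   local session activates, ready sent
+ *	RESET        -> REMOTE_READY  peer's ready arrives first
+ *	LOCAL_READY  -> ACTIVE        peer's ready arrives
+ *	REMOTE_READY -> ACTIVE        local session activates
+ *	ACTIVE       -> LOCAL_RESET   local reset requested, awaiting ack
+ *	LOCAL_RESET  -> RESET         peer acknowledges the reset
+ */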
+
+/*
+ * Transport reset / ready VIRQ payload bits
+ */
+enum vs_transport_reset_virq {
+ VS_TRANSPORT_VIRQ_RESET_REQ = (1 << 0),
+ VS_TRANSPORT_VIRQ_RESET_ACK = (1 << 1),
+ VS_TRANSPORT_VIRQ_READY = (1 << 2),
+};
+
+/*
+ * Internal definitions of the transport and message buffer structures.
+ */
+#define MAX_NOTIFICATION_LINES 16 /* Enough for 512 notifications each way */
+
+struct vs_transport_axon {
+ struct device *axon_dev;
+
+ struct okl4_axon_tx *tx;
+ struct okl4_axon_queue_entry *tx_descs;
+ struct vs_axon_tx_pool **tx_pools;
+ struct okl4_axon_rx *rx;
+ struct okl4_axon_queue_entry *rx_descs;
+ void **rx_ptrs;
+
+ dma_addr_t tx_phys, rx_phys;
+ size_t tx_size, rx_size;
+
+ okl4_kcap_t segment;
+ okl4_laddr_t segment_base;
+
+ okl4_kcap_t tx_cap, rx_cap, reset_cap;
+ unsigned int tx_irq, rx_irq, reset_irq;
+ okl4_interrupt_number_t reset_okl4_irq;
+
+ unsigned int notify_tx_nirqs;
+ okl4_kcap_t notify_cap[MAX_NOTIFICATION_LINES];
+ unsigned int notify_rx_nirqs;
+ unsigned int notify_irq[MAX_NOTIFICATION_LINES];
+
+ bool is_server;
+ size_t msg_size, queue_size;
+
+ /*
+ * The handle to the device tree node for the virtual-session node
+ * associated with the axon.
+ */
+ struct device_node *of_node;
+
+ struct list_head child_dev_list;
+
+ /*
+ * Hold queue and tx tasklet used to buffer and resend mbufs blocked
+ * by a full outgoing axon queue, due to a slow receiver or a halted
+ * axon.
+ */
+ struct list_head tx_queue;
+ struct tasklet_struct tx_tasklet;
+ u32 tx_uptr_freed;
+
+ /*
+ * The readiness state of the transport, and a spinlock protecting it.
+ * Note that this is different to the session's readiness state
+ * machine, though it has the same basic purpose.
+ */
+ enum vs_transport_readiness readiness;
+ spinlock_t readiness_lock;
+
+ struct tasklet_struct rx_tasklet;
+ struct timer_list rx_retry_timer;
+ struct list_head rx_freelist;
+ u32 rx_alloc_extra;
+ struct dma_pool *rx_pool;
+ spinlock_t rx_alloc_lock;
+ u32 rx_uptr_allocated;
+
+ struct vs_session_device *session_dev;
+ struct vs_transport transport;
+
+ DECLARE_BITMAP(service_bitmap, VS_SERVICE_ID_BITMAP_BITS);
+
+ struct delayed_work free_bufs_work;
+
+ /*
+ * Freed buffers messages balance counter. This counter is incremented
+ * when we send a freed buffers message and decremented when we receive
+ * one. If the balance is negative then we need to send a message
+ * as an acknowledgement to the other end, even if there are no
+ * freed buffers to acknowledge.
+ */
+ atomic_t free_bufs_balance;
+
+ /*
+ * Flag set when a service exceeds its freed buffers watermark,
+ * telling free_bufs_work to send a message when the balance
+ * counter is non-negative. This is ignored, and a message is
+ * sent in any case, if the balance is negative.
+ */
+ bool free_bufs_pending;
+
+ /* Pool for allocating outgoing free bufs messages */
+ struct vs_axon_tx_pool *free_bufs_pool;
+};
+
+#define to_vs_transport_axon(t) \
+ container_of(t, struct vs_transport_axon, transport)
+
+struct vs_mbuf_axon {
+ struct vs_mbuf base;
+ struct vs_transport_axon *owner;
+ dma_addr_t laddr;
+ struct vs_axon_tx_pool *pool;
+};
+
+#define to_vs_mbuf_axon(b) container_of(b, struct vs_mbuf_axon, base)
+
+/*
+ * Buffer allocation
+ *
+ * Buffers used by axons must be allocated within a single contiguous memory
+ * region, backed by a single OKL4 physical segment. This is similar to how
+ * the DMA allocator normally works, but we can't use the normal DMA allocator
+ * because the platform code will remap the allocated memory with caching
+ * disabled.
+ *
+ * We borrow the useful parts of the DMA allocator by providing our own DMA
+ * mapping ops which don't actually remap the memory.
+ */
+static void *axon_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *handle, gfp_t gfp, DMA_ATTRS attrs)
+{
+ unsigned long order;
+ size_t count;
+ struct page *page;
+ void *ptr;
+
+ *handle = DMA_ERROR_CODE;
+ size = PAGE_ALIGN(size);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0)
+ if (!(gfp & __GFP_WAIT))
+#else
+ if (!(gfp & __GFP_RECLAIM))
+#endif
+ return NULL;
+
+ order = get_order(size);
+ count = size >> PAGE_SHIFT;
+
+ if (dev_get_cma_area(dev)) {
+ page = dma_alloc_from_contiguous(dev, count, order);
+
+ if (!page)
+ return NULL;
+ } else {
+ struct page *p, *e;
+ page = alloc_pages(gfp, order);
+
+ if (!page)
+ return NULL;
+
+ /* Split huge page and free any excess pages */
+ split_page(page, order);
+ for (p = page + count, e = page + (1 << order); p < e; p++)
+ __free_page(p);
+ }
+
+ if (PageHighMem(page)) {
+ struct vm_struct *area = get_vm_area(size, VM_USERMAP);
+ if (!area)
+ goto free_pages;
+ ptr = area->addr;
+ area->phys_addr = __pfn_to_phys(page_to_pfn(page));
+
+ if (ioremap_page_range((unsigned long)ptr,
+ (unsigned long)ptr + size,
+ area->phys_addr, PAGE_KERNEL)) {
+ vunmap(ptr);
+ goto free_pages;
+ }
+ } else {
+ ptr = page_address(page);
+ }
+
+ *handle = (dma_addr_t)page_to_pfn(page) << PAGE_SHIFT;
+
+ dev_dbg(dev, "dma_alloc: %#tx bytes at %pK (%#llx), %s cma, %s high\n",
+ size, ptr, (long long)*handle,
+ dev_get_cma_area(dev) ? "is" : "not",
+ PageHighMem(page) ? "is" : "not");
+
+ return ptr;
+
+free_pages:
+ if (dev_get_cma_area(dev)) {
+ dma_release_from_contiguous(dev, page, count);
+ } else {
+ struct page *e = page + count;
+
+ while (page < e) {
+ __free_page(page);
+ page++;
+ }
+ }
+
+ return NULL;
+}
+
+static void axon_dma_free(struct device *dev, size_t size, void *cpu_addr,
+ dma_addr_t handle, DMA_ATTRS attrs)
+{
+ struct page *page = pfn_to_page(handle >> PAGE_SHIFT);
+
+ size = PAGE_ALIGN(size);
+
+ if (PageHighMem(page)) {
+ unmap_kernel_range((unsigned long)cpu_addr, size);
+ vunmap(cpu_addr);
+ }
+
+ if (dev_get_cma_area(dev)) {
+ dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
+ } else {
+ struct page *e = page + (size >> PAGE_SHIFT);
+
+ while (page < e) {
+ __free_page(page);
+ page++;
+ }
+ }
+}
+
+struct dma_map_ops axon_dma_ops = {
+ .alloc = axon_dma_alloc,
+ .free = axon_dma_free,
+};
+
+/*
+ * Quotas
+ * ------
+ *
+ * Each service has two quotas, one for send and one for receive. The
+ * send quota is incremented when we allocate an mbuf. The send quota
+ * is decremented by receiving a freed buffer ack from the remote
+ * end, either in the reserved bits of the service id or in a special
+ * free bufs message.
+ *
+ * The receive quota is incremented whenever we receive a message and
+ * decremented when we free the mbuf. Exceeding the receive quota
+ * means that something bad has happened, since the other end's send
+ * quota should have prevented it from sending the message; it
+ * indicates a driver bug, because the two ends are disagreeing about
+ * the quotas. If this happens then a warning is printed and the
+ * offending service is reset.
+ */
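+
+/*
+ * Worked example (quota value is illustrative): with a send quota of 4,
+ * a service may have at most 4 sent-but-unacknowledged mbufs. A 5th
+ * allocation attempt fails or blocks until the remote end acknowledges
+ * a freed buffer, either piggy-backed in the reserved service id bits
+ * of an incoming message or in a dedicated free bufs message; the ack
+ * reduces send_inflight and wakes any waiter on the quota waitqueue.
+ */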
+
+/*
+ * The base of the mbuf has the destination service id, but we pass the
+ * data pointer starting after the service id. The following helper
+ * functions are used to avoid ugly pointer arithmetic when handling
+ * mbufs.
+ */
+static size_t mbuf_real_size(struct vs_mbuf_axon *mbuf)
+{
+ return mbuf->base.size + sizeof(vs_service_id_t);
+}
+
+static void *mbuf_real_base(struct vs_mbuf_axon *mbuf)
+{
+ return mbuf->base.data - sizeof(vs_service_id_t);
+}
+
+/*
+ * Get the service id and reserved bits from a message buffer, and then
+ * clear the reserved bits so the upper layers don't see them.
+ */
+vs_service_id_t
+transport_get_mbuf_service_id(struct vs_transport_axon *transport,
+ void *data, unsigned int *freed_acks)
+{
+ unsigned int reserved_bits;
+ vs_service_id_t id;
+
+ /* Get the real service id and reserved bits */
+ id = *(vs_service_id_t *)data;
+ reserved_bits = vs_get_service_id_reserved_bits(id);
+ id = vs_get_real_service_id(id);
+
+ /* Clear the reserved bits in the service id */
+ vs_set_service_id_reserved_bits(&id, 0);
+ if (freed_acks) {
+ *(vs_service_id_t *)data = id;
+ *freed_acks = reserved_bits;
+ }
+ return id;
+}
+
+static void
+__transport_get_service_info(struct vs_mv_service_info *service_info)
+{
+ kref_get(&service_info->kref);
+}
+
+static struct vs_mv_service_info *
+transport_get_service_info(struct vs_service_device *service)
+{
+ struct vs_mv_service_info *service_info;
+
+ rcu_read_lock();
+ service_info = rcu_dereference(service->transport_priv);
+ if (service_info)
+ __transport_get_service_info(service_info);
+ rcu_read_unlock();
+
+ return service_info;
+}
+
+static struct vs_mv_service_info *
+transport_get_service_id_info(struct vs_transport_axon *transport,
+ vs_service_id_t service_id)
+{
+ struct vs_service_device *service;
+ struct vs_mv_service_info *service_info;
+
+ service = vs_session_get_service(transport->session_dev, service_id);
+ if (!service)
+ return NULL;
+
+ service_info = transport_get_service_info(service);
+
+ vs_put_service(service);
+ return service_info;
+}
+
+static void transport_info_free(struct rcu_head *rcu_head)
+{
+ struct vs_mv_service_info *service_info =
+ container_of(rcu_head, struct vs_mv_service_info, rcu_head);
+
+ vs_put_service(service_info->service);
+ kfree(service_info);
+}
+
+static void transport_info_release(struct kref *kref)
+{
+ struct vs_mv_service_info *service_info =
+ container_of(kref, struct vs_mv_service_info, kref);
+
+ call_rcu(&service_info->rcu_head, transport_info_free);
+}
+
+static void transport_put_service_info(struct vs_mv_service_info *service_info)
+{
+ kref_put(&service_info->kref, transport_info_release);
+}
+
+static bool transport_axon_reset(struct vs_transport_axon *transport);
+
+static void transport_fatal_error(struct vs_transport_axon *transport,
+ const char *msg)
+{
+ dev_err(transport->axon_dev, "Fatal transport error (%s); resetting\n",
+ msg);
+#ifdef DEBUG
+ dump_stack();
+#endif
+ transport_axon_reset(transport);
+}
+
+static unsigned int reduce_send_quota(struct vs_transport_axon *transport,
+ struct vs_mv_service_info *service_info, unsigned int count,
+ bool allow_tx_ready)
+{
+ int new_inflight, send_alloc;
+ bool was_over_quota, is_over_quota;
+
+ /* FIXME: Redmine issue #1303 - philip. */
+ spin_lock_irq(&transport->readiness_lock);
+ /*
+ * We read the current send_alloc for error checking *before*
+ * decrementing send_inflight. This avoids any false positives
+ * due to send_alloc being incremented by a concurrent alloc_mbuf.
+ *
+ * Note that there is an implicit smp_mb() before atomic_sub_return(),
+ * matching the explicit one in alloc_mbuf.
+ */
+ send_alloc = atomic_read(&service_info->send_alloc);
+ new_inflight = atomic_sub_return(count, &service_info->send_inflight);
+
+ spin_unlock_irq(&transport->readiness_lock);
+ if (WARN_ON(new_inflight < send_alloc)) {
+ dev_err(transport->axon_dev,
+ "inflight sent messages for service %d is less than the number of allocated messages (%d < %d, was reduced by %d)\n",
+ service_info->service->id, new_inflight,
+ send_alloc, count);
+ transport_fatal_error(transport, "sent msg count underrun");
+ return 0;
+ }
+
+ was_over_quota = (new_inflight + count >=
+ service_info->service->send_quota);
+	is_over_quota = (new_inflight >= service_info->service->send_quota);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Service %d quota %d -> %d (over_quota: %d -> %d)\n",
+ service_info->service->id, new_inflight + count,
+ new_inflight, was_over_quota, is_over_quota);
+
+ /*
+ * Notify the service that a buffer has been freed. We call tx_ready
+ * if this is a notification from the remote end (i.e. not an unsent
+ * buffer) and the quota has just dropped below the maximum.
+ */
+ vs_session_quota_available(transport->session_dev,
+ service_info->service->id, count,
+ !is_over_quota && was_over_quota && allow_tx_ready);
+
+ return count;
+}
+
+static void __transport_tx_pool_free(struct vs_axon_tx_pool *pool,
+ dma_addr_t laddr);
+
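+/*
+ * Walk the TX descriptor ring from the last freed position, returning
+ * the buffer of each descriptor that the Axon has finished with (i.e.
+ * whose pending bit is clear) to its pool. Called with the readiness
+ * lock held.
+ */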
+static void
+__transport_tx_cleanup(struct vs_transport_axon *transport)
+{
+ u32 uptr;
+ struct okl4_axon_queue_entry *desc;
+
+ lockdep_assert_held(&transport->readiness_lock);
+
+ uptr = transport->tx_uptr_freed;
+ desc = &transport->tx_descs[uptr];
+
+ while (!okl4_axon_data_info_getpending(&desc->info)) {
+ if (!transport->tx_pools[uptr])
+ break;
+
+ __transport_tx_pool_free(transport->tx_pools[uptr],
+ okl4_axon_data_info_getladdr(&desc->info));
+ transport->tx_pools[uptr] = NULL;
+
+ INC_MOD(uptr, transport->tx->queues[0].entries);
+ desc = &transport->tx_descs[uptr];
+ transport->tx_uptr_freed = uptr;
+ }
+}
+
+static void
+transport_axon_free_tx_pool(struct work_struct *work)
+{
+ struct vs_axon_tx_pool *pool = container_of(work,
+ struct vs_axon_tx_pool, free_work);
+ struct vs_transport_axon *transport = pool->transport;
+
+ dmam_free_coherent(transport->axon_dev,
+ pool->count << pool->alloc_order,
+ pool->base_vaddr, pool->base_laddr);
+ devm_kfree(transport->axon_dev, pool);
+}
+
+static void
+transport_axon_queue_free_tx_pool(struct kref *kref)
+{
+ struct vs_axon_tx_pool *pool = container_of(kref,
+ struct vs_axon_tx_pool, kref);
+
+ /*
+	 * Queue the free onto the work queue so that it runs in a
+	 * context where IRQs are enabled.
+ */
+ INIT_WORK(&pool->free_work, transport_axon_free_tx_pool);
+ queue_work(work_queue, &pool->free_work);
+}
+
+static void
+transport_axon_put_tx_pool(struct vs_axon_tx_pool *pool)
+{
+ kref_put(&pool->kref, transport_axon_queue_free_tx_pool);
+}
+
+/* Low-level tx buffer allocation, without quota tracking. */
+static struct vs_mbuf_axon *
+__transport_alloc_mbuf(struct vs_transport_axon *transport,
+ vs_service_id_t service_id, struct vs_axon_tx_pool *pool,
+ size_t size, gfp_t gfp_flags)
+{
+ size_t real_size = size + sizeof(vs_service_id_t);
+ struct vs_mbuf_axon *mbuf;
+ unsigned index;
+
+ if (WARN_ON(real_size > (1 << pool->alloc_order))) {
+ dev_err(transport->axon_dev, "Message too big (%zu > %zu)\n",
+ real_size, (size_t)1 << pool->alloc_order);
+ goto fail_message_size;
+ }
+
+ kref_get(&pool->kref);
+
+ do {
+ index = find_first_zero_bit(pool->alloc_bitmap, pool->count);
+ if (unlikely(index >= pool->count)) {
+ /*
+ * No buffers left. This can't be an out-of-quota
+ * situation, because we've already checked the quota;
+ * it must be because there's a buffer left over in
+ * the tx queue. Clean out the tx queue and retry.
+ */
+ spin_lock_irq(&transport->readiness_lock);
+ __transport_tx_cleanup(transport);
+ spin_unlock_irq(&transport->readiness_lock);
+
+ index = find_first_zero_bit(pool->alloc_bitmap,
+ pool->count);
+ }
+ if (unlikely(index >= pool->count))
+ goto fail_buffer_alloc;
+ } while (unlikely(test_and_set_bit_lock(index, pool->alloc_bitmap)));
+
+ mbuf = kmem_cache_alloc(mbuf_cache, gfp_flags & ~GFP_ZONEMASK);
+ if (!mbuf)
+ goto fail_mbuf_alloc;
+
+ mbuf->base.is_recv = false;
+ mbuf->base.data = pool->base_vaddr + (index << pool->alloc_order);
+ mbuf->base.size = size;
+ mbuf->owner = transport;
+ mbuf->laddr = pool->base_laddr + (index << pool->alloc_order);
+ mbuf->pool = pool;
+
+ /*
+ * We put the destination service id in the mbuf, but increment the
+ * data pointer past it so the receiver doesn't always need to skip
+ * the service id.
+ */
+ *(vs_service_id_t *)mbuf->base.data = service_id;
+ mbuf->base.data += sizeof(vs_service_id_t);
+
+ return mbuf;
+
+fail_mbuf_alloc:
+ clear_bit_unlock(index, pool->alloc_bitmap);
+fail_buffer_alloc:
+ transport_axon_put_tx_pool(pool);
+fail_message_size:
+ return NULL;
+}
+
+/* Allocate a tx buffer for a specified service. */
+static struct vs_mbuf *transport_alloc_mbuf(struct vs_transport *_transport,
+ struct vs_service_device *service, size_t size, gfp_t gfp_flags)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ size_t real_size = size + sizeof(vs_service_id_t);
+ struct vs_mv_service_info *service_info = NULL;
+ struct vs_mbuf_axon *mbuf;
+ vs_service_id_t service_id = service->id;
+
+ if (real_size > transport->msg_size) {
+ dev_err(transport->axon_dev, "Message too big (%zu > %zu)\n",
+ real_size, transport->msg_size);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (WARN_ON(service_id == MSG_SEND_FREE_BUFS))
+ return ERR_PTR(-ENXIO);
+
+ service_info = transport_get_service_info(service);
+ if (WARN_ON(!service_info))
+ return ERR_PTR(-EINVAL);
+
+ if (!service_info->tx_pool) {
+ transport_put_service_info(service_info);
+ return ERR_PTR(-ECONNRESET);
+ }
+
+ if (!atomic_add_unless(&service_info->send_inflight, 1,
+ service_info->service->send_quota)) {
+ /* Service has reached its quota */
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Service %d is at max send quota %d\n",
+ service_id, service_info->service->send_quota);
+ transport_put_service_info(service_info);
+ return ERR_PTR(-ENOBUFS);
+ }
+
+ /*
+ * Increment the count of allocated but unsent mbufs. This is done
+ * *after* the send_inflight increment (with a barrier to enforce
+ * ordering) to ensure that send_inflight is never less than
+ * send_alloc - see reduce_send_quota().
+ */
+ smp_mb__before_atomic_inc();
+ atomic_inc(&service_info->send_alloc);
+
+ mbuf = __transport_alloc_mbuf(transport, service_id,
+ service_info->tx_pool, size, gfp_flags);
+ if (!mbuf) {
+ /*
+ * Failed to allocate a buffer - decrement our quota back to
+ * where it was.
+ */
+ atomic_dec(&service_info->send_alloc);
+ smp_mb__after_atomic_dec();
+ atomic_dec(&service_info->send_inflight);
+
+ transport_put_service_info(service_info);
+
+ return ERR_PTR(-ENOMEM);
+ }
+
+ transport_put_service_info(service_info);
+
+ return &mbuf->base;
+}
+
+static void transport_free_sent_mbuf(struct vs_transport_axon *transport,
+ struct vs_mbuf_axon *mbuf)
+{
+ kmem_cache_free(mbuf_cache, mbuf);
+}
+
+static void __transport_tx_pool_free(struct vs_axon_tx_pool *pool,
+ dma_addr_t laddr)
+{
+ unsigned index = (laddr - pool->base_laddr) >> pool->alloc_order;
+
+ if (WARN_ON(index >= pool->count)) {
+ printk(KERN_DEBUG "free %#llx base %#llx order %d count %d\n",
+ (long long)laddr, (long long)pool->base_laddr,
+ pool->alloc_order, pool->count);
+ return;
+ }
+
+ clear_bit_unlock(index, pool->alloc_bitmap);
+ transport_axon_put_tx_pool(pool);
+}
+
+static int transport_rx_queue_buffer(struct vs_transport_axon *transport,
+ void *ptr, dma_addr_t laddr);
+
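+/*
+ * Return a received buffer to the transport once its mbuf is freed.
+ * If we hold more RX buffers than we need, the buffer goes back to the
+ * DMA pool; otherwise it is requeued in the RX Axon ring or, if the
+ * ring is full, placed on the software freelist, and the RX tasklet is
+ * rescheduled.
+ */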
+static void transport_rx_recycle(struct vs_transport_axon *transport,
+ struct vs_mbuf_axon *mbuf)
+{
+ void *data = mbuf_real_base(mbuf);
+ dma_addr_t laddr = mbuf->laddr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&transport->rx_alloc_lock, flags);
+
+ if (transport->rx_alloc_extra) {
+ transport->rx_alloc_extra--;
+ dma_pool_free(transport->rx_pool, data, laddr);
+ } else if (transport_rx_queue_buffer(transport, data, laddr) < 0) {
+ struct vs_axon_rx_freelist_entry *buf = data;
+ buf->laddr = laddr;
+ list_add_tail(&buf->list, &transport->rx_freelist);
+ tasklet_schedule(&transport->rx_tasklet);
+ } else {
+ tasklet_schedule(&transport->rx_tasklet);
+ }
+
+ spin_unlock_irqrestore(&transport->rx_alloc_lock, flags);
+}
+
+static void transport_free_mbuf_pools(struct vs_transport_axon *transport,
+ struct vs_service_device *service,
+ struct vs_mv_service_info *service_info)
+{
+ /*
+ * Free the TX allocation pool. This will also free any buffer
+ * memory allocated from the pool, so it is essential that
+ * this happens only after we have successfully freed all
+ * mbufs.
+ *
+ * Note that the pool will not exist if the core client is reset
+ * before it receives a startup message.
+ */
+ if (!IS_ERR_OR_NULL(service_info->tx_pool))
+ transport_axon_put_tx_pool(service_info->tx_pool);
+ service_info->tx_pool = NULL;
+
+ /* Mark the service's preallocated RX buffers as extra. */
+ spin_lock_irq(&transport->rx_alloc_lock);
+ transport->rx_alloc_extra += service_info->rx_allocated;
+ service_info->rx_allocated = 0;
+ spin_unlock_irq(&transport->rx_alloc_lock);
+}
+
+/* Low-level tx or rx buffer free, with no quota tracking */
+static void __transport_free_mbuf(struct vs_transport_axon *transport,
+ struct vs_mbuf_axon *mbuf, bool is_rx)
+{
+ if (is_rx) {
+ transport_rx_recycle(transport, mbuf);
+ } else {
+ __transport_tx_pool_free(mbuf->pool, mbuf->laddr);
+ }
+
+ kmem_cache_free(mbuf_cache, mbuf);
+}
+
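+/*
+ * Free an mbuf on behalf of a service, with quota accounting. Freeing
+ * a received buffer decrements recv_inflight and increments recv_freed,
+ * scheduling a free_bufs message once the freed count reaches the
+ * watermark. Freeing an allocated-but-unsent buffer instead returns
+ * the service's send quota. If the service is being reset, the free is
+ * counted against its outstanding_frees, and the service is re-enabled
+ * when that count reaches zero.
+ */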
+static void transport_free_mbuf(struct vs_transport *_transport,
+ struct vs_service_device *service, struct vs_mbuf *_mbuf)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+ struct vs_mv_service_info *service_info = NULL;
+ void *data = mbuf_real_base(mbuf);
+ vs_service_id_t service_id __maybe_unused =
+ transport_get_mbuf_service_id(transport, data, NULL);
+ bool is_recv = mbuf->base.is_recv;
+
+ WARN_ON(!service);
+ service_info = transport_get_service_info(service);
+
+ __transport_free_mbuf(transport, mbuf, is_recv);
+
+ /*
+ * If this message was left over from a service that has already been
+ * deleted, we don't need to do any quota accounting.
+ */
+ if (!service_info)
+ return;
+
+ if (unlikely(atomic_read(&service_info->outstanding_frees))) {
+ if (atomic_dec_and_test(&service_info->outstanding_frees)) {
+ dev_dbg(transport->axon_dev,
+ "service %d all outstanding frees done\n",
+ service->id);
+ transport_free_mbuf_pools(transport, service,
+ service_info);
+ vs_service_enable(service);
+ } else {
+ dev_dbg(transport->axon_dev,
+ "service %d outstanding frees -> %d\n",
+ service->id, atomic_read(
+ &service_info->outstanding_frees));
+ }
+ } else if (is_recv) {
+ smp_mb__before_atomic_dec();
+ atomic_dec(&service_info->recv_inflight);
+ if (atomic_inc_return(&service_info->recv_freed) >=
+ service_info->recv_freed_watermark) {
+ transport->free_bufs_pending = true;
+ schedule_delayed_work(&transport->free_bufs_work, 0);
+ }
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Freed recv buffer for service %d rq=%d/%d, freed=%d (watermark = %d)\n",
+ service_id,
+ atomic_read(&service_info->recv_inflight),
+ service_info->service->recv_quota,
+ atomic_read(&service_info->recv_freed),
+ service_info->recv_freed_watermark);
+ } else {
+ /*
+ * We are freeing a message buffer that we allocated. This
+ * usually happens on error paths in application drivers if
+ * we allocated a buffer but failed to send it. In this case
+ * we need to decrement our own send quota since we didn't
+ * send anything.
+ */
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Freeing send buffer for service %d, send quota = %d\n",
+ service_id, atomic_read(&service_info->send_inflight));
+
+ smp_mb__before_atomic_dec();
+ atomic_dec(&service_info->send_alloc);
+
+ /*
+ * We don't allow the tx_ready handler to run when we are
+ * freeing an mbuf that we allocated.
+ */
+ reduce_send_quota(transport, service_info, 1, false);
+ }
+
+ transport_put_service_info(service_info);
+}
+
+static size_t transport_mbuf_size(struct vs_mbuf *_mbuf)
+{
+ struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+
+ return mbuf_real_size(mbuf);
+}
+
+static size_t transport_max_mbuf_size(struct vs_transport *_transport)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+ return transport->msg_size - sizeof(vs_service_id_t);
+}
+
+static int okl4_error_to_errno(okl4_error_t err)
+{
+ switch (err) {
+ case OKL4_OK:
+ return 0;
+ case OKL4_ERROR_AXON_QUEUE_NOT_MAPPED:
+ /* Axon has been reset locally */
+ return -ECONNRESET;
+ case OKL4_ERROR_AXON_QUEUE_NOT_READY:
+ /* No message buffers in the queue. */
+ return -ENOBUFS;
+ case OKL4_ERROR_AXON_INVALID_OFFSET:
+ case OKL4_ERROR_AXON_AREA_TOO_BIG:
+ /* Buffer address is bad */
+ return -EFAULT;
+ case OKL4_ERROR_AXON_BAD_MESSAGE_SIZE:
+ case OKL4_ERROR_AXON_TRANSFER_LIMIT_EXCEEDED:
+ /* One of the Axon's message size limits has been exceeded */
+ return -EMSGSIZE;
+ default:
+ /* Miscellaneous failure, probably a bad cap */
+ return -EIO;
+ }
+}
+
+static void queue_tx_mbuf(struct vs_mbuf_axon *mbuf, struct vs_transport_axon *priv,
+ vs_service_id_t service_id)
+{
+ list_add_tail(&mbuf->base.queue, &priv->tx_queue);
+}
+
+static void free_tx_mbufs(struct vs_transport_axon *priv)
+{
+ struct vs_mbuf_axon *child, *tmp;
+
+ list_for_each_entry_safe(child, tmp, &priv->tx_queue, base.queue) {
+ list_del(&child->base.queue);
+ __transport_free_mbuf(priv, child, false);
+ }
+}
+
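+/* Kick the Axon so it starts transferring any queued messages. */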
+static int __transport_flush(struct vs_transport_axon *transport)
+{
+ _okl4_sys_axon_trigger_send(transport->tx_cap);
+ return 0;
+}
+
+static int transport_flush(struct vs_transport *_transport,
+ struct vs_service_device *service)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+ return __transport_flush(transport);
+}
+
+/*
+ * Low-level transport message send function.
+ *
+ * The caller must hold the transport->readiness_lock, and is responsible for
+ * freeing the mbuf on successful send (use transport_free_sent_mbuf). The
+ * mbuf should _not_ be freed if this function fails. The Virtual Service
+ * driver is responsible for freeing the mbuf in the failure case.
+ */
+static int __transport_send(struct vs_transport_axon *transport,
+ struct vs_mbuf_axon *mbuf, vs_service_id_t service_id,
+ unsigned long flags)
+{
+ u32 uptr;
+ struct okl4_axon_queue_entry *desc;
+ struct vs_axon_tx_pool *old_pool;
+ dma_addr_t old_laddr;
+
+ lockdep_assert_held(&transport->readiness_lock);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "send %zu bytes to service %d\n",
+ mbuf->base.size, service_id);
+ vs_debug_dump_mbuf(transport->session_dev, &mbuf->base);
+
+ uptr = ACCESS_ONCE(transport->tx->queues[0].uptr);
+ desc = &transport->tx_descs[uptr];
+
+ /* Is the descriptor ready to use? */
+ if (okl4_axon_data_info_getpending(&desc->info))
+ return -ENOSPC;
+ mb();
+
+ /* The descriptor is ours; save its old state and increment the uptr */
+ old_pool = transport->tx_pools[uptr];
+ if (old_pool != NULL)
+ old_laddr = okl4_axon_data_info_getladdr(&desc->info);
+ transport->tx_pools[uptr] = mbuf->pool;
+
+ INC_MOD(uptr, transport->tx->queues[0].entries);
+ ACCESS_ONCE(transport->tx->queues[0].uptr) = uptr;
+
+ /* Set up the descriptor */
+ desc->data_size = mbuf_real_size(mbuf);
+ okl4_axon_data_info_setladdr(&desc->info, mbuf->laddr);
+
+ /* Message is ready to go */
+ wmb();
+ okl4_axon_data_info_setpending(&desc->info, true);
+
+ if (flags & VS_TRANSPORT_SEND_FLAGS_MORE) {
+ /*
+ * This is a batched message, so we normally don't flush,
+ * unless we've filled the queue completely.
+ *
+ * Races on the queue descriptor don't matter here, because
+ * this is only an optimisation; the service should do an
+ * explicit flush when it finishes the batch anyway.
+ */
+ desc = &transport->tx_descs[uptr];
+ if (okl4_axon_data_info_getpending(&desc->info))
+ __transport_flush(transport);
+ } else {
+ __transport_flush(transport);
+ }
+
+ /* Free any buffer previously in the descriptor */
+ if (old_pool != NULL) {
+ u32 uptr_freed = transport->tx_uptr_freed;
+ INC_MOD(uptr_freed, transport->tx->queues[0].entries);
+ WARN_ON(uptr_freed != uptr);
+ __transport_tx_pool_free(old_pool, old_laddr);
+ transport->tx_uptr_freed = uptr_freed;
+ }
+
+ return 0;
+}
+
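+/*
+ * Try to send an mbuf immediately, falling back to the software TX
+ * queue if other messages are already queued or if the Axon ring is
+ * full. On success, *queued indicates whether the mbuf was queued for
+ * later transmission rather than handed to the Axon.
+ */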
+static int transport_send_might_queue(struct vs_transport_axon *transport,
+ struct vs_mbuf_axon *mbuf, vs_service_id_t service_id,
+ unsigned long flags, bool *queued)
+{
+ int ret = 0;
+
+ lockdep_assert_held(&transport->readiness_lock);
+ *queued = false;
+
+ if (transport->readiness != VS_TRANSPORT_ACTIVE)
+ return -ECONNRESET;
+
+ if (!list_empty(&transport->tx_queue)) {
+ *queued = true;
+ } else {
+ ret = __transport_send(transport, mbuf, service_id, flags);
+ if (ret == -ENOSPC) {
+ *queued = true;
+ ret = 0;
+ }
+ }
+
+ if (*queued)
+ queue_tx_mbuf(mbuf, transport, service_id);
+
+ return ret;
+}
+
+static int transport_send(struct vs_transport *_transport,
+ struct vs_service_device *service, struct vs_mbuf *_mbuf,
+ unsigned long flags)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ struct vs_mbuf_axon *mbuf = to_vs_mbuf_axon(_mbuf);
+ struct vs_mv_service_info *service_info;
+ vs_service_id_t service_id;
+ int recv_freed, freed_acks;
+ bool queued;
+ int err;
+ unsigned long irqflags;
+
+ if (WARN_ON(!transport || !mbuf || mbuf->owner != transport))
+ return -EINVAL;
+
+ service_id = transport_get_mbuf_service_id(transport,
+ mbuf_real_base(mbuf), NULL);
+
+ if (WARN_ON(service_id != service->id))
+ return -EINVAL;
+
+ service_info = transport_get_service_info(service);
+ if (!service_info)
+ return -EINVAL;
+
+ if (mbuf->base.is_recv) {
+ /*
+ * This message buffer was allocated for receive. We don't
+ * allow receive message buffers to be reused for sending
+ * because it makes our quotas inconsistent.
+ */
+ dev_err(&service_info->service->dev,
+ "Attempted to send a received message buffer\n");
+ transport_put_service_info(service_info);
+ return -EINVAL;
+ }
+
+ if (!service_info->ready) {
+ transport_put_service_info(service_info);
+ return -ECOMM;
+ }
+
+ /*
+ * Set the message's service id reserved bits to the number of buffers
+ * we have freed. We can only ack 2 ^ VS_SERVICE_ID_RESERVED_BITS - 1
+ * buffers in one message.
+ */
+ do {
+ recv_freed = atomic_read(&service_info->recv_freed);
+ freed_acks = min_t(int, recv_freed,
+ VS_SERVICE_ID_TRANSPORT_MASK);
+ } while (recv_freed != atomic_cmpxchg(&service_info->recv_freed,
+ recv_freed, recv_freed - freed_acks));
+
+ service_id = service_info->service->id;
+ vs_set_service_id_reserved_bits(&service_id, freed_acks);
+ *(vs_service_id_t *)mbuf_real_base(mbuf) = service_id;
+
+ spin_lock_irqsave(&transport->readiness_lock, irqflags);
+ err = transport_send_might_queue(transport, mbuf,
+ service_info->service->id, flags, &queued);
+ if (err) {
+ /* We failed to send, so revert the freed acks */
+ if (atomic_add_return(freed_acks,
+ &service_info->recv_freed) >=
+ service_info->recv_freed_watermark) {
+ transport->free_bufs_pending = true;
+ schedule_delayed_work(&transport->free_bufs_work, 0);
+ }
+ transport_put_service_info(service_info);
+ spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+ return err;
+ }
+
+ atomic_dec(&service_info->send_alloc);
+
+ if (queued) {
+ transport_put_service_info(service_info);
+ spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+ return 0;
+ }
+
+ /*
+ * The mbuf was sent successfully. We can free it locally since it is
+ * now owned by the remote end.
+ */
+ transport_free_sent_mbuf(transport, mbuf);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Send okay: service %d (0x%.2x) sq=%d/%d, alloc--=%d, rq=%d/%d, freed=%d/%d, bc=%d\n",
+ service_info->service->id, service_id,
+ atomic_read(&service_info->send_inflight),
+ service_info->service->send_quota,
+ atomic_read(&service_info->send_alloc),
+ atomic_read(&service_info->recv_inflight),
+ service_info->service->recv_quota, freed_acks,
+ atomic_read(&service_info->recv_freed),
+ atomic_read(&transport->free_bufs_balance));
+
+ transport_put_service_info(service_info);
+ spin_unlock_irqrestore(&transport->readiness_lock, irqflags);
+
+ return 0;
+}
+
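+/*
+ * Deferred work that batches freed-buffer acknowledgements for all
+ * services into a single MSG_SEND_FREE_BUFS message to the remote end.
+ * The free_bufs_balance counter bounds the number of these messages
+ * outstanding in each direction; the work runs when we owe the remote
+ * end an ack (negative balance) or when a service has passed its
+ * freed-buffer watermark (free_bufs_pending).
+ */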
+static void transport_free_bufs_work(struct work_struct *work)
+{
+ struct delayed_work *dwork = to_delayed_work(work);
+ struct vs_transport_axon *transport = container_of(dwork,
+ struct vs_transport_axon, free_bufs_work);
+ struct vs_mbuf_axon *mbuf;
+ int i, err, count = 0, old_balance;
+ bool queued;
+ size_t size;
+ u16 *p;
+
+ /*
+ * Atomically decide whether to send a message, and increment
+ * the balance if we are going to.
+ *
+ * We don't need barriers before these reads because they're
+ * implicit in the work scheduling.
+ */
+ do {
+ old_balance = atomic_read(&transport->free_bufs_balance);
+
+ /*
+ * We only try to send if the balance is negative,
+ * or if we have been triggered by going over a
+ * watermark.
+ */
+ if (old_balance >= 0 && !transport->free_bufs_pending)
+ return;
+
+ /*
+		 * If we've hit the max balance, we can't send. The work
+		 * will be rescheduled next time the balance is
+		 * decremented, if free_bufs_pending is true.
+ */
+ if (old_balance >= MAX_BALANCE)
+ return;
+
+ } while (old_balance != atomic_cmpxchg(&transport->free_bufs_balance,
+ old_balance, old_balance + 1));
+
+ /* Try to allocate a message buffer. */
+ mbuf = __transport_alloc_mbuf(transport, MSG_SEND_FREE_BUFS,
+ transport->free_bufs_pool,
+ transport->msg_size - sizeof(vs_service_id_t),
+ GFP_KERNEL | __GFP_NOWARN);
+ if (!mbuf) {
+ /* Out of memory at the moment; retry later. */
+ atomic_dec(&transport->free_bufs_balance);
+ schedule_delayed_work(dwork, FREE_BUFS_RETRY_DELAY);
+ return;
+ }
+
+ /*
+ * Clear free_bufs_pending, because we are going to try to send. We
+ * need a write barrier afterwards to guarantee that this write is
+ * ordered before any writes to the recv_freed counts, and therefore
+ * before any remote free_bufs_pending = true when a service goes
+ * over its watermark right after we inspect it.
+ *
+ * The matching barrier is implicit in the atomic_inc_return in
+ * transport_free_mbuf().
+ */
+ transport->free_bufs_pending = false;
+ smp_wmb();
+
+ /*
+ * Fill in the buffer. Message format is:
+ *
+ * u16: Number of services
+ *
+ * For each service:
+ * u16: Service ID
+ * u16: Number of freed buffers
+ */
+ p = mbuf->base.data;
+ *(p++) = 0;
+
+ for_each_set_bit(i, transport->service_bitmap,
+ VS_SERVICE_ID_BITMAP_BITS) {
+ struct vs_mv_service_info *service_info;
+ int recv_freed;
+ u16 freed_acks;
+
+ service_info = transport_get_service_id_info(transport, i);
+ if (!service_info)
+ continue;
+
+ /*
+ * Don't let the message exceed the maximum size for the
+ * transport.
+ */
+ size = sizeof(vs_service_id_t) + sizeof(u16) +
+ (count * (2 * sizeof(u16)));
+ if (size > transport->msg_size) {
+ /* FIXME: Jira ticket SDK-3131 - ryanm. */
+ transport_put_service_info(service_info);
+ transport->free_bufs_pending = true;
+ break;
+ }
+
+ /*
+ * We decrement each service's quota immediately by up to
+ * USHRT_MAX. If we subsequently fail to send the message then
+ * we return the count to what it was previously.
+ */
+ do {
+ recv_freed = atomic_read(&service_info->recv_freed);
+ freed_acks = min_t(int, USHRT_MAX, recv_freed);
+ } while (recv_freed != atomic_cmpxchg(
+ &service_info->recv_freed,
+ recv_freed, recv_freed - freed_acks));
+
+ if (freed_acks) {
+ if (freed_acks < recv_freed)
+ transport->free_bufs_pending = true;
+
+ *(p++) = service_info->service->id;
+ *(p++) = freed_acks;
+ count++;
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT,
+ transport->session_dev,
+ transport->axon_dev,
+ " [%.2d] Freed %.2d buffers\n",
+ service_info->service->id,
+ freed_acks);
+ } else {
+ vs_dev_debug(VS_DEBUG_TRANSPORT,
+ transport->session_dev,
+ transport->axon_dev,
+ " [%.2d] No buffers to free\n",
+ service_info->service->id);
+ }
+
+ transport_put_service_info(service_info);
+ }
+
+ if (transport->free_bufs_pending)
+ schedule_delayed_work(dwork, 0);
+
+ if (count == 0 && old_balance >= 0) {
+ /*
+ * We are sending a new free bufs message, but we have no
+ * freed buffers to tell the other end about. We don't send
+ * an empty message unless the pre-increment balance was
+ * negative (in which case we need to ack a remote free_bufs).
+ *
+ * Note that nobody else can increase the balance, so we only
+ * need to check for a non-negative balance once before
+ * decrementing. However, if the incoming free-bufs handler
+ * concurrently decrements, the balance may become negative,
+ * in which case we reschedule ourselves immediately to send
+ * the ack.
+ */
+ if (atomic_dec_return(&transport->free_bufs_balance) < 0)
+ schedule_delayed_work(dwork, 0);
+
+ __transport_free_mbuf(transport, mbuf, false);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "No services had buffers to free\n");
+
+ return;
+ }
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Sending free bufs message for %d services\n", count);
+
+	/* Fill in the service count and fix up the message size */
+ p = mbuf->base.data;
+ *p = count;
+ mbuf->base.size = sizeof(u16) * ((count * 2) + 1);
+
+ spin_lock_irq(&transport->readiness_lock);
+ err = transport_send_might_queue(transport, mbuf, MSG_SEND_FREE_BUFS,
+ 0, &queued);
+ if (err) {
+ spin_unlock_irq(&transport->readiness_lock);
+ goto fail;
+ }
+
+ /* FIXME: Jira ticket SDK-4675 - ryanm. */
+ if (!queued) {
+ /*
+ * The mbuf was sent successfully. We can free it locally
+ * since it is now owned by the remote end.
+ */
+ transport_free_sent_mbuf(transport, mbuf);
+ }
+ spin_unlock_irq(&transport->readiness_lock);
+
+ return;
+
+fail:
+ dev_err(transport->axon_dev,
+ "Failed to send free bufs message: %d\n", err);
+ transport_fatal_error(transport, "free bufs send failed");
+}
+
+static int transport_notify(struct vs_transport *_transport,
+ struct vs_service_device *service, unsigned long bits)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ unsigned long bit_offset, bitmask, word;
+ int first_set_bit, spilled_bits;
+
+ BUG_ON(!transport);
+
+ if (!bits)
+ return -EINVAL;
+
+ /* Check that the service isn't trying to raise bits it doesn't own */
+ if (bits & ~((1UL << service->notify_send_bits) - 1))
+ return -EINVAL;
+
+ bit_offset = service->notify_send_offset;
+ word = BIT_WORD(bit_offset);
+ bitmask = bits << (bit_offset % BITS_PER_LONG);
+
+ vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ "Sending notification %ld to service id %d\n", bitmask,
+ service->id);
+
+ _okl4_sys_vinterrupt_raise(transport->notify_cap[word], bitmask);
+
+ /*
+ * Bit range may spill into the next virqline.
+ *
+ * Check by adding the bit offset to the index of the highest set bit in
+ * the requested bitmask. If we need to raise a bit that is greater than
+ * bit 31, we have spilled into the next word and need to raise that too.
+ */
+	first_set_bit = find_last_bit(&bits, BITS_PER_LONG);
+ spilled_bits = first_set_bit + bit_offset - (BITS_PER_LONG - 1);
+ if (spilled_bits > 0) {
+ /*
+ * Calculate the new bitmask for the spilled bits. We do this by
+		 * shifting the requested bits to the right. The number of shifts
+		 * is determined by where the first spilled bit is.
+ */
+ int first_spilled_bit = first_set_bit - spilled_bits + 1;
+
+ bitmask = bits >> first_spilled_bit;
+
+ vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ "Sending notification %ld to service id %d\n", bitmask,
+ service->id);
+
+ _okl4_sys_vinterrupt_raise(transport->notify_cap[word + 1], bitmask);
+ }
+
+ return 0;
+}
+
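+/*
+ * Handle an incoming MSG_SEND_FREE_BUFS message: credit each listed
+ * service's send quota with the number of buffers the remote end has
+ * freed, then decrement the free_bufs balance and, if we owe an ack or
+ * have freed buffers of our own to report, schedule the free_bufs
+ * work.
+ */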
+static void
+transport_handle_free_bufs_message(struct vs_transport_axon *transport,
+ struct vs_mbuf_axon *mbuf)
+{
+ struct vs_mv_service_info *service_info;
+ vs_service_id_t service_id;
+ u16 *p = mbuf->base.data;
+ int i, count, freed_acks, new_balance;
+
+ count = *(p++);
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Free bufs message received for %d services\n", count);
+ for (i = 0; i < count; i++) {
+ int old_quota __maybe_unused;
+
+ service_id = *(p++);
+ freed_acks = *(p++);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev, " [%.2d] %.4d\n",
+ service_id, freed_acks);
+
+ service_info = transport_get_service_id_info(transport,
+ service_id);
+ if (!service_info) {
+ vs_dev_debug(VS_DEBUG_TRANSPORT,
+ transport->session_dev,
+ transport->axon_dev,
+ "Got %d free_acks for unknown service %d\n",
+ freed_acks, service_id);
+ continue;
+ }
+
+ old_quota = atomic_read(&service_info->send_inflight);
+ freed_acks = reduce_send_quota(transport, service_info,
+ freed_acks, service_info->ready);
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ " [%.2d] Freed %.2d buffers (%d -> %d, quota = %d)\n",
+ service_id, freed_acks, old_quota,
+ atomic_read(&service_info->send_inflight),
+ service_info->service->send_quota);
+
+ transport_put_service_info(service_info);
+ }
+
+ __transport_free_mbuf(transport, mbuf, true);
+
+ new_balance = atomic_dec_return(&transport->free_bufs_balance);
+ if (new_balance < -MAX_BALANCE) {
+ dev_err(transport->axon_dev,
+ "Balance counter fell below -MAX_BALANCE (%d < %d)\n",
+ atomic_read(&transport->free_bufs_balance),
+ -MAX_BALANCE);
+ transport_fatal_error(transport, "balance counter underrun");
+ return;
+ }
+
+ /* Check if we need to send a freed buffers message back */
+ if (new_balance < 0 || transport->free_bufs_pending)
+ schedule_delayed_work(&transport->free_bufs_work, 0);
+}
+
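+/*
+ * Place a buffer in the next free slot of the RX Axon ring. Returns
+ * -ENOSPC if the ring is full, otherwise a boolean indicating whether
+ * the following descriptor is also free.
+ */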
+static int transport_rx_queue_buffer(struct vs_transport_axon *transport,
+ void *ptr, dma_addr_t laddr)
+{
+ struct okl4_axon_queue_entry *desc;
+ okl4_axon_data_info_t info;
+
+ /* Select the buffer desc to reallocate */
+ desc = &transport->rx_descs[transport->rx_uptr_allocated];
+ info = ACCESS_ONCE(desc->info);
+
+ /* If there is no space in the rx queue, fail */
+ if (okl4_axon_data_info_getusr(&info))
+ return -ENOSPC;
+
+ /* Don't update desc before reading the clear usr bit */
+ smp_mb();
+
+ /* Update the buffer pointer in the desc and mark it valid. */
+ transport->rx_ptrs[transport->rx_uptr_allocated] = ptr;
+ okl4_axon_data_info_setladdr(&info, (okl4_laddr_t)laddr);
+ okl4_axon_data_info_setpending(&info, true);
+ okl4_axon_data_info_setusr(&info, true);
+ mb();
+ ACCESS_ONCE(desc->info) = info;
+
+ /* Proceed to the next buffer */
+ INC_MOD(transport->rx_uptr_allocated,
+ transport->rx->queues[0].entries);
+
+ /* Return true if the next desc has no buffer yet */
+ desc = &transport->rx_descs[transport->rx_uptr_allocated];
+ return !okl4_axon_data_info_getusr(&desc->info);
+}
+
+/* TODO: multiple queue support / small message prioritisation */
+static int transport_process_msg(struct vs_transport_axon *transport)
+{
+ struct vs_mv_service_info *service_info;
+ struct vs_mbuf_axon *mbuf;
+ vs_service_id_t service_id;
+ unsigned freed_acks;
+ u32 uptr;
+ struct okl4_axon_queue_entry *desc;
+ void **ptr;
+ okl4_axon_data_info_t info;
+
+ /* Select the descriptor to receive from */
+ uptr = ACCESS_ONCE(transport->rx->queues[0].uptr);
+ desc = &transport->rx_descs[uptr];
+ ptr = &transport->rx_ptrs[uptr];
+ info = ACCESS_ONCE(desc->info);
+
+ /* Have we emptied the whole queue? */
+ if (!okl4_axon_data_info_getusr(&info))
+ return -ENOBUFS;
+
+ /* Has the next buffer been filled yet? */
+ if (okl4_axon_data_info_getpending(&info))
+ return 0;
+
+ /* Don't read the buffer or desc before seeing a cleared pending bit */
+ rmb();
+
+ /* Is the message too small to be valid? */
+ if (desc->data_size < sizeof(vs_service_id_t))
+ return -EBADMSG;
+
+ /* Allocate and set up the mbuf */
+ mbuf = kmem_cache_alloc(mbuf_cache, GFP_ATOMIC);
+ if (!mbuf)
+ return -ENOMEM;
+
+ mbuf->owner = transport;
+ mbuf->laddr = okl4_axon_data_info_getladdr(&info);
+ mbuf->pool = NULL;
+ mbuf->base.is_recv = true;
+ mbuf->base.data = *ptr + sizeof(vs_service_id_t);
+ mbuf->base.size = desc->data_size - sizeof(vs_service_id_t);
+
+ INC_MOD(uptr, transport->rx->queues[0].entries);
+ ACCESS_ONCE(transport->rx->queues[0].uptr) = uptr;
+
+ /* Finish reading desc before clearing usr bit */
+ smp_mb();
+
+ /* Re-check the pending bit, in case we've just been reset */
+ info = ACCESS_ONCE(desc->info);
+ if (unlikely(okl4_axon_data_info_getpending(&info))) {
+ kmem_cache_free(mbuf_cache, mbuf);
+ return 0;
+ }
+
+ /* Clear usr bit; after this point the buffer is owned by the mbuf */
+ okl4_axon_data_info_setusr(&info, false);
+ ACCESS_ONCE(desc->info) = info;
+
+ /* Determine who to deliver the mbuf to */
+ service_id = transport_get_mbuf_service_id(transport,
+ mbuf_real_base(mbuf), &freed_acks);
+
+ if (service_id == MSG_SEND_FREE_BUFS) {
+ transport_handle_free_bufs_message(transport, mbuf);
+ return 1;
+ }
+
+ service_info = transport_get_service_id_info(transport, service_id);
+ if (!service_info) {
+ vs_dev_debug(VS_DEBUG_TRANSPORT,
+ transport->session_dev, transport->axon_dev,
+ "discarding message for missing service %d\n",
+ service_id);
+ __transport_free_mbuf(transport, mbuf, true);
+ return -EIDRM;
+ }
+
+ /*
+ * If the remote end has freed some buffers that we sent it, then we
+ * can decrement our send quota count by that amount.
+ */
+ freed_acks = reduce_send_quota(transport, service_info,
+ freed_acks, service_info->ready);
+
+ /* If the service has been reset, drop the message. */
+ if (!service_info->ready) {
+ vs_dev_debug(VS_DEBUG_TRANSPORT,
+ transport->session_dev, transport->axon_dev,
+ "discarding message for reset service %d\n",
+ service_id);
+
+ __transport_free_mbuf(transport, mbuf, true);
+ transport_put_service_info(service_info);
+
+ return 1;
+ }
+
+ /*
+ * Increment our recv quota since we are now holding a buffer. We
+ * will decrement it when the buffer is freed in transport_free_mbuf.
+ */
+ if (!atomic_add_unless(&service_info->recv_inflight, 1,
+ service_info->service->recv_quota)) {
+ /*
+ * Going over the recv_quota indicates that something bad
+ * has happened because either the other end has exceeded
+ * its send quota or the two ends have a disagreement about
+ * what the quota is.
+ *
+ * We free the buffer and reset the transport.
+ */
+ dev_err(transport->axon_dev,
+ "Service %d is at max receive quota %d - resetting\n",
+ service_info->service->id,
+ service_info->service->recv_quota);
+
+ transport_fatal_error(transport, "rx quota exceeded");
+
+ __transport_free_mbuf(transport, mbuf, true);
+ transport_put_service_info(service_info);
+
+ return 0;
+ }
+
+ WARN_ON(atomic_read(&service_info->recv_inflight) >
+ service_info->service->recv_quota);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "receive %zu bytes from service 0x%.2x (%d): sq=%d/%d, rq=%d/%d, freed_acks=%d, freed=%d/%d bc=%d\n",
+ mbuf->base.size, service_info->service->id, service_id,
+ atomic_read(&service_info->send_inflight),
+ service_info->service->send_quota,
+ atomic_read(&service_info->recv_inflight),
+ service_info->service->recv_quota, freed_acks,
+ atomic_read(&service_info->recv_freed),
+ service_info->recv_freed_watermark,
+ atomic_read(&transport->free_bufs_balance));
+ vs_debug_dump_mbuf(transport->session_dev, &mbuf->base);
+
+ if (vs_session_handle_message(transport->session_dev, &mbuf->base,
+ service_id) < 0)
+ transport_free_mbuf(&transport->transport,
+ service_info->service, &mbuf->base);
+
+ transport_put_service_info(service_info);
+
+ return 1;
+}
+
+static void transport_flush_tx_queues(struct vs_transport_axon *transport)
+{
+ okl4_error_t err;
+ int i;
+
+ lockdep_assert_held(&transport->readiness_lock);
+
+ /* Release any queued mbufs */
+ free_tx_mbufs(transport);
+
+ /*
+ * Re-attach the TX Axon's segment, which implicitly invalidates
+ * the queues and stops any outgoing message transfers. The queues
+ * will be reconfigured when the transport becomes ready again.
+ */
+ err = _okl4_sys_axon_set_send_segment(transport->tx_cap,
+ transport->segment, transport->segment_base);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "TX reattach failed: %d\n",
+ (int)err);
+ }
+
+ /*
+ * The TX Axon has stopped, so we can safely clear the pending
+ * bit and free the buffer for any outgoing messages, and reset uptr
+ * and kptr to 0.
+ */
+ for (i = 0; i < transport->tx->queues[0].entries; i++) {
+ if (!transport->tx_pools[i])
+ continue;
+
+ okl4_axon_data_info_setpending(
+ &transport->tx_descs[i].info, false);
+ __transport_tx_pool_free(transport->tx_pools[i],
+ okl4_axon_data_info_getladdr(
+ &transport->tx_descs[i].info));
+ transport->tx_pools[i] = NULL;
+ }
+ transport->tx->queues[0].uptr = 0;
+ transport->tx->queues[0].kptr = 0;
+ transport->tx_uptr_freed = 0;
+}
+
+static void transport_flush_rx_queues(struct vs_transport_axon *transport)
+{
+ okl4_error_t err;
+ int i;
+
+ lockdep_assert_held(&transport->readiness_lock);
+
+ /*
+	 * Re-attach the RX Axon's segment, which implicitly invalidates
+	 * the queues and stops any incoming message transfers, though the
+	 * sending end should already have cancelled those. The queues
+ * will be reconfigured when the transport becomes ready again.
+ */
+ err = _okl4_sys_axon_set_recv_segment(transport->rx_cap,
+ transport->segment, transport->segment_base);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "RX reattach failed: %d\n",
+ (int)err);
+ }
+
+ /*
+ * The RX Axon has stopped, so we can reset the pending bit on all
+ * allocated message buffers to prepare them for reuse when the reset
+ * completes.
+ */
+ for (i = 0; i < transport->rx->queues[0].entries; i++) {
+ if (okl4_axon_data_info_getusr(&transport->rx_descs[i].info))
+ okl4_axon_data_info_setpending(
+ &transport->rx_descs[i].info, true);
+ }
+
+ /*
+ * Reset kptr to the current uptr.
+ *
+ * We use a barrier here to ensure the pending bits are reset before
+ * reading uptr, matching the barrier in transport_process_msg between
+ * the uptr update and the second check of the pending bit. This means
+ * that races with transport_process_msg() will end in one of two
+ * ways:
+ *
+ * 1. transport_process_msg() updates uptr before this barrier, so the
+ * RX buffer is passed up to the session layer to be rejected there
+ * and recycled; or
+ *
+ * 2. the reset pending bit is seen by the second check in
+ * transport_process_msg(), which knows that it is being reset and
+ * can drop the message before it claims the buffer.
+ */
+ smp_mb();
+ transport->rx->queues[0].kptr =
+ ACCESS_ONCE(transport->rx->queues[0].uptr);
+
+ /*
+ * Cancel any pending freed bufs work. We can't flush it here, but
+ * that is OK: we will do so before we become ready.
+ */
+ cancel_delayed_work(&transport->free_bufs_work);
+}
+
+static bool transport_axon_reset(struct vs_transport_axon *transport)
+{
+ okl4_error_t err;
+ unsigned long flags;
+ bool reset_complete = false;
+
+ spin_lock_irqsave(&transport->readiness_lock, flags);
+
+ /*
+ * Reset the transport, dumping any messages in transit, and tell the
+ * remote end that it should do the same.
+ *
+ * We only do this if the transport is not already marked reset. Doing
+ * otherwise would be redundant.
+ */
+ if ((transport->readiness != VS_TRANSPORT_RESET) &&
+ transport->readiness != VS_TRANSPORT_LOCAL_RESET &&
+ transport->readiness != VS_TRANSPORT_REMOTE_READY) {
+ /*
+ * Flush the Axons' TX queues. We can't flush the RX queues
+ * until after the remote end has acknowledged the reset.
+ */
+ transport_flush_tx_queues(transport);
+
+ /*
+ * Raise a reset request VIRQ, and discard any incoming reset
+ * or ready notifications as they are now stale. Note that we
+ * must do this in a single syscall.
+ */
+ err = _okl4_sys_vinterrupt_clear_and_raise(
+ transport->reset_okl4_irq,
+ transport->reset_cap, 0UL,
+ VS_TRANSPORT_VIRQ_RESET_REQ).error;
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "Reset raise failed: %d\n",
+ (int)err);
+ }
+
+ /* Local reset is complete */
+ if (transport->readiness != VS_TRANSPORT_SHUTDOWN)
+ transport->readiness = VS_TRANSPORT_LOCAL_RESET;
+ } else {
+ /* Already in reset */
+ reset_complete = true;
+ }
+
+ spin_unlock_irqrestore(&transport->readiness_lock, flags);
+
+ return reset_complete;
+}
+
+static void transport_reset(struct vs_transport *_transport)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev, "reset\n");
+
+ if (transport_axon_reset(transport)) {
+ vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ "reset while already reset (no-op)\n");
+
+ vs_session_handle_reset(transport->session_dev);
+ }
+}
+
+static void transport_ready(struct vs_transport *_transport)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ okl4_error_t err;
+
+ vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ "%s: becoming ready\n", __func__);
+
+ /*
+ * Make sure any previously scheduled freed bufs work is cancelled.
+ * It should not be possible for this to be rescheduled later, as long
+ * as the transport is in reset.
+ */
+ cancel_delayed_work_sync(&transport->free_bufs_work);
+ spin_lock_irq(&transport->readiness_lock);
+
+ atomic_set(&transport->free_bufs_balance, 0);
+ transport->free_bufs_pending = false;
+
+	switch (transport->readiness) {
+ case VS_TRANSPORT_RESET:
+ transport->readiness = VS_TRANSPORT_LOCAL_READY;
+ break;
+ case VS_TRANSPORT_REMOTE_READY:
+ vs_session_handle_activate(transport->session_dev);
+ transport->readiness = VS_TRANSPORT_ACTIVE;
+ break;
+ case VS_TRANSPORT_LOCAL_RESET:
+ /*
+ * Session layer is confused; usually due to the reset at init
+ * time, which it did not explicitly request, not having
+ * completed yet. We just ignore it and wait for the reset. We
+ * could avoid this by not starting the session until the
+ * startup reset completes.
+ */
+ spin_unlock_irq(&transport->readiness_lock);
+ return;
+ case VS_TRANSPORT_SHUTDOWN:
+ /* Do nothing. */
+ spin_unlock_irq(&transport->readiness_lock);
+ return;
+ default:
+ /* Session layer is broken */
+ WARN(1, "transport_ready() called in the wrong state: %d",
+ transport->readiness);
+ goto fail;
+ }
+
+ /* Raise a ready notification VIRQ. */
+ err = _okl4_sys_vinterrupt_raise(transport->reset_cap,
+ VS_TRANSPORT_VIRQ_READY);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "Ready raise failed: %d\n",
+ (int)err);
+ goto fail;
+ }
+
+ /*
+ * Set up the Axons' queue pointers.
+ */
+ err = _okl4_sys_axon_set_send_area(transport->tx_cap,
+ transport->tx_phys, transport->tx_size);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "TX set area failed: %d\n",
+ (int)err);
+ goto fail;
+ }
+
+ err = _okl4_sys_axon_set_send_queue(transport->tx_cap,
+ transport->tx_phys);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "TX set queue failed: %d\n",
+ (int)err);
+ goto fail;
+ }
+
+ err = _okl4_sys_axon_set_recv_area(transport->rx_cap,
+ transport->rx_phys, transport->rx_size);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "RX set area failed: %d\n",
+ (int)err);
+ goto fail;
+ }
+
+ err = _okl4_sys_axon_set_recv_queue(transport->rx_cap,
+ transport->rx_phys);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "RX set queue failed: %d\n",
+ (int)err);
+ goto fail;
+ }
+
+ spin_unlock_irq(&transport->readiness_lock);
+ return;
+
+fail:
+ spin_unlock_irq(&transport->readiness_lock);
+
+ transport_axon_reset(transport);
+}
+
+static int transport_service_add(struct vs_transport *_transport,
+ struct vs_service_device *service)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ struct vs_mv_service_info *service_info;
+
+ /*
+ * We can't print out the core service add because the session
+ * isn't fully registered at that time.
+ */
+ if (service->id != 0)
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev,
+ "Add service - id = %d\n", service->id);
+
+ service_info = kzalloc(sizeof(*service_info), GFP_KERNEL);
+ if (!service_info)
+ return -ENOMEM;
+
+ kref_init(&service_info->kref);
+
+ /* Matching vs_put_service() is in transport_info_free */
+ service_info->service = vs_get_service(service);
+
+ /* Make the service_info visible */
+ rcu_assign_pointer(service->transport_priv, service_info);
+
+ __set_bit(service->id, transport->service_bitmap);
+
+ return 0;
+}
+
+static void transport_service_remove(struct vs_transport *_transport,
+ struct vs_service_device *service)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ struct vs_mv_service_info *service_info;
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev, "Remove service - id = %d\n",
+ service->id);
+
+ __clear_bit(service->id, transport->service_bitmap);
+
+ service_info = service->transport_priv;
+ rcu_assign_pointer(service->transport_priv, NULL);
+
+ if (service_info->ready) {
+ dev_err(transport->axon_dev,
+ "Removing service %d while ready\n",
+ service->id);
+ transport_fatal_error(transport, "removing ready service");
+ }
+
+ transport_put_service_info(service_info);
+}
+
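+/*
+ * Allocate a TX buffer pool for a service: a single DMA-coherent
+ * region divided into send_quota power-of-two-sized slots, with a
+ * bitmap tracking which slots are in use.
+ */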
+static struct vs_axon_tx_pool *
+transport_axon_init_tx_pool(struct vs_transport_axon *transport,
+ size_t msg_size, unsigned send_quota)
+{
+ struct vs_axon_tx_pool *pool;
+
+ pool = devm_kzalloc(transport->axon_dev, sizeof(*pool) +
+ (sizeof(unsigned long) * BITS_TO_LONGS(send_quota)),
+ GFP_KERNEL);
+ if (!pool)
+ return ERR_PTR(-ENOMEM);
+
+ pool->transport = transport;
+ pool->alloc_order = ilog2(msg_size + sizeof(vs_service_id_t));
+ pool->count = send_quota;
+
+ pool->base_vaddr = dmam_alloc_coherent(transport->axon_dev,
+ send_quota << pool->alloc_order, &pool->base_laddr,
+ GFP_KERNEL);
+ if (!pool->base_vaddr) {
+ dev_err(transport->axon_dev, "Couldn't allocate %lu times %zu bytes for TX\n",
+ (unsigned long)pool->count, (size_t)1 << pool->alloc_order);
+ devm_kfree(transport->axon_dev, pool);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ kref_init(&pool->kref);
+ return pool;
+}
+
+static int transport_service_start(struct vs_transport *_transport,
+ struct vs_service_device *service)
+{
+ struct vs_mv_service_info *service_info;
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ struct vs_notify_info *info;
+ int i, ret;
+ bool enable_rx;
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev, "Start service - id = %d\n",
+ service->id);
+
+ service_info = service->transport_priv;
+ __transport_get_service_info(service_info);
+
+ /* We shouldn't have any mbufs left from before the last reset. */
+ if (WARN_ON(atomic_read(&service_info->outstanding_frees))) {
+ transport_put_service_info(service_info);
+ return -EBUSY;
+ }
+
+ /*
+	 * The watermark is set to half of the received-message quota,
+	 * rounded up. This is fairly arbitrary. Rounding up ensures that
+	 * we don't set the watermark to 0 for services with a quota of 1
+	 * (and thus trigger infinite free_bufs messages).
+ */
+ service_info->recv_freed_watermark = (service->recv_quota + 1) / 2;
+
+ if (WARN_ON(service->notify_recv_bits + service->notify_recv_offset >
+ transport->notify_rx_nirqs * BITS_PER_LONG)) {
+ transport_put_service_info(service_info);
+ return -EINVAL;
+ }
+
+ if (WARN_ON(service->notify_send_bits + service->notify_send_offset >
+ transport->notify_tx_nirqs * BITS_PER_LONG)) {
+ transport_put_service_info(service_info);
+ return -EINVAL;
+ }
+
+ /* This is called twice for the core client only. */
+ WARN_ON(service->id != 0 && service_info->ready);
+
+ if (!service_info->ready) {
+ WARN_ON(atomic_read(&service_info->send_alloc));
+ WARN_ON(atomic_read(&service_info->recv_freed));
+ WARN_ON(atomic_read(&service_info->recv_inflight));
+ }
+
+ /* Create the TX buffer pool. */
+ WARN_ON(service->send_quota && service_info->tx_pool);
+ if (service->send_quota) {
+ service_info->tx_pool = transport_axon_init_tx_pool(transport,
+ transport->msg_size, service->send_quota);
+ if (IS_ERR(service_info->tx_pool)) {
+ ret = PTR_ERR(service_info->tx_pool);
+ service_info->tx_pool = NULL;
+ transport_put_service_info(service_info);
+ return ret;
+ }
+ }
+
+ /* Preallocate some RX buffers, if necessary. */
+ spin_lock_irq(&transport->rx_alloc_lock);
+ i = min(transport->rx_alloc_extra,
+ service->recv_quota - service_info->rx_allocated);
+ transport->rx_alloc_extra -= i;
+ service_info->rx_allocated += i;
+ spin_unlock_irq(&transport->rx_alloc_lock);
+
+ for (; service_info->rx_allocated < service->recv_quota;
+ service_info->rx_allocated++) {
+ dma_addr_t laddr;
+ struct vs_axon_rx_freelist_entry *buf =
+ dma_pool_alloc(transport->rx_pool, GFP_KERNEL, &laddr);
+ if (WARN_ON(!buf))
+ break;
+ buf->laddr = laddr;
+
+ spin_lock_irq(&transport->rx_alloc_lock);
+ list_add(&buf->list, &transport->rx_freelist);
+ spin_unlock_irq(&transport->rx_alloc_lock);
+ }
+
+ for (i = 0; i < service->notify_recv_bits; i++) {
+ unsigned bit = i + service->notify_recv_offset;
+ info = &transport->transport.notify_info[bit];
+
+ info->service_id = service->id;
+ info->offset = service->notify_recv_offset;
+ }
+
+ atomic_set(&service_info->send_inflight, 0);
+
+ /*
+ * If this is the core service and it wasn't ready before, we need to
+ * enable RX for the whole transport.
+ */
+ enable_rx = service->id == 0 && !service_info->ready;
+
+ service_info->ready = true;
+
+ /* We're now ready to receive. */
+ if (enable_rx)
+ tasklet_enable(&transport->rx_tasklet);
+
+ transport_put_service_info(service_info);
+
+ return 0;
+}
+
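+/*
+ * Reset a single service: stop accepting its incoming messages, drop
+ * its queued outgoing messages, and account for any buffers still held
+ * by drivers as outstanding frees. Returns the number of outstanding
+ * frees; if this is nonzero, the service remains disabled until the
+ * last such buffer is returned via transport_free_mbuf().
+ */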
+static int transport_service_reset(struct vs_transport *_transport,
+ struct vs_service_device *service)
+{
+ struct vs_mv_service_info *service_info;
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+ struct vs_mbuf_axon *child, *tmp;
+ int ret = 0, service_id, send_remaining, recv_remaining;
+
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev, "Reset service - id = %d\n",
+ service->id);
+
+ service_info = service->transport_priv;
+ __transport_get_service_info(service_info);
+
+ /*
+ * Clear the ready bit with the tasklet disabled. After this point,
+ * incoming messages will be discarded by transport_process_msg()
+ * without incrementing recv_inflight, so we won't spuriously see
+ * nonzero recv_inflight values for messages that would be discarded
+ * in the session layer.
+ */
+ tasklet_disable(&transport->rx_tasklet);
+ service_info->ready = false;
+ if (service->id)
+ tasklet_enable(&transport->rx_tasklet);
+
+ /*
+ * Cancel and free all pending outgoing messages for the service being
+ * reset; i.e. those that have been sent by the service but are not
+ * yet in the axon queue.
+ *
+ * Note that this does not clean out the axon queue; messages there
+ * are already visible to OKL4 and may be transferred at any time,
+ * so we treat those as already sent.
+ */
+ spin_lock_irq(&transport->readiness_lock);
+ list_for_each_entry_safe(child, tmp, &transport->tx_queue, base.queue) {
+ service_id = transport_get_mbuf_service_id(transport,
+ mbuf_real_base(child), NULL);
+ if (service_id == service->id) {
+ list_del(&child->base.queue);
+			__transport_free_mbuf(transport, child, false);
+ }
+ }
+ spin_unlock_irq(&transport->readiness_lock);
+
+ /*
+ * If any buffers remain allocated, we mark them as outstanding frees.
+ * The transport will remain disabled until this count goes to zero.
+ */
+ send_remaining = atomic_read(&service_info->send_alloc);
+ recv_remaining = atomic_read(&service_info->recv_inflight);
+ ret = atomic_add_return(send_remaining + recv_remaining,
+ &service_info->outstanding_frees);
+ dev_dbg(transport->axon_dev, "reset service %d with %d outstanding (send %d, recv %d)\n",
+ service->id, ret, send_remaining, recv_remaining);
+
+ /*
+ * Reduce the send alloc count to 0, accounting for races with frees,
+ * which might have reduced either the alloc count or the outstanding
+ * count.
+ */
+ while (send_remaining > 0) {
+ unsigned new_send_remaining = atomic_cmpxchg(
+ &service_info->send_alloc, send_remaining, 0);
+ if (send_remaining == new_send_remaining) {
+ smp_mb();
+ break;
+ }
+ WARN_ON(send_remaining < new_send_remaining);
+ ret = atomic_sub_return(send_remaining - new_send_remaining,
+ &service_info->outstanding_frees);
+ send_remaining = new_send_remaining;
+ dev_dbg(transport->axon_dev, "failed to zero send quota, now %d outstanding (%d send)\n",
+ ret, send_remaining);
+ }
+
+ /* Repeat the above for the recv inflight count. */
+ while (recv_remaining > 0) {
+ unsigned new_recv_remaining = atomic_cmpxchg(
+ &service_info->recv_inflight, recv_remaining,
+ 0);
+ if (recv_remaining == new_recv_remaining) {
+ smp_mb();
+ break;
+ }
+ WARN_ON(recv_remaining < new_recv_remaining);
+ ret = atomic_sub_return(recv_remaining - new_recv_remaining,
+ &service_info->outstanding_frees);
+ recv_remaining = new_recv_remaining;
+		dev_dbg(transport->axon_dev, "failed to zero recv quota, now %d outstanding (%d recv)\n",
+ ret, recv_remaining);
+ }
+
+ /* The outstanding frees count should never go negative */
+ WARN_ON(ret < 0);
+
+ /* Discard any outstanding freed buffer notifications. */
+ atomic_set(&service_info->recv_freed, 0);
+
+ /*
+ * Wait for any previously queued free_bufs work to finish. This
+ * guarantees that any freed buffer notifications that are already in
+ * progress will be sent to the remote end before we return, and thus
+ * before the reset is signalled.
+ */
+ flush_delayed_work(&transport->free_bufs_work);
+
+ if (!ret)
+ transport_free_mbuf_pools(transport, service, service_info);
+
+ transport_put_service_info(service_info);
+
+ return ret;
+}
+
+static ssize_t transport_service_send_avail(struct vs_transport *_transport,
+ struct vs_service_device *service)
+{
+ struct vs_mv_service_info *service_info;
+ ssize_t count = 0;
+
+ service_info = service->transport_priv;
+ if (!service_info)
+ return -EINVAL;
+
+ __transport_get_service_info(service_info);
+
+ count = service->send_quota -
+ atomic_read(&service_info->send_inflight);
+
+ transport_put_service_info(service_info);
+
+ return count < 0 ? 0 : count;
+}
+
+static void transport_get_notify_bits(struct vs_transport *_transport,
+ unsigned *send_notify_bits, unsigned *recv_notify_bits)
+{
+ struct vs_transport_axon *transport = to_vs_transport_axon(_transport);
+
+ *send_notify_bits = transport->notify_tx_nirqs * BITS_PER_LONG;
+ *recv_notify_bits = transport->notify_rx_nirqs * BITS_PER_LONG;
+}
+
+static void transport_get_quota_limits(struct vs_transport *_transport,
+ unsigned *send_quota, unsigned *recv_quota)
+{
+ /*
+ * This driver does not need to enforce a quota limit, because message
+ * buffers are allocated from the kernel heap rather than a fixed
+ * buffer area. The queue length only determines the maximum size of
+ * a message batch, and the number of preallocated RX buffers.
+ *
+ * Note that per-service quotas are still enforced; there is simply no
+ * hard limit on the total of all service quotas.
+ */
+
+ *send_quota = UINT_MAX;
+ *recv_quota = UINT_MAX;
+}
+
+static const struct vs_transport_vtable tvt = {
+ .alloc_mbuf = transport_alloc_mbuf,
+ .free_mbuf = transport_free_mbuf,
+ .mbuf_size = transport_mbuf_size,
+ .max_mbuf_size = transport_max_mbuf_size,
+ .send = transport_send,
+ .flush = transport_flush,
+ .notify = transport_notify,
+ .reset = transport_reset,
+ .ready = transport_ready,
+ .service_add = transport_service_add,
+ .service_remove = transport_service_remove,
+ .service_start = transport_service_start,
+ .service_reset = transport_service_reset,
+ .service_send_avail = transport_service_send_avail,
+ .get_notify_bits = transport_get_notify_bits,
+ .get_quota_limits = transport_get_quota_limits,
+};
+
+/* Incoming notification handling for client */
+static irqreturn_t transport_axon_notify_virq(int irq, void *priv)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+ struct vs_notify_info *n_info;
+ unsigned long offset, bit = 0, notification;
+ int word;
+ okl4_virq_flags_t payload = okl4_get_virq_payload(irq);
+
+ for (word = 0; word < transport->notify_rx_nirqs; word++)
+ if (irq == transport->notify_irq[word])
+ break;
+
+ if (word == transport->notify_rx_nirqs) {
+ dev_err(transport->axon_dev, "Bad IRQ %d\n", irq);
+ return IRQ_NONE;
+ }
+
+ vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ "Got notification irq\n");
+
+#if defined(__BIG_ENDIAN)
+ /*
+ * We rely on being able to use the Linux bitmap operations directly
+ * on the VIRQ payload.
+ */
+ BUILD_BUG_ON((sizeof(payload) % sizeof(unsigned long)) != 0);
+#endif
+
+ for_each_set_bit(bit, (unsigned long *)&payload, sizeof(payload) * 8) {
+ offset = bit + word * BITS_PER_LONG;
+
+ /*
+ * We need to know which service ID is associated with each
+ * notification bit here. The transport is informed of the
+ * notification-bit to service-ID mapping during the initial
+ * handshake protocol.
+ */
+ n_info = &transport->transport.notify_info[offset];
+
+ notification = 1UL << (offset - n_info->offset);
+ vs_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ "Got notification bit %lu for service %d\n",
+ notification, n_info->service_id);
+
+ /* FIXME: Jira ticket SDK-2145 - shivanik. */
+ vs_session_handle_notify(transport->session_dev, notification,
+ n_info->service_id);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t transport_axon_reset_irq(int irq, void *priv)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+ bool do_reset = false;
+
+ u32 payload = okl4_get_virq_payload(irq);
+
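+ /*
+ * Reset handshake: RESET_REQ asks the remote end to reset, RESET_ACK
+ * acknowledges a request and implies the sender has flushed its TX
+ * queues, and READY announces that a side has finished resetting and
+ * is ready to activate the session.
+ */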
+ spin_lock(&transport->readiness_lock);
+
+ if (payload & VS_TRANSPORT_VIRQ_RESET_REQ) {
+ okl4_error_t err;
+
+ transport->readiness = VS_TRANSPORT_RESET;
+
+ /* Flush the queues in both directions */
+ transport_flush_tx_queues(transport);
+ transport_flush_rx_queues(transport);
+
+ /*
+ * When sending an ack, it is important to cancel any earlier
+ * ready notification, so the recipient can safely assume that
+ * the ack precedes any ready it sees
+ */
+ err = _okl4_sys_vinterrupt_modify(transport->reset_cap,
+ ~VS_TRANSPORT_VIRQ_READY,
+ VS_TRANSPORT_VIRQ_RESET_ACK);
+ if (err != OKL4_OK) {
+ dev_warn(transport->axon_dev,
+ "Error sending reset ack: %d\n", (int)err);
+ }
+
+ /*
+ * Discard any pending ready event; it must have happened
+ * before the reset request was raised, because we had not
+ * yet sent the reset ack.
+ */
+ payload = 0;
+ do_reset = true;
+ } else if (payload & VS_TRANSPORT_VIRQ_RESET_ACK) {
+ transport->readiness = VS_TRANSPORT_RESET;
+
+ /*
+ * Flush the RX queues, as we know at this point that the
+ * other end has flushed its TX queues.
+ */
+ transport_flush_rx_queues(transport);
+
+ /*
+ * Preserve any pending ready event; it must have been
+ * generated after the ack (see above)
+ */
+ payload &= VS_TRANSPORT_VIRQ_READY;
+ do_reset = true;
+ }
+
+ if (do_reset) {
+ /*
+ * Reset the session. Note that duplicate calls to this are
+ * expected if there are duplicate resets; they don't
+ * necessarily match activate calls.
+ */
+ vs_session_handle_reset(transport->session_dev);
+ }
+
+ if (payload & VS_TRANSPORT_VIRQ_READY) {
+ if (transport->readiness == VS_TRANSPORT_RESET) {
+ transport->readiness = VS_TRANSPORT_REMOTE_READY;
+ } else if (transport->readiness == VS_TRANSPORT_LOCAL_READY) {
+ vs_session_handle_activate(transport->session_dev);
+ transport->readiness = VS_TRANSPORT_ACTIVE;
+ } else {
+ /* Ready lost a race with reset; ignore it. */
+ }
+ }
+
+ spin_unlock(&transport->readiness_lock);
+
+ return IRQ_HANDLED;
+}
+
+/*
+ * Axon VIRQ handling.
+ */
+static irqreturn_t transport_axon_rx_irq(int irq, void *priv)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+
+ okl4_axon_virq_flags_t flags = okl4_get_virq_payload(irq);
+
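+ /*
+ * A fault flag indicates the Microvisor detected a bad buffer or
+ * queue pointer; the only recovery is a full transport reset.
+ */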
+ if (okl4_axon_virq_flags_getfault(&flags)) {
+ dev_err_ratelimited(transport->axon_dev,
+ "fault on RX axon buffer or queue; resetting\n");
+ transport_axon_reset(transport);
+ } else if (okl4_axon_virq_flags_getready(&flags)) {
+ tasklet_schedule(&transport->rx_tasklet);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t transport_axon_tx_irq(int irq, void *priv)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)priv;
+
+ okl4_axon_virq_flags_t flags = okl4_get_virq_payload(irq);
+
+ if (okl4_axon_virq_flags_getfault(&flags)) {
+ dev_err_ratelimited(transport->axon_dev,
+ "fault on TX axon buffer or queue; resetting\n");
+ transport_axon_reset(transport);
+ } else if (okl4_axon_virq_flags_getready(&flags)) {
+ spin_lock(&transport->readiness_lock);
+ if (!list_empty(&transport->tx_queue))
+ tasklet_schedule(&transport->tx_tasklet);
+ spin_unlock(&transport->readiness_lock);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void transport_rx_tasklet(unsigned long data)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+ int status;
+ struct _okl4_sys_axon_process_recv_return recv_result;
+
+ /* Refill the RX queue */
+ spin_lock_irq(&transport->rx_alloc_lock);
+ while (!list_empty(&transport->rx_freelist)) {
+ struct vs_axon_rx_freelist_entry *buf;
+ buf = list_first_entry(&transport->rx_freelist,
+ struct vs_axon_rx_freelist_entry, list);
+ list_del(&buf->list);
+ status = transport_rx_queue_buffer(transport, buf, buf->laddr);
+ if (status < 0)
+ list_add(&buf->list, &transport->rx_freelist);
+ if (status <= 0)
+ break;
+ }
+ spin_unlock_irq(&transport->rx_alloc_lock);
+
+ /* Start the transfer */
+ recv_result = _okl4_sys_axon_process_recv(transport->rx_cap,
+ MAX_TRANSFER_CHUNK);
+
+ if (recv_result.error == OKL4_OK) {
+ status = 1;
+ } else {
+ status = okl4_error_to_errno(recv_result.error);
+ vs_dev_debug(VS_DEBUG_TRANSPORT, transport->session_dev,
+ transport->axon_dev, "rx syscall fail: %d",
+ status);
+ }
+
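+ /*
+ * transport_process_msg() returns a positive value while more
+ * messages remain in the current batch, zero when the batch is
+ * exhausted, and a negative errno on failure; each failure case is
+ * handled separately below.
+ */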
+ /* Process the received messages */
+ while (status > 0)
+ status = transport_process_msg(transport);
+
+ if (status == -ENOMEM) {
+ /* Give kswapd some time to reclaim pages */
+ mod_timer(&transport->rx_retry_timer, jiffies + HZ);
+ } else if (status == -ENOBUFS) {
+ /*
+ * Reschedule ourselves if more RX buffers are available,
+ * otherwise do nothing until a buffer is freed
+ */
+ spin_lock_irq(&transport->rx_alloc_lock);
+ if (!list_empty(&transport->rx_freelist))
+ tasklet_schedule(&transport->rx_tasklet);
+ spin_unlock_irq(&transport->rx_alloc_lock);
+ } else if (!status && !recv_result.send_empty) {
+ /* There are more messages waiting; reschedule */
+ tasklet_schedule(&transport->rx_tasklet);
+ } else if (status < 0 && status != -ECONNRESET) {
+ /* Something else went wrong, other than a reset */
+ dev_err(transport->axon_dev, "Fatal RX error %d\n", status);
+ transport_fatal_error(transport, "rx failure");
+ } else {
+ /* Axon is empty; wait for an RX interrupt */
+ }
+}
+
+static void transport_tx_tasklet(unsigned long data)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+ struct vs_mbuf_axon *mbuf;
+ vs_service_id_t service_id;
+ int err;
+
+ spin_lock_irq(&transport->readiness_lock);
+
+ /* Check to see if there is anything in the queue to send */
+ if (list_empty(&transport->tx_queue)) {
+ /*
+ * Queue is empty, probably because a service reset cancelled
+ * some pending messages. Nothing to do.
+ */
+ spin_unlock_irq(&transport->readiness_lock);
+ return;
+ }
+
+ /*
+ * Try to send the mbuf. If it can't, the channel must be
+ * full again so wait until the next can send event.
+ */
+ mbuf = list_first_entry(&transport->tx_queue, struct vs_mbuf_axon,
+ base.queue);
+
+ service_id = transport_get_mbuf_service_id(transport,
+ mbuf_real_base(mbuf), NULL);
+
+ err = __transport_send(transport, mbuf, service_id,
+ VS_TRANSPORT_SEND_FLAGS_MORE);
+ if (err == -ENOSPC) {
+ /*
+ * The channel is currently full. Leave the message in the
+ * queue and try again when it has emptied.
+ */
+ __transport_flush(transport);
+ goto out_unlock;
+ }
+ if (err) {
+ /*
+ * We cannot properly handle a message send error here because
+ * we have already returned success for the send to the service
+ * driver when the message was queued. We don't want to leave
+ * the message in the queue, since it could cause a DoS if the
+ * error is persistent. Give up and force a transport reset.
+ */
+ dev_err(transport->axon_dev,
+ "Failed to send queued mbuf: %d\n", err);
+ spin_unlock_irq(&transport->readiness_lock);
+ transport_fatal_error(transport, "queued send failure");
+ return;
+ }
+
+ /* Message sent, remove it from the queue and free the local copy */
+ list_del(&mbuf->base.queue);
+ transport_free_sent_mbuf(transport, mbuf);
+
+ /* Check to see if we have run out of messages to send */
+ if (list_empty(&transport->tx_queue)) {
+ /* Nothing left in the queue; flush and return */
+ __transport_flush(transport);
+ } else {
+ /* Reschedule to send the next message */
+ tasklet_schedule(&transport->tx_tasklet);
+ }
+
+out_unlock:
+ spin_unlock_irq(&transport->readiness_lock);
+}
+
+static void transport_rx_retry_timer(unsigned long data)
+{
+ struct vs_transport_axon *transport = (struct vs_transport_axon *)data;
+
+ /* Try to receive again; hopefully we have memory now */
+ tasklet_schedule(&transport->rx_tasklet);
+}
+
+/* Transport device management */
+
+static int alloc_notify_info(struct device *dev, struct vs_notify_info **info,
+ int *info_size, int virqs)
+{
+ /* Each VIRQ can handle BITS_PER_LONG notifications */
+ *info_size = sizeof(struct vs_notify_info) * (virqs * BITS_PER_LONG);
+ *info = devm_kzalloc(dev, *info_size, GFP_KERNEL);
+ if (!(*info))
+ return -ENOMEM;
+
+ return 0;
+}
+
+static int transport_axon_probe_virqs(struct vs_transport_axon *transport)
+{
+ struct device *device = transport->axon_dev;
+ struct device_node *axon_node = device->of_node;
+ struct device_node *vs_node = transport->of_node;
+ struct irq_data *irqd;
+ struct property *irqlines;
+ int ret, num_virq_lines;
+ struct device_node *virq_node = NULL;
+ u32 cap;
+ int i, irq_count;
+
+ if (of_irq_count(axon_node) < 2) {
+ dev_err(device, "Missing axon interrupts\n");
+ return -ENODEV;
+ }
+
+ irq_count = of_irq_count(vs_node);
+ if (irq_count < 1) {
+ dev_err(device, "Missing reset interrupt\n");
+ return -ENODEV;
+ } else if (irq_count > 1 + MAX_NOTIFICATION_LINES) {
+ dev_warn(device,
+ "Too many notification interrupts; only the first %d will be used\n",
+ MAX_NOTIFICATION_LINES);
+ }
+
+ /* Find the TX and RX axon IRQs and the reset IRQ */
+ transport->tx_irq = irq_of_parse_and_map(axon_node, 0);
+ if (!transport->tx_irq) {
+ dev_err(device, "No TX IRQ\n");
+ return -ENODEV;
+ }
+
+ transport->rx_irq = irq_of_parse_and_map(axon_node, 1);
+ if (!transport->rx_irq) {
+ dev_err(device, "No RX IRQ\n");
+ return -ENODEV;
+ }
+
+ transport->reset_irq = irq_of_parse_and_map(vs_node, 0);
+ if (!transport->reset_irq) {
+ dev_err(device, "No reset IRQ\n");
+ return -ENODEV;
+ }
+ irqd = irq_get_irq_data(transport->reset_irq);
+ if (!irqd) {
+ dev_err(device, "No reset IRQ data\n");
+ return -ENODEV;
+ }
+ transport->reset_okl4_irq = irqd_to_hwirq(irqd);
+
+ /* Find the notification IRQs */
+ transport->notify_rx_nirqs = irq_count - 1;
+ for (i = 0; i < transport->notify_rx_nirqs; i++) {
+ transport->notify_irq[i] = irq_of_parse_and_map(vs_node,
+ i + 1);
+ if (!transport->notify_irq[i]) {
+ dev_err(device, "Bad notify IRQ\n");
+ return -ENODEV;
+ }
+ }
+
+ /* Find all outgoing virq lines */
+ irqlines = of_find_property(vs_node, "okl,interrupt-lines", NULL);
+ if (!irqlines || irqlines->length < sizeof(u32)) {
+ dev_err(device, "No VIRQ sources found");
+ return -ENODEV;
+ }
+ num_virq_lines = irqlines->length / sizeof(u32);
+
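+ /*
+ * Entry 0 of "okl,interrupt-lines" is the outgoing reset line; any
+ * remaining entries are outgoing notification lines.
+ */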
+ virq_node = of_parse_phandle(vs_node, "okl,interrupt-lines", 0);
+ if (!virq_node) {
+ dev_err(device, "No reset VIRQ line object\n");
+ return -ENODEV;
+ }
+ ret = of_property_read_u32(virq_node, "reg", &cap);
+ if (ret || cap == OKL4_KCAP_INVALID) {
+ dev_err(device, "Bad reset VIRQ line\n");
+ return -ENODEV;
+ }
+ transport->reset_cap = cap;
+
+ transport->notify_tx_nirqs = num_virq_lines - 1;
+ for (i = 0; i < transport->notify_tx_nirqs; i++) {
+ virq_node = of_parse_phandle(vs_node, "okl,interrupt-lines",
+ i + 1);
+ if (!virq_node) {
+ dev_err(device, "No notify VIRQ line object\n");
+ return -ENODEV;
+ }
+ ret = of_property_read_u32(virq_node, "reg", &cap);
+ if (ret || cap == OKL4_KCAP_INVALID) {
+ dev_err(device, "Bad notify VIRQ line\n");
+ return -ENODEV;
+ }
+ transport->notify_cap[i] = cap;
+ }
+
+ return 0;
+}
+
+static int transport_axon_request_irqs(struct vs_transport_axon *transport)
+{
+ struct device *device = transport->axon_dev;
+ int i, ret;
+
+ ret = devm_request_irq(device, transport->reset_irq,
+ transport_axon_reset_irq, IRQF_TRIGGER_HIGH,
+ dev_name(transport->axon_dev), transport);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_irq(device, transport->tx_irq,
+ transport_axon_tx_irq, IRQF_TRIGGER_HIGH,
+ dev_name(transport->axon_dev), transport);
+ if (ret < 0)
+ return ret;
+
+ ret = devm_request_irq(device, transport->rx_irq,
+ transport_axon_rx_irq, IRQF_TRIGGER_HIGH,
+ dev_name(transport->axon_dev), transport);
+ if (ret < 0)
+ return ret;
+
+ for (i = 0; i < transport->notify_rx_nirqs; i++) {
+ ret = devm_request_irq(device, transport->notify_irq[i],
+ transport_axon_notify_virq, IRQF_TRIGGER_HIGH,
+ dev_name(transport->axon_dev), transport);
+ if (ret < 0)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int transport_axon_setup_descs(struct vs_transport_axon *transport)
+{
+ const int rx_buffer_order = ilog2(transport->msg_size +
+ sizeof(vs_service_id_t));
+ const size_t rx_queue_size = sizeof(*transport->rx) +
+ (sizeof(*transport->rx_descs) * transport->queue_size) +
+ (sizeof(*transport->rx_ptrs) * transport->queue_size);
+ const size_t tx_queue_size = sizeof(*transport->tx) +
+ (sizeof(*transport->tx_descs) * transport->queue_size);
+ const size_t queue_size = ALIGN(rx_queue_size,
+ __alignof__(*transport->tx)) + tx_queue_size;
+
+ struct _okl4_sys_mmu_lookup_pn_return lookup_return;
+ void *queue;
+ struct device_node *seg_node;
+ u32 seg_index;
+ okl4_kcap_t seg_cap;
+ okl4_error_t err;
+ dma_addr_t dma_handle;
+ const __be32 *prop;
+ int len, ret;
+
+ /*
+ * Allocate memory for the queue descriptors.
+ *
+ * We allocate one block for both rx and tx because the minimum
+ * allocation from dmam_alloc_coherent is usually a whole page.
+ */
+ ret = -ENOMEM;
+ queue = dmam_alloc_coherent(transport->axon_dev, queue_size,
+ &dma_handle, GFP_KERNEL);
+ if (queue == NULL) {
+ dev_err(transport->axon_dev, "Failed to allocate %zd bytes for queue descriptors\n",
+ queue_size);
+ goto fail_alloc_dma;
+ }
+ memset(queue, 0, queue_size);
+
+ /*
+ * Find the OKL4 physical segment object to attach to the axons.
+ *
+ * If the device has a CMA area, and the cell's memory segments have
+ * not been split unnecessarily, then all allocations through the DMA
+ * API for this device will be within a single segment. So, we can
+ * simply look up the segment that contains the queue.
+ *
+ * The location and size of the CMA area can be configured elsewhere.
+ * In 3.12 and later a device-specific area can be reserved via the
+ * standard device tree reserved-memory properties. Otherwise, the
+ * global area will be used, which has a size configurable on the
+ * kernel command line and defaults to 16MB.
+ */
+
+ /* Locate the physical segment */
+ ret = -ENODEV;
+ lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+ dma_handle >> OKL4_DEFAULT_PAGEBITS, -1);
+ err = okl4_mmu_lookup_index_geterror(&lookup_return.segment_index);
+ if (err == OKL4_ERROR_NOT_IN_SEGMENT) {
+ dev_err(transport->axon_dev,
+ "No segment found for DMA address %pK (%#llx)!\n",
+ queue, (unsigned long long)dma_handle);
+ goto fail_lookup_segment;
+ }
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev,
+ "Could not look up segment for DMA address %pK (%#llx): OKL4 error %d\n",
+ queue, (unsigned long long)dma_handle,
+ (int)err);
+ goto fail_lookup_segment;
+ }
+ seg_index = okl4_mmu_lookup_index_getindex(&lookup_return.segment_index);
+
+ dev_dbg(transport->axon_dev, "lookup pn %#lx got error %ld segment %ld count %lu offset %#lx\n",
+ (long)(dma_handle >> OKL4_DEFAULT_PAGEBITS),
+ (long)err, (long)seg_index,
+ (unsigned long)lookup_return.count_pn,
+ (unsigned long)lookup_return.offset_pn);
+
+ /* Locate the physical segment's OF node */
+ for_each_compatible_node(seg_node, NULL, "okl,microvisor-segment") {
+ u32 attach_index;
+ ret = of_property_read_u32(seg_node, "okl,segment-attachment",
+ &attach_index);
+ if (!ret && attach_index == seg_index)
+ break;
+ }
+ if (seg_node == NULL) {
+ ret = -ENXIO;
+ dev_err(transport->axon_dev, "No physical segment found for %pK\n",
+ queue);
+ goto fail_lookup_segment;
+ }
+
+ /* Determine the physical segment's cap */
+ prop = of_get_property(seg_node, "reg", &len);
+ ret = !!prop ? 0 : -EPERM;
+ if (!ret)
+ seg_cap = of_read_number(prop, of_n_addr_cells(seg_node));
+ if (!ret && seg_cap == OKL4_KCAP_INVALID)
+ ret = -ENXIO;
+ if (ret < 0) {
+ dev_err(transport->axon_dev, "missing physical-segment cap\n");
+ goto fail_lookup_segment;
+ }
+ transport->segment = seg_cap;
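+ /*
+ * Derive the guest-physical base address of the segment from the
+ * lookup result, so that axon buffer addresses can be expressed
+ * relative to the attached segment.
+ */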
+ transport->segment_base =
+ (round_down(dma_handle >> OKL4_DEFAULT_PAGEBITS,
+ lookup_return.count_pn) -
+ lookup_return.offset_pn) << OKL4_DEFAULT_PAGEBITS;
+
+ dev_dbg(transport->axon_dev, "physical segment cap is %#lx, base %#llx\n",
+ (unsigned long)transport->segment,
+ (unsigned long long)transport->segment_base);
+
+ /* Attach the segment to the Axon endpoints */
+ err = _okl4_sys_axon_set_send_segment(transport->tx_cap,
+ transport->segment, transport->segment_base);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "TX attach failed: %d\n",
+ (int)err);
+ ret = okl4_error_to_errno(err);
+ goto fail_attach;
+ }
+
+ err = _okl4_sys_axon_set_recv_segment(transport->rx_cap,
+ transport->segment, transport->segment_base);
+ if (err != OKL4_OK) {
+ dev_err(transport->axon_dev, "RX attach failed: %d\n",
+ (int)err);
+ ret = okl4_error_to_errno(err);
+ goto fail_attach;
+ }
+
+ /* Array of pointers to the source TX pool for each outgoing buffer. */
+ transport->tx_pools = devm_kzalloc(transport->axon_dev,
+ sizeof(*transport->tx_pools) * transport->queue_size,
+ GFP_KERNEL);
+ if (!transport->tx_pools) {
+ ret = -ENOMEM;
+ goto fail_alloc_tx_pools;
+ }
+
+ /* Set up the rx queue descriptors. */
+ transport->rx = queue;
+ transport->rx_phys = dma_handle;
+ transport->rx_size = rx_queue_size;
+ transport->rx_descs = (void *)(transport->rx + 1);
+ transport->rx_ptrs = (void *)(transport->rx_descs + transport->queue_size);
+ okl4_axon_queue_size_setallocorder(&transport->rx->queue_sizes[0],
+ rx_buffer_order);
+ transport->rx->queues[0].queue_offset = sizeof(*transport->rx);
+ transport->rx->queues[0].entries = transport->queue_size;
+ transport->rx->queues[0].uptr = 0;
+ transport->rx->queues[0].kptr = 0;
+ transport->rx_uptr_allocated = 0;
+
+ /* Set up the tx queue descriptors. */
+ transport->tx = queue + ALIGN(rx_queue_size,
+ __alignof__(*transport->tx));
+ transport->tx_phys = dma_handle + ((void *)transport->tx - queue);
+ transport->tx_size = tx_queue_size;
+ transport->tx_descs = (void *)(transport->tx + 1);
+ transport->tx->queues[0].queue_offset = sizeof(*transport->tx);
+ transport->tx->queues[0].entries = transport->queue_size;
+ transport->tx->queues[0].uptr = 0;
+ transport->tx->queues[0].kptr = 0;
+ transport->tx_uptr_freed = 0;
+
+ /* Create a DMA pool for the RX buffers. */
+ transport->rx_pool = dmam_pool_create("vs_axon_rx_pool",
+ transport->axon_dev, 1 << rx_buffer_order,
+ max(dma_get_cache_alignment(),
+ 1 << OKL4_PRESHIFT_LADDR_AXON_DATA_INFO), 0);
+ if (!transport->rx_pool) {
+ ret = -ENOMEM;
+ goto fail_alloc_tx_pools;
+ }
+
+ return 0;
+
+fail_alloc_tx_pools:
+fail_attach:
+fail_lookup_segment:
+ dmam_free_coherent(transport->axon_dev, queue_size, queue, dma_handle);
+fail_alloc_dma:
+ return ret;
+}
+
+static void transport_axon_free_descs(struct vs_transport_axon *transport)
+{
+ int i;
+
+ tasklet_disable(&transport->rx_tasklet);
+ tasklet_kill(&transport->rx_tasklet);
+
+ tasklet_disable(&transport->tx_tasklet);
+ tasklet_kill(&transport->tx_tasklet);
+
+ cancel_delayed_work_sync(&transport->free_bufs_work);
+
+ transport->tx = NULL;
+ transport->tx_descs = NULL;
+
+ for (i = 0; i < transport->rx->queues[0].entries; i++) {
+ struct okl4_axon_queue_entry *desc = &transport->rx_descs[i];
+
+ if (okl4_axon_data_info_getusr(&desc->info)) {
+ void *ptr = transport->rx_ptrs[i];
+ dma_addr_t dma = okl4_axon_data_info_getladdr(&desc->info);
+ dma_pool_free(transport->rx_pool, ptr, dma);
+ }
+ }
+
+ transport->rx = NULL;
+ transport->rx_descs = NULL;
+ transport->rx_ptrs = NULL;
+
+ /* Let devm free the queues so we don't have to keep the dma handle */
+}
+
+static int transport_axon_probe(struct platform_device *dev)
+{
+ struct vs_transport_axon *priv = NULL;
+ u32 cap[2];
+ u32 queue_size, msg_size;
+ int ret, i;
+ const char* name;
+
+ if (!dev_get_cma_area(&dev->dev) && !okl4_single_physical_segment) {
+ dev_err(&dev->dev, "Multiple physical segments, but CMA is disabled\n");
+ return -ENOSYS;
+ }
+
+ dev->dev.coherent_dma_mask = ~(u64)0;
+ dev->dev.archdata.dma_ops = &axon_dma_ops;
+
+ priv = devm_kzalloc(&dev->dev, sizeof(struct vs_transport_axon) +
+ sizeof(unsigned long), GFP_KERNEL);
+ if (priv == NULL) {
+ dev_err(&dev->dev, "create transport object failed\n");
+ ret = -ENOMEM;
+ goto err_alloc_priv;
+ }
+ dev_set_drvdata(&dev->dev, priv);
+
+ priv->of_node = of_get_child_by_name(dev->dev.of_node,
+ "virtual-session");
+ if ((!priv->of_node) ||
+ (!of_device_is_compatible(priv->of_node,
+ "okl,virtual-session"))) {
+ dev_err(&dev->dev, "missing virtual-session node\n");
+ ret = -ENODEV;
+ goto error_of_node;
+ }
+
+ name = dev->dev.of_node->full_name;
+ of_property_read_string(dev->dev.of_node, "label", &name);
+
+ if (of_property_read_bool(priv->of_node, "okl,is-client")) {
+ priv->is_server = false;
+ } else if (of_property_read_bool(priv->of_node, "okl,is-server")) {
+ priv->is_server = true;
+ } else {
+ dev_err(&dev->dev, "virtual-session node is not marked as client or server\n");
+ ret = -ENODEV;
+ goto error_of_node;
+ }
+
+ priv->transport.vt = &tvt;
+ priv->transport.type = "microvisor";
+ priv->axon_dev = &dev->dev;
+
+ /* Read the Axon caps */
+ ret = of_property_read_u32_array(dev->dev.of_node, "reg", cap, 2);
+ if (ret < 0 || cap[0] == OKL4_KCAP_INVALID ||
+ cap[1] == OKL4_KCAP_INVALID) {
+ dev_err(&dev->dev, "missing axon endpoint caps\n");
+ ret = -ENODEV;
+ goto error_of_node;
+ }
+ priv->tx_cap = cap[0];
+ priv->rx_cap = cap[1];
+
+ /* Set transport properties; the defaults (a 16-entry queue of page-sized messages) give a 64KB buffer with 4KB pages */
+ queue_size = 16;
+ (void)of_property_read_u32(priv->of_node, "okl,queue-length",
+ &queue_size);
+ priv->queue_size = max((size_t)queue_size, MIN_QUEUE_SIZE);
+
+ msg_size = PAGE_SIZE - sizeof(vs_service_id_t);
+ (void)of_property_read_u32(priv->of_node, "okl,message-size",
+ &msg_size);
+ priv->msg_size = max((size_t)msg_size, MIN_MSG_SIZE);
+
+ /*
+ * Since the Axon API requires received message size limits to be
+ * powers of two, we must round up the message size (including the
+ * space reserved for the service ID).
+ */
+ priv->msg_size = roundup_pow_of_two(priv->msg_size +
+ sizeof(vs_service_id_t)) - sizeof(vs_service_id_t);
+ if (priv->msg_size != msg_size)
+ dev_info(&dev->dev, "message size rounded up from %zd to %zd\n",
+ (size_t)msg_size, priv->msg_size);
+
+ INIT_LIST_HEAD(&priv->tx_queue);
+
+ /* Initialise the activation state, tasklets, and RX retry timer */
+ spin_lock_init(&priv->readiness_lock);
+ priv->readiness = VS_TRANSPORT_INIT;
+
+ tasklet_init(&priv->rx_tasklet, transport_rx_tasklet,
+ (unsigned long)priv);
+ tasklet_init(&priv->tx_tasklet, transport_tx_tasklet,
+ (unsigned long)priv);
+
+ INIT_DELAYED_WORK(&priv->free_bufs_work, transport_free_bufs_work);
+ spin_lock_init(&priv->rx_alloc_lock);
+ priv->rx_alloc_extra = 0;
+ INIT_LIST_HEAD(&priv->rx_freelist);
+
+ setup_timer(&priv->rx_retry_timer, transport_rx_retry_timer,
+ (unsigned long)priv);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+ set_timer_slack(&priv->rx_retry_timer, HZ);
+#endif
+
+ /* Keep RX disabled until the core service is ready. */
+ tasklet_disable(&priv->rx_tasklet);
+
+ ret = transport_axon_probe_virqs(priv);
+ if (ret < 0)
+ goto err_probe_virqs;
+
+ if (priv->notify_rx_nirqs) {
+ ret = alloc_notify_info(&dev->dev, &priv->transport.notify_info,
+ &priv->transport.notify_info_size,
+ priv->notify_rx_nirqs);
+ if (ret < 0) {
+ dev_err(&dev->dev, "Alloc notify_info failed\n");
+ goto err_alloc_notify;
+ }
+ } else {
+ priv->transport.notify_info = NULL;
+ priv->transport.notify_info_size = 0;
+ }
+
+ priv->free_bufs_pool = transport_axon_init_tx_pool(priv, priv->msg_size,
+ FREE_BUFS_QUOTA);
+ if (IS_ERR(priv->free_bufs_pool)) {
+ ret = PTR_ERR(priv->free_bufs_pool);
+ goto err_init_free_bufs_pool;
+ }
+
+ ret = transport_axon_setup_descs(priv);
+ if (ret < 0)
+ goto err_setup_descs;
+
+ /* Allocate RX buffers for free bufs messages */
+ for (i = 0; i < FREE_BUFS_QUOTA; i++) {
+ dma_addr_t laddr;
+ struct vs_axon_rx_freelist_entry *buf =
+ dma_pool_alloc(priv->rx_pool, GFP_KERNEL, &laddr);
+ if (!buf) {
+ ret = -ENOMEM;
+ goto err_alloc_rx_free_bufs;
+ }
+ buf->laddr = laddr;
+
+ spin_lock_irq(&priv->rx_alloc_lock);
+ list_add_tail(&buf->list, &priv->rx_freelist);
+ spin_unlock_irq(&priv->rx_alloc_lock);
+ }
+
+ /* Set up the session device */
+ priv->session_dev = vs_session_register(&priv->transport, &dev->dev,
+ priv->is_server, name);
+ if (IS_ERR(priv->session_dev)) {
+ ret = PTR_ERR(priv->session_dev);
+ dev_err(&dev->dev, "failed to register session: %d\n", ret);
+ goto err_session_register;
+ }
+
+ /*
+ * Start the core service. Note that it can't actually communicate
+ * until the initial reset completes.
+ */
+ vs_session_start(priv->session_dev);
+
+ /*
+ * Reset the transport. This will also set the Axons' segment
+ * attachments, and eventually the Axons' queue pointers (once the
+ * session marks the transport ready).
+ */
+ transport_reset(&priv->transport);
+
+ /*
+ * We're ready to start handling IRQs at this point, so register the
+ * handlers.
+ */
+ ret = transport_axon_request_irqs(priv);
+ if (ret < 0)
+ goto err_irq_register;
+
+ return 0;
+
+err_irq_register:
+ vs_session_unregister(priv->session_dev);
+err_session_register:
+err_alloc_rx_free_bufs:
+ transport_axon_free_descs(priv);
+err_setup_descs:
+ transport_axon_put_tx_pool(priv->free_bufs_pool);
+err_init_free_bufs_pool:
+ if (priv->transport.notify_info)
+ devm_kfree(&dev->dev, priv->transport.notify_info);
+err_alloc_notify:
+err_probe_virqs:
+ del_timer_sync(&priv->rx_retry_timer);
+ tasklet_kill(&priv->rx_tasklet);
+ tasklet_kill(&priv->tx_tasklet);
+ cancel_delayed_work_sync(&priv->free_bufs_work);
+error_of_node:
+ devm_kfree(&dev->dev, priv);
+err_alloc_priv:
+ return ret;
+}
+
+static int transport_axon_remove(struct platform_device *dev)
+{
+ struct vs_transport_axon *priv = dev_get_drvdata(&dev->dev);
+ int i;
+
+ for (i = 0; i < priv->notify_rx_nirqs; i++)
+ devm_free_irq(&dev->dev, priv->notify_irq[i], priv);
+
+ devm_free_irq(&dev->dev, priv->rx_irq, priv);
+ irq_dispose_mapping(priv->rx_irq);
+ devm_free_irq(&dev->dev, priv->tx_irq, priv);
+ irq_dispose_mapping(priv->tx_irq);
+ devm_free_irq(&dev->dev, priv->reset_irq, priv);
+ irq_dispose_mapping(priv->reset_irq);
+
+ del_timer_sync(&priv->rx_retry_timer);
+ tasklet_kill(&priv->rx_tasklet);
+ tasklet_kill(&priv->tx_tasklet);
+ cancel_delayed_work_sync(&priv->free_bufs_work);
+
+ priv->readiness = VS_TRANSPORT_SHUTDOWN;
+ vs_session_unregister(priv->session_dev);
+ WARN_ON(priv->readiness != VS_TRANSPORT_SHUTDOWN);
+
+ transport_axon_free_descs(priv);
+ transport_axon_put_tx_pool(priv->free_bufs_pool);
+
+ if (priv->transport.notify_info)
+ devm_kfree(&dev->dev, priv->transport.notify_info);
+
+ free_tx_mbufs(priv);
+
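+ /* Wait for deferred mbuf cleanup before draining the RX freelist. */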
+ flush_workqueue(work_queue);
+
+ while (!list_empty(&priv->rx_freelist)) {
+ struct vs_axon_rx_freelist_entry *buf;
+ buf = list_first_entry(&priv->rx_freelist,
+ struct vs_axon_rx_freelist_entry, list);
+ list_del(&buf->list);
+ dma_pool_free(priv->rx_pool, buf, buf->laddr);
+ }
+
+ devm_kfree(&dev->dev, priv);
+ return 0;
+}
+
+static const struct of_device_id transport_axon_of_match[] = {
+ { .compatible = "okl,microvisor-axon-transport", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, transport_axon_of_match);
+
+static struct platform_driver transport_axon_driver = {
+ .probe = transport_axon_probe,
+ .remove = transport_axon_remove,
+ .driver = {
+ .name = DRIVER_NAME,
+ .owner = THIS_MODULE,
+ .bus = &platform_bus_type,
+ .of_match_table = of_match_ptr(transport_axon_of_match),
+ },
+};
+
+static int __init vs_transport_axon_init(void)
+{
+ int ret;
+ okl4_error_t err;
+ struct device_node *cpus;
+ struct zone *zone;
+ struct _okl4_sys_mmu_lookup_pn_return lookup_return;
+ u32 last_seen_attachment = -1;
+ bool first_attachment;
+
+ printk(KERN_INFO "Virtual Services transport driver for OKL4 Axons\n");
+
+ /* Allocate the Axon cleanup workqueue */
+ work_queue = alloc_workqueue("axon_cleanup", 0, 0);
+ if (!work_queue) {
+ ret = -ENOMEM;
+ goto fail_create_workqueue;
+ }
+
+ /* Locate the MMU capability, needed for lookups */
+ cpus = of_find_node_by_path("/cpus");
+ if (IS_ERR_OR_NULL(cpus)) {
+ ret = -EINVAL;
+ goto fail_mmu_cap;
+ }
+ ret = of_property_read_u32(cpus, "okl,vmmu-capability", &okl4_mmu_cap);
+ of_node_put(cpus);
+ if (ret)
+ goto fail_mmu_cap;
+ if (okl4_mmu_cap == OKL4_KCAP_INVALID) {
+ printk(KERN_ERR "%s: OKL4 MMU capability not found\n", __func__);
+ ret = -EPERM;
+ goto fail_mmu_cap;
+ }
+
+ /*
+ * Determine whether there are multiple OKL4 physical memory segments
+ * in this Cell. If so, every transport device must have a valid CMA
+ * region, to guarantee that its buffer allocations all come from the
+ * segment that is attached to the axon endpoints.
+ *
+ * We assume that each zone is contiguously mapped in stage 2 with a
+ * constant physical-to-IPA offset, typically 0. The weaver won't
+ * violate this assumption for Linux (or other HLOS) guests unless it
+ * is explicitly told to.
+ */
+ okl4_single_physical_segment = true;
+ first_attachment = true;
+ for_each_zone(zone) {
+ u32 attachment;
+
+ /* We only care about zones that the page allocator is using */
+ if (!zone->managed_pages)
+ continue;
+
+ /* Find the segment at the start of the zone */
+ lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+ zone->zone_start_pfn, -1);
+ err = okl4_mmu_lookup_index_geterror(
+ &lookup_return.segment_index);
+ if (err != OKL4_OK) {
+ printk(KERN_WARNING "%s: Unable to determine physical segment count, assuming >1\n",
+ __func__);
+ okl4_single_physical_segment = false;
+ break;
+ }
+ attachment = okl4_mmu_lookup_index_getindex(
+ &lookup_return.segment_index);
+
+ if (first_attachment) {
+ last_seen_attachment = attachment;
+ first_attachment = false;
+ } else if (last_seen_attachment != attachment) {
+ okl4_single_physical_segment = false;
+ break;
+ }
+
+ /* Find the segment at the end of the zone */
+ lookup_return = _okl4_sys_mmu_lookup_pn(okl4_mmu_cap,
+ zone_end_pfn(zone) - 1, -1);
+ err = okl4_mmu_lookup_index_geterror(
+ &lookup_return.segment_index);
+ if (err != OKL4_OK) {
+ printk(KERN_WARNING "%s: Unable to determine physical segment count, assuming >1\n",
+ __func__);
+ okl4_single_physical_segment = false;
+ break;
+ }
+ attachment = okl4_mmu_lookup_index_getindex(
+ &lookup_return.segment_index);
+
+ /* Check that it's still the same segment */
+ if (last_seen_attachment != attachment) {
+ okl4_single_physical_segment = false;
+ break;
+ }
+ }
+
+#ifdef DEBUG
+ printk(KERN_DEBUG "%s: physical segment count %s\n", __func__,
+ okl4_single_physical_segment ? "1" : ">1");
+#endif
+
+ mbuf_cache = KMEM_CACHE(vs_mbuf_axon, 0UL);
+ if (!mbuf_cache) {
+ ret = -ENOMEM;
+ goto kmem_cache_failed;
+ }
+
+ ret = platform_driver_register(&transport_axon_driver);
+ if (ret)
+ goto register_plat_driver_failed;
+
+ return ret;
+
+register_plat_driver_failed:
+ kmem_cache_destroy(mbuf_cache);
+ mbuf_cache = NULL;
+kmem_cache_failed:
+fail_mmu_cap:
+ if (work_queue)
+ destroy_workqueue(work_queue);
+fail_create_workqueue:
+ return ret;
+}
+
+static void __exit vs_transport_axon_exit(void)
+{
+ platform_driver_unregister(&transport_axon_driver);
+
+ rcu_barrier();
+
+ if (mbuf_cache)
+ kmem_cache_destroy(mbuf_cache);
+ mbuf_cache = NULL;
+
+ if (work_queue)
+ destroy_workqueue(work_queue);
+}
+
+module_init(vs_transport_axon_init);
+module_exit(vs_transport_axon_exit);
+
+MODULE_DESCRIPTION(DRIVER_DESC);
+MODULE_AUTHOR(DRIVER_AUTHOR);
diff --git a/drivers/xen/cpu_hotplug.c b/drivers/xen/cpu_hotplug.c
index 5676aef..f4e59c4 100644
--- a/drivers/xen/cpu_hotplug.c
+++ b/drivers/xen/cpu_hotplug.c
@@ -18,15 +18,16 @@
static void disable_hotplug_cpu(int cpu)
{
- if (cpu_online(cpu)) {
- lock_device_hotplug();
+ if (!cpu_is_hotpluggable(cpu))
+ return;
+ lock_device_hotplug();
+ if (cpu_online(cpu))
device_offline(get_cpu_device(cpu));
- unlock_device_hotplug();
- }
- if (cpu_present(cpu))
+ if (!cpu_online(cpu) && cpu_present(cpu)) {
xen_arch_unregister_cpu(cpu);
-
- set_cpu_present(cpu, false);
+ set_cpu_present(cpu, false);
+ }
+ unlock_device_hotplug();
}
static int vcpu_online(unsigned int cpu)
diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c
index 1435d8c..4b0cc9d 100644
--- a/drivers/xen/events/events_base.c
+++ b/drivers/xen/events/events_base.c
@@ -139,7 +139,7 @@
clear_evtchn_to_irq_row(row);
}
- evtchn_to_irq[EVTCHN_ROW(evtchn)][EVTCHN_COL(evtchn)] = irq;
+ evtchn_to_irq[row][col] = irq;
return 0;
}
diff --git a/drivers/xen/manage.c b/drivers/xen/manage.c
index 7abaaa5..abd49bc 100644
--- a/drivers/xen/manage.c
+++ b/drivers/xen/manage.c
@@ -282,9 +282,11 @@
/*
* The Xenstore watch fires directly after registering it and
* after a suspend/resume cycle. So ENOENT is no error but
- * might happen in those cases.
+ * might happen in those cases. ERANGE is observed when we get
+ * an empty value (''), this happens when we acknowledge the
+ * request by writing '\0' below.
*/
- if (err != -ENOENT)
+ if (err != -ENOENT && err != -ERANGE)
pr_err("Error %d reading sysrq code in control/sysrq\n",
err);
xenbus_transaction_end(xbt, 1);
diff --git a/fs/cifs/cifs_unicode.c b/fs/cifs/cifs_unicode.c
index a0b3e7d..211ac47 100644
--- a/fs/cifs/cifs_unicode.c
+++ b/fs/cifs/cifs_unicode.c
@@ -101,9 +101,6 @@
case SFM_LESSTHAN:
*target = '<';
break;
- case SFM_SLASH:
- *target = '\\';
- break;
case SFM_SPACE:
*target = ' ';
break;
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 8407b07..741b83c 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -577,10 +577,15 @@
}
count = 0;
+ /*
+ * We know that all the name entries in the protocols array
+ * are short (< 16 bytes anyway) and are NUL terminated.
+ */
for (i = 0; i < CIFS_NUM_PROT; i++) {
- strncpy(pSMB->DialectsArray+count, protocols[i].name, 16);
- count += strlen(protocols[i].name) + 1;
- /* null at end of source and target buffers anyway */
+ size_t len = strlen(protocols[i].name) + 1;
+
+ memcpy(pSMB->DialectsArray+count, protocols[i].name, len);
+ count += len;
}
inc_rfc1001_len(pSMB, count);
pSMB->ByteCount = cpu_to_le16(count);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 323d8e3..50559a8 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -406,9 +406,17 @@
(struct smb_com_transaction_change_notify_rsp *)buf;
struct file_notify_information *pnotify;
__u32 data_offset = 0;
+ size_t len = srv->total_read - sizeof(pSMBr->hdr.smb_buf_length);
+
if (get_bcc(buf) > sizeof(struct file_notify_information)) {
data_offset = le32_to_cpu(pSMBr->DataOffset);
+ if (data_offset >
+ len - sizeof(struct file_notify_information)) {
+ cifs_dbg(FYI, "invalid data_offset %u\n",
+ data_offset);
+ return true;
+ }
pnotify = (struct file_notify_information *)
((char *)&pSMBr->hdr.Protocol + data_offset);
cifs_dbg(FYI, "dnotify on %s Action: 0x%x\n",
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 68622f1..08c1c86 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -989,7 +989,7 @@
}
srch_inf->entries_in_buffer = 0;
- srch_inf->index_of_last_entry = 0;
+ srch_inf->index_of_last_entry = 2;
rc = SMB2_query_directory(xid, tcon, fid->persistent_fid,
fid->volatile_fid, 0, srch_inf);
diff --git a/fs/crypto/bio.c b/fs/crypto/bio.c
index c629e97..c74e9ac 100644
--- a/fs/crypto/bio.c
+++ b/fs/crypto/bio.c
@@ -131,7 +131,7 @@
bio->bi_bdev = inode->i_sb->s_bdev;
bio->bi_iter.bi_sector =
pblk << (inode->i_sb->s_blocksize_bits - 9);
- bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
+ bio_set_op_attrs(bio, REQ_OP_WRITE, REQ_NOENCRYPT);
ret = bio_add_page(bio, ciphertext_page,
inode->i_sb->s_blocksize, 0);
if (ret != inode->i_sb->s_blocksize) {
diff --git a/fs/crypto/fscrypt_private.h b/fs/crypto/fscrypt_private.h
index f2251a8..bfc5064 100644
--- a/fs/crypto/fscrypt_private.h
+++ b/fs/crypto/fscrypt_private.h
@@ -93,14 +93,9 @@
filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
return true;
- if (contents_mode == FS_ENCRYPTION_MODE_SPECK128_256_XTS &&
- filenames_mode == FS_ENCRYPTION_MODE_SPECK128_256_CTS)
- return true;
-
if (contents_mode == FS_ENCRYPTION_MODE_PRIVATE &&
filenames_mode == FS_ENCRYPTION_MODE_AES_256_CTS)
return true;
-
return false;
}
diff --git a/fs/crypto/keyinfo.c b/fs/crypto/keyinfo.c
index 6219c91..d437da0 100644
--- a/fs/crypto/keyinfo.c
+++ b/fs/crypto/keyinfo.c
@@ -178,16 +178,6 @@
.cipher_str = "cts(cbc(aes))",
.keysize = 16,
},
- [FS_ENCRYPTION_MODE_SPECK128_256_XTS] = {
- .friendly_name = "Speck128/256-XTS",
- .cipher_str = "xts(speck128)",
- .keysize = 64,
- },
- [FS_ENCRYPTION_MODE_SPECK128_256_CTS] = {
- .friendly_name = "Speck128/256-CTS-CBC",
- .cipher_str = "cts(cbc(speck128))",
- .keysize = 32,
- },
[FS_ENCRYPTION_MODE_PRIVATE] = {
.friendly_name = "ICE",
.cipher_str = "bugon",
diff --git a/fs/ext4/dir.c b/fs/ext4/dir.c
index e8b3650..e16bc4c 100644
--- a/fs/ext4/dir.c
+++ b/fs/ext4/dir.c
@@ -74,7 +74,7 @@
else if (unlikely(rlen < EXT4_DIR_REC_LEN(de->name_len)))
error_msg = "rec_len is too small for name_len";
else if (unlikely(((char *) de - buf) + rlen > size))
- error_msg = "directory entry across range";
+ error_msg = "directory entry overrun";
else if (unlikely(le32_to_cpu(de->inode) >
le32_to_cpu(EXT4_SB(dir->i_sb)->s_es->s_inodes_count)))
error_msg = "inode out of bounds";
@@ -83,18 +83,16 @@
if (filp)
ext4_error_file(filp, function, line, bh->b_blocknr,
- "bad entry in directory: %s - offset=%u(%u), "
- "inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset % size),
- offset, le32_to_cpu(de->inode),
- rlen, de->name_len);
+ "bad entry in directory: %s - offset=%u, "
+ "inode=%u, rec_len=%d, name_len=%d, size=%d",
+ error_msg, offset, le32_to_cpu(de->inode),
+ rlen, de->name_len, size);
else
ext4_error_inode(dir, function, line, bh->b_blocknr,
- "bad entry in directory: %s - offset=%u(%u), "
- "inode=%u, rec_len=%d, name_len=%d",
- error_msg, (unsigned) (offset % size),
- offset, le32_to_cpu(de->inode),
- rlen, de->name_len);
+ "bad entry in directory: %s - offset=%u, "
+ "inode=%u, rec_len=%d, name_len=%d, size=%d",
+ error_msg, offset, le32_to_cpu(de->inode),
+ rlen, de->name_len, size);
return 1;
}
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 0647538..7af8b5e 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -203,7 +203,10 @@
ssize_t size; /* size of the extent */
} ext4_io_end_t;
+#define EXT4_IO_ENCRYPTED 1
+
struct ext4_io_submit {
+ unsigned int io_flags;
struct writeback_control *io_wbc;
struct bio *io_bio;
ext4_io_end_t *io_end;
@@ -3002,9 +3005,6 @@
extern int ext4_inline_data_fiemap(struct inode *inode,
struct fiemap_extent_info *fieinfo,
int *has_inline, __u64 start, __u64 len);
-extern int ext4_try_to_evict_inline_data(handle_t *handle,
- struct inode *inode,
- int needed);
extern void ext4_inline_data_truncate(struct inode *inode, int *has_inline);
extern int ext4_convert_inline_data(struct inode *inode);
diff --git a/fs/ext4/inline.c b/fs/ext4/inline.c
index 74e831f..f1661e1 100644
--- a/fs/ext4/inline.c
+++ b/fs/ext4/inline.c
@@ -903,11 +903,11 @@
flags |= AOP_FLAG_NOFS;
if (ret == -ENOSPC) {
+ ext4_journal_stop(handle);
ret = ext4_da_convert_inline_data_to_extent(mapping,
inode,
flags,
fsdata);
- ext4_journal_stop(handle);
if (ret == -ENOSPC &&
ext4_should_retry_alloc(inode->i_sb, &retries))
goto retry_journal;
@@ -1768,6 +1768,7 @@
{
int err, inline_size;
struct ext4_iloc iloc;
+ size_t inline_len;
void *inline_pos;
unsigned int offset;
struct ext4_dir_entry_2 *de;
@@ -1795,8 +1796,9 @@
goto out;
}
+ inline_len = ext4_get_inline_size(dir);
offset = EXT4_INLINE_DOTDOT_SIZE;
- while (offset < dir->i_size) {
+ while (offset < inline_len) {
de = ext4_get_inline_entry(dir, &iloc, offset,
&inline_pos, &inline_size);
if (ext4_check_dir_entry(dir, NULL, de,
@@ -1877,42 +1879,6 @@
return (error < 0 ? error : 0);
}
-/*
- * Called during xattr set, and if we can sparse space 'needed',
- * just create the extent tree evict the data to the outer block.
- *
- * We use jbd2 instead of page cache to move data to the 1st block
- * so that the whole transaction can be committed as a whole and
- * the data isn't lost because of the delayed page cache write.
- */
-int ext4_try_to_evict_inline_data(handle_t *handle,
- struct inode *inode,
- int needed)
-{
- int error;
- struct ext4_xattr_entry *entry;
- struct ext4_inode *raw_inode;
- struct ext4_iloc iloc;
-
- error = ext4_get_inode_loc(inode, &iloc);
- if (error)
- return error;
-
- raw_inode = ext4_raw_inode(&iloc);
- entry = (struct ext4_xattr_entry *)((void *)raw_inode +
- EXT4_I(inode)->i_inline_off);
- if (EXT4_XATTR_LEN(entry->e_name_len) +
- EXT4_XATTR_SIZE(le32_to_cpu(entry->e_value_size)) < needed) {
- error = -ENOSPC;
- goto out;
- }
-
- error = ext4_convert_inline_data_nolock(handle, inode, &iloc);
-out:
- brelse(iloc.bh);
- return error;
-}
-
void ext4_inline_data_truncate(struct inode *inode, int *has_inline)
{
handle_t *handle;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c38245d..2e4428c 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1150,11 +1150,12 @@
if (!buffer_uptodate(bh) && !buffer_delay(bh) &&
!buffer_unwritten(bh) &&
(block_start < from || block_end > to)) {
- ll_rw_block(REQ_OP_READ, 0, 1, &bh);
- *wait_bh++ = bh;
decrypt = ext4_encrypted_inode(inode) &&
S_ISREG(inode->i_mode) &&
!fscrypt_using_hardware_encryption(inode);
+ ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0),
+ 1, &bh);
+ *wait_bh++ = bh;
}
}
/*
@@ -3772,6 +3773,7 @@
struct inode *inode = mapping->host;
struct buffer_head *bh;
struct page *page;
+ bool decrypt;
int err = 0;
page = find_or_create_page(mapping, from >> PAGE_SHIFT,
@@ -3814,14 +3816,15 @@
if (!buffer_uptodate(bh)) {
err = -EIO;
- ll_rw_block(REQ_OP_READ, 0, 1, &bh);
+ decrypt = S_ISREG(inode->i_mode) &&
+ ext4_encrypted_inode(inode) &&
+ !fscrypt_using_hardware_encryption(inode);
+ ll_rw_block(REQ_OP_READ, (decrypt ? REQ_NOENCRYPT : 0), 1, &bh);
wait_on_buffer(bh);
/* Uhhuh. Read error. Complain and punt. */
if (!buffer_uptodate(bh))
goto unlock;
- if (S_ISREG(inode->i_mode) &&
- ext4_encrypted_inode(inode) &&
- !fscrypt_using_hardware_encryption(inode)) {
+ if (decrypt) {
/* We expect the key to be set. */
BUG_ON(!fscrypt_has_encryption_key(inode));
BUG_ON(blocksize != PAGE_SIZE);
diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
index d89754e..c2e830a 100644
--- a/fs/ext4/mmp.c
+++ b/fs/ext4/mmp.c
@@ -48,7 +48,6 @@
*/
sb_start_write(sb);
ext4_mmp_csum_set(sb, mmp);
- mark_buffer_dirty(bh);
lock_buffer(bh);
bh->b_end_io = end_buffer_write_sync;
get_bh(bh);
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index 09020e4..9bad755 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -3434,6 +3434,12 @@
int credits;
u8 old_file_type;
+ if (new.inode && new.inode->i_nlink == 0) {
+ EXT4_ERROR_INODE(new.inode,
+ "target of rename is already freed");
+ return -EFSCORRUPTED;
+ }
+
if ((ext4_test_inode_flag(new_dir, EXT4_INODE_PROJINHERIT)) &&
(!projid_eq(EXT4_I(new_dir)->i_projid,
EXT4_I(old_dentry->d_inode)->i_projid)))
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index 8d4ec1a..228baa5 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -341,6 +341,8 @@
if (bio) {
int io_op_flags = io->io_wbc->sync_mode == WB_SYNC_ALL ?
WRITE_SYNC : 0;
+ if (io->io_flags & EXT4_IO_ENCRYPTED)
+ io_op_flags |= REQ_NOENCRYPT;
bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
submit_bio(io->io_bio);
}
@@ -350,6 +352,7 @@
void ext4_io_submit_init(struct ext4_io_submit *io,
struct writeback_control *wbc)
{
+ io->io_flags = 0;
io->io_wbc = wbc;
io->io_bio = NULL;
io->io_end = NULL;
@@ -492,6 +495,8 @@
do {
if (!buffer_async_write(bh))
continue;
+ if (data_page)
+ io->io_flags |= EXT4_IO_ENCRYPTED;
ret = io_submit_add_bh(io, inode,
data_page ? data_page : page, bh);
if (ret) {
diff --git a/fs/ext4/readpage.c b/fs/ext4/readpage.c
index c39a12d..270c4f4 100644
--- a/fs/ext4/readpage.c
+++ b/fs/ext4/readpage.c
@@ -297,7 +297,8 @@
bio->bi_iter.bi_sector = blocks[0] << (blkbits - 9);
bio->bi_end_io = mpage_end_io;
bio->bi_private = ctx;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio_set_op_attrs(bio, REQ_OP_READ,
+ ctx ? REQ_NOENCRYPT : 0);
}
length = first_hole << blkbits;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index eb720d9..1da301e 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -18,6 +18,7 @@
int ext4_resize_begin(struct super_block *sb)
{
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
int ret = 0;
if (!capable(CAP_SYS_RESOURCE))
@@ -28,7 +29,7 @@
* because the user tools have no way of handling this. Probably a
* bad time to do it anyways.
*/
- if (EXT4_SB(sb)->s_sbh->b_blocknr !=
+ if (EXT4_B2C(sbi, sbi->s_sbh->b_blocknr) !=
le32_to_cpu(EXT4_SB(sb)->s_es->s_first_data_block)) {
ext4_warning(sb, "won't resize using backup superblock at %llu",
(unsigned long long)EXT4_SB(sb)->s_sbh->b_blocknr);
@@ -1954,6 +1955,26 @@
}
}
+ /*
+ * Make sure the last group has enough space so that it's
+ * guaranteed to have enough space for all metadata blocks
+ * that it might need to hold. (We might not need to store
+ * the inode table blocks in the last block group, but there
+ * will be cases where this might be needed.)
+ */
+ if ((ext4_group_first_block_no(sb, n_group) +
+ ext4_group_overhead_blocks(sb, n_group) + 2 +
+ sbi->s_itb_per_group + sbi->s_cluster_ratio) >= n_blocks_count) {
+ n_blocks_count = ext4_group_first_block_no(sb, n_group);
+ n_group--;
+ n_blocks_count_retry = 0;
+ if (resize_inode) {
+ iput(resize_inode);
+ resize_inode = NULL;
+ }
+ goto retry;
+ }
+
/* extend the last group */
if (n_group == o_group)
add = n_blocks_count - o_blocks_count;
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 1a2c223..031e43d 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -2019,6 +2019,8 @@
SEQ_OPTS_PRINT("max_dir_size_kb=%u", sbi->s_max_dir_size_kb);
if (test_opt(sb, DATA_ERR_ABORT))
SEQ_OPTS_PUTS("data_err=abort");
+ if (DUMMY_ENCRYPTION_ENABLED(sbi))
+ SEQ_OPTS_PUTS("test_dummy_encryption");
ext4_show_quota_options(seq, sb);
return 0;
@@ -4193,11 +4195,13 @@
block = ext4_count_free_clusters(sb);
ext4_free_blocks_count_set(sbi->s_es,
EXT4_C2B(sbi, block));
+ ext4_superblock_csum_set(sb);
err = percpu_counter_init(&sbi->s_freeclusters_counter, block,
GFP_KERNEL);
if (!err) {
unsigned long freei = ext4_count_free_inodes(sb);
sbi->s_es->s_free_inodes_count = cpu_to_le32(freei);
+ ext4_superblock_csum_set(sb);
err = percpu_counter_init(&sbi->s_freeinodes_counter, freei,
GFP_KERNEL);
}
diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
index fdcbe0f..38385bc 100644
--- a/fs/ext4/xattr.c
+++ b/fs/ext4/xattr.c
@@ -209,12 +209,12 @@
{
int error;
- if (buffer_verified(bh))
- return 0;
-
if (BHDR(bh)->h_magic != cpu_to_le32(EXT4_XATTR_MAGIC) ||
BHDR(bh)->h_blocks != cpu_to_le32(1))
return -EFSCORRUPTED;
+ if (buffer_verified(bh))
+ return 0;
+
if (!ext4_xattr_block_csum_verify(inode, bh))
return -EFSBADCRC;
error = ext4_xattr_check_names(BFIRST(bh), bh->b_data + bh->b_size,
@@ -645,14 +645,20 @@
}
static int
-ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s)
+ext4_xattr_set_entry(struct ext4_xattr_info *i, struct ext4_xattr_search *s,
+ struct inode *inode)
{
- struct ext4_xattr_entry *last;
+ struct ext4_xattr_entry *last, *next;
size_t free, min_offs = s->end - s->base, name_len = strlen(i->name);
/* Compute min_offs and last. */
last = s->first;
- for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ for (; !IS_LAST_ENTRY(last); last = next) {
+ next = EXT4_XATTR_NEXT(last);
+ if ((void *)next >= s->end) {
+ EXT4_ERROR_INODE(inode, "corrupted xattr entries");
+ return -EFSCORRUPTED;
+ }
if (last->e_value_size) {
size_t offs = le16_to_cpu(last->e_value_offs);
if (offs < min_offs)
@@ -834,7 +840,7 @@
mb_cache_entry_delete_block(ext4_mb_cache, hash,
bs->bh->b_blocknr);
ea_bdebug(bs->bh, "modifying in-place");
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (!error) {
if (!IS_LAST_ENTRY(s->first))
ext4_xattr_rehash(header(s->base),
@@ -881,7 +887,7 @@
s->end = s->base + sb->s_blocksize;
}
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (error == -EFSCORRUPTED)
goto bad_block;
if (error)
@@ -1079,23 +1085,9 @@
if (EXT4_I(inode)->i_extra_isize == 0)
return -ENOSPC;
- error = ext4_xattr_set_entry(i, s);
- if (error) {
- if (error == -ENOSPC &&
- ext4_has_inline_data(inode)) {
- error = ext4_try_to_evict_inline_data(handle, inode,
- EXT4_XATTR_LEN(strlen(i->name) +
- EXT4_XATTR_SIZE(i->value_len)));
- if (error)
- return error;
- error = ext4_xattr_ibody_find(inode, i, is);
- if (error)
- return error;
- error = ext4_xattr_set_entry(i, s);
- }
- if (error)
- return error;
- }
+ error = ext4_xattr_set_entry(i, s, inode);
+ if (error)
+ return error;
header = IHDR(inode, ext4_raw_inode(&is->iloc));
if (!IS_LAST_ENTRY(s->first)) {
header->h_magic = cpu_to_le32(EXT4_XATTR_MAGIC);
@@ -1117,7 +1109,7 @@
if (EXT4_I(inode)->i_extra_isize == 0)
return -ENOSPC;
- error = ext4_xattr_set_entry(i, s);
+ error = ext4_xattr_set_entry(i, s, inode);
if (error)
return error;
header = IHDR(inode, ext4_raw_inode(&is->iloc));
@@ -1426,6 +1418,11 @@
last = IFIRST(header);
/* Find the entry best suited to be pushed into EA block */
for (; !IS_LAST_ENTRY(last); last = EXT4_XATTR_NEXT(last)) {
+ /* never move system.data out of the inode */
+ if ((last->e_name_len == 4) &&
+ (last->e_name_index == EXT4_XATTR_INDEX_SYSTEM) &&
+ !memcmp(last->e_name, "data", 4))
+ continue;
total_size =
EXT4_XATTR_SIZE(le32_to_cpu(last->e_value_size)) +
EXT4_XATTR_LEN(last->e_name_len);
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index d7a53c6..6b24eb4 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -28,6 +28,7 @@
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io)
{
+ f2fs_build_fault_attr(sbi, 0, 0);
set_ckpt_flags(sbi, CP_ERROR_FLAG);
if (!end_io)
f2fs_flush_merged_writes(sbi);
@@ -70,6 +71,7 @@
.encrypted_page = NULL,
.is_meta = is_meta,
};
+ int err;
if (unlikely(!is_meta))
fio.op_flags &= ~REQ_META;
@@ -84,9 +86,10 @@
fio.page = page;
- if (f2fs_submit_page_bio(&fio)) {
+ err = f2fs_submit_page_bio(&fio);
+ if (err) {
f2fs_put_page(page, 1);
- goto repeat;
+ return ERR_PTR(err);
}
lock_page(page);
@@ -95,14 +98,9 @@
goto repeat;
}
- /*
- * if there is any IO error when accessing device, make our filesystem
- * readonly and make sure do not write checkpoint with non-uptodate
- * meta page.
- */
if (unlikely(!PageUptodate(page))) {
- memset(page_address(page), 0, PAGE_SIZE);
- f2fs_stop_checkpoint(sbi, false);
+ f2fs_put_page(page, 1);
+ return ERR_PTR(-EIO);
}
out:
return page;
@@ -113,13 +111,32 @@
return __get_meta_page(sbi, index, true);
}
+struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index)
+{
+ struct page *page;
+ int count = 0;
+
+retry:
+ page = __get_meta_page(sbi, index, true);
+ if (IS_ERR(page)) {
+ if (PTR_ERR(page) == -EIO &&
+ ++count <= DEFAULT_RETRY_IO_COUNT)
+ goto retry;
+
+ f2fs_stop_checkpoint(sbi, false);
+ f2fs_bug_on(sbi, 1);
+ }
+
+ return page;
+}
+
/* for POR only */
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index)
{
return __get_meta_page(sbi, index, false);
}
-bool f2fs_is_valid_meta_blkaddr(struct f2fs_sb_info *sbi,
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
block_t blkaddr, int type)
{
switch (type) {
@@ -140,8 +157,20 @@
return false;
break;
case META_POR:
+ case DATA_GENERIC:
if (unlikely(blkaddr >= MAX_BLKADDR(sbi) ||
- blkaddr < MAIN_BLKADDR(sbi)))
+ blkaddr < MAIN_BLKADDR(sbi))) {
+ if (type == DATA_GENERIC) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "access invalid blkaddr:%u", blkaddr);
+ WARN_ON(1);
+ }
+ return false;
+ }
+ break;
+ case META_GENERIC:
+ if (unlikely(blkaddr < SEG0_BLKADDR(sbi) ||
+ blkaddr >= MAIN_BLKADDR(sbi)))
return false;
break;
default:
@@ -176,7 +205,7 @@
blk_start_plug(&plug);
for (; nrpages-- > 0; blkno++) {
- if (!f2fs_is_valid_meta_blkaddr(sbi, blkno, type))
+ if (!f2fs_is_valid_blkaddr(sbi, blkno, type))
goto out;
switch (type) {
@@ -242,11 +271,8 @@
trace_f2fs_writepage(page, META);
- if (unlikely(f2fs_cp_error(sbi))) {
- dec_page_count(sbi, F2FS_DIRTY_META);
- unlock_page(page);
- return 0;
- }
+ if (unlikely(f2fs_cp_error(sbi)))
+ goto redirty_out;
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
if (wbc->for_reclaim && page->index < GET_SUM_BLOCK(sbi, 0))
@@ -529,13 +555,12 @@
spin_lock(&im->ino_lock);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_ORPHAN)) {
spin_unlock(&im->ino_lock);
f2fs_show_injection_info(FAULT_ORPHAN);
return -ENOSPC;
}
-#endif
+
if (unlikely(im->ino_num >= sbi->max_orphans))
err = -ENOSPC;
else
@@ -572,12 +597,7 @@
{
struct inode *inode;
struct node_info ni;
- int err = f2fs_acquire_orphan_inode(sbi);
-
- if (err)
- goto err_out;
-
- __add_ino_entry(sbi, ino, 0, ORPHAN_INO);
+ int err;
inode = f2fs_iget_retry(sbi->sb, ino);
if (IS_ERR(inode)) {
@@ -600,14 +620,15 @@
/* truncate all the data during iput */
iput(inode);
- f2fs_get_node_info(sbi, ino, &ni);
+ err = f2fs_get_node_info(sbi, ino, &ni);
+ if (err)
+ goto err_out;
/* ENOMEM was fully retried in f2fs_evict_inode. */
if (ni.blk_addr != NULL_ADDR) {
err = -EIO;
goto err_out;
}
- __remove_ino_entry(sbi, ino, ORPHAN_INO);
return 0;
err_out:
@@ -639,7 +660,10 @@
/* Needed for iput() to work correctly and not trash data */
sbi->sb->s_flags |= MS_ACTIVE;
- /* Turn on quotas so that they are updated correctly */
+ /*
+ * Turn on quotas which were not enabled for read-only mounts if
+ * filesystem has quota feature, so that they are updated correctly.
+ */
quota_enabled = f2fs_enable_quota_files(sbi, s_flags & MS_RDONLY);
#endif
@@ -649,9 +673,15 @@
f2fs_ra_meta_pages(sbi, start_blk, orphan_blocks, META_CP, true);
for (i = 0; i < orphan_blocks; i++) {
- struct page *page = f2fs_get_meta_page(sbi, start_blk + i);
+ struct page *page;
struct f2fs_orphan_block *orphan_blk;
+ page = f2fs_get_meta_page(sbi, start_blk + i);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ goto out;
+ }
+
orphan_blk = (struct f2fs_orphan_block *)page_address(page);
for (j = 0; j < le32_to_cpu(orphan_blk->entry_count); j++) {
nid_t ino = le32_to_cpu(orphan_blk->ino[j]);
@@ -742,10 +772,14 @@
__u32 crc = 0;
*cp_page = f2fs_get_meta_page(sbi, cp_addr);
+ if (IS_ERR(*cp_page))
+ return PTR_ERR(*cp_page);
+
*cp_block = (struct f2fs_checkpoint *)page_address(*cp_page);
crc_offset = le32_to_cpu((*cp_block)->checksum_offset);
if (crc_offset > (blk_size - sizeof(__le32))) {
+ f2fs_put_page(*cp_page, 1);
f2fs_msg(sbi->sb, KERN_WARNING,
"invalid crc_offset: %zu", crc_offset);
return -EINVAL;
@@ -753,6 +787,7 @@
crc = cur_cp_crc(*cp_block);
if (!f2fs_crc_valid(sbi, crc, *cp_block, crc_offset)) {
+ f2fs_put_page(*cp_page, 1);
f2fs_msg(sbi->sb, KERN_WARNING, "invalid crc value");
return -EINVAL;
}
@@ -772,14 +807,22 @@
err = get_checkpoint_version(sbi, cp_addr, &cp_block,
&cp_page_1, version);
if (err)
- goto invalid_cp1;
+ return NULL;
+
+ if (le32_to_cpu(cp_block->cp_pack_total_block_count) >
+ sbi->blocks_per_seg) {
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "invalid cp_pack_total_block_count:%u",
+ le32_to_cpu(cp_block->cp_pack_total_block_count));
+ goto invalid_cp;
+ }
pre_version = *version;
cp_addr += le32_to_cpu(cp_block->cp_pack_total_block_count) - 1;
err = get_checkpoint_version(sbi, cp_addr, &cp_block,
&cp_page_2, version);
if (err)
- goto invalid_cp2;
+ goto invalid_cp;
cur_version = *version;
if (cur_version == pre_version) {
@@ -787,9 +830,8 @@
f2fs_put_page(cp_page_2, 1);
return cp_page_1;
}
-invalid_cp2:
f2fs_put_page(cp_page_2, 1);
-invalid_cp1:
+invalid_cp:
f2fs_put_page(cp_page_1, 1);
return NULL;
}
@@ -838,15 +880,15 @@
cp_block = (struct f2fs_checkpoint *)page_address(cur_page);
memcpy(sbi->ckpt, cp_block, blk_size);
- /* Sanity checking of checkpoint */
- if (f2fs_sanity_check_ckpt(sbi))
- goto free_fail_no_cp;
-
if (cur_page == cp1)
sbi->cur_cp_pack = 1;
else
sbi->cur_cp_pack = 2;
+ /* Sanity checking of checkpoint */
+ if (f2fs_sanity_check_ckpt(sbi))
+ goto free_fail_no_cp;
+
if (cp_blks <= 1)
goto done;
@@ -859,6 +901,8 @@
unsigned char *ckpt = (unsigned char *)sbi->ckpt;
cur_page = f2fs_get_meta_page(sbi, cp_blk_no + i);
+ if (IS_ERR(cur_page))
+ goto free_fail_no_cp;
sit_bitmap_ptr = page_address(cur_page);
memcpy(ckpt + i * blk_size, sit_bitmap_ptr, blk_size);
f2fs_put_page(cur_page, 1);
@@ -980,12 +1024,10 @@
iput(inode);
/* We need to give the CPU to other writers. */
- if (ino == cur_ino) {
- congestion_wait(BLK_RW_ASYNC, HZ/50);
+ if (ino == cur_ino)
cond_resched();
- } else {
+ else
ino = cur_ino;
- }
} else {
/*
* We should submit bio, since it exists several
@@ -1119,7 +1161,7 @@
f2fs_unlock_all(sbi);
}
-static void wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
+void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi)
{
DEFINE_WAIT(wait);
@@ -1129,6 +1171,9 @@
if (!get_pages(sbi, F2FS_WB_CP_DATA))
break;
+ if (unlikely(f2fs_cp_error(sbi)))
+ break;
+
io_schedule_timeout(5*HZ);
}
finish_wait(&sbi->cp_wait, &wait);
@@ -1202,8 +1247,12 @@
/* writeout cp pack 2 page */
err = __f2fs_write_meta_page(page, &wbc, FS_CP_META_IO);
- f2fs_bug_on(sbi, err);
+ if (unlikely(err && f2fs_cp_error(sbi))) {
+ f2fs_put_page(page, 1);
+ return;
+ }
+ f2fs_bug_on(sbi, err);
f2fs_put_page(page, 0);
/* submit checkpoint (with barrier if NOBARRIER is not set) */
@@ -1229,7 +1278,7 @@
while (get_pages(sbi, F2FS_DIRTY_META)) {
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ break;
}
/*
@@ -1309,7 +1358,7 @@
f2fs_sync_meta_pages(sbi, META, LONG_MAX,
FS_CP_META_IO);
if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ break;
}
}
@@ -1348,10 +1397,7 @@
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_CP_META_IO);
/* wait for previous submitted meta pages writeback */
- wait_on_all_pages_writeback(sbi);
-
- if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ f2fs_wait_on_all_pages_writeback(sbi);
/* flush all device cache */
err = f2fs_flush_device_cache(sbi);
@@ -1360,12 +1406,19 @@
/* barrier and flush checkpoint cp pack 2 page if it can */
commit_checkpoint(sbi, ckpt, start_blk);
- wait_on_all_pages_writeback(sbi);
+ f2fs_wait_on_all_pages_writeback(sbi);
+
+ /*
+ * invalidate intermediate page cache borrowed from the meta inode,
+ * which is used for migration of encrypted inodes' blocks.
+ */
+ if (f2fs_sb_has_encrypt(sbi->sb))
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ MAIN_BLKADDR(sbi), MAX_BLKADDR(sbi) - 1);
f2fs_release_ino_entry(sbi, false);
- if (unlikely(f2fs_cp_error(sbi)))
- return -EIO;
+ f2fs_reset_fsync_node_info(sbi);
clear_sbi_flag(sbi, SBI_IS_DIRTY);
clear_sbi_flag(sbi, SBI_NEED_CP);
@@ -1381,7 +1434,7 @@
f2fs_bug_on(sbi, get_pages(sbi, F2FS_DIRTY_DENTS));
- return 0;
+ return unlikely(f2fs_cp_error(sbi)) ? -EIO : 0;
}
/*
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index f7f2c13..910b8eb 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -126,12 +126,10 @@
static void f2fs_read_end_io(struct bio *bio)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_P_SB(bio->bi_io_vec->bv_page), FAULT_IO)) {
f2fs_show_injection_info(FAULT_IO);
bio->bi_error = -EIO;
}
-#endif
if (f2fs_bio_post_read_required(bio)) {
struct bio_post_read_ctx *ctx = bio->bi_private;
@@ -177,6 +175,8 @@
page->index != nid_of_node(page));
dec_page_count(sbi, type);
+ if (f2fs_in_warm_node_list(sbi, page))
+ f2fs_del_fsync_node_entry(sbi, page);
clear_cold_data(page);
end_page_writeback(page);
}
@@ -263,7 +263,7 @@
if (type != DATA && type != NODE)
goto submit_io;
- if (f2fs_sb_has_blkzoned(sbi->sb) && current->plug)
+ if (test_opt(sbi, LFS) && current->plug)
blk_finish_plug(current->plug);
start = bio->bi_iter.bi_size >> F2FS_BLKSIZE_BITS;
@@ -441,7 +441,10 @@
fio->encrypted_page : fio->page;
struct inode *inode = fio->page->mapping->host;
- verify_block_addr(fio, fio->new_blkaddr);
+ if (!f2fs_is_valid_blkaddr(fio->sbi, fio->new_blkaddr,
+ __is_meta_io(fio) ? META_GENERIC : DATA_GENERIC))
+ return -EFAULT;
+
trace_f2fs_submit_page_bio(page, fio);
f2fs_trace_ios(fio, 0);
@@ -457,6 +460,7 @@
bio_put(bio);
return -EFAULT;
}
+ fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
bio_set_op_attrs(bio, fio->op, fio->op_flags);
__submit_bio(fio->sbi, bio, fio->type);
@@ -493,11 +497,13 @@
spin_unlock(&io->io_lock);
}
- if (is_valid_blkaddr(fio->old_blkaddr))
+ if (__is_valid_data_blkaddr(fio->old_blkaddr))
verify_block_addr(fio, fio->old_blkaddr);
verify_block_addr(fio, fio->new_blkaddr);
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
+ fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
+
inode = fio->page->mapping->host;
dun = PG_DUN(inode, fio->page);
bi_crypt_skip = fio->encrypted_page ? 1 : 0;
@@ -554,19 +560,25 @@
}
static struct bio *f2fs_grab_read_bio(struct inode *inode, block_t blkaddr,
- unsigned nr_pages)
+ unsigned nr_pages, unsigned op_flag)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct bio *bio;
struct bio_post_read_ctx *ctx;
unsigned int post_read_steps = 0;
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+ return ERR_PTR(-EFAULT);
+
bio = f2fs_bio_alloc(sbi, min_t(int, nr_pages, BIO_MAX_PAGES), false);
if (!bio)
return ERR_PTR(-ENOMEM);
f2fs_target_device(sbi, blkaddr, bio);
bio->bi_end_io = f2fs_read_end_io;
- bio_set_op_attrs(bio, REQ_OP_READ, 0);
+ bio_set_op_attrs(bio, REQ_OP_READ,
+ (f2fs_encrypted_inode(inode) ?
+ REQ_NOENCRYPT :
+ op_flag));
if (f2fs_encrypted_file(inode) &&
!fscrypt_using_hardware_encryption(inode))
@@ -592,7 +604,7 @@
static int f2fs_submit_page_read(struct inode *inode, struct page *page,
block_t blkaddr)
{
- struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1);
+ struct bio *bio = f2fs_grab_read_bio(inode, blkaddr, 1, 0);
if (IS_ERR(bio))
return PTR_ERR(bio);
@@ -893,6 +905,7 @@
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct f2fs_summary sum;
struct node_info ni;
+ block_t old_blkaddr;
pgoff_t fofs;
blkcnt_t count = 1;
int err;
@@ -900,6 +913,10 @@
if (unlikely(is_inode_flag_set(dn->inode, FI_NO_ALLOC)))
return -EPERM;
+ err = f2fs_get_node_info(sbi, dn->nid, &ni);
+ if (err)
+ return err;
+
dn->data_blkaddr = datablock_addr(dn->inode,
dn->node_page, dn->ofs_in_node);
if (dn->data_blkaddr == NEW_ADDR)
@@ -909,11 +926,13 @@
return err;
alloc:
- f2fs_get_node_info(sbi, dn->nid, &ni);
set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
-
- f2fs_allocate_data_block(sbi, NULL, dn->data_blkaddr, &dn->data_blkaddr,
+ old_blkaddr = dn->data_blkaddr;
+ f2fs_allocate_data_block(sbi, NULL, old_blkaddr, &dn->data_blkaddr,
&sum, seg_type, NULL, false);
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ old_blkaddr, old_blkaddr);
f2fs_set_data_blkaddr(dn);
/* update i_size */
@@ -1069,7 +1088,13 @@
next_block:
blkaddr = datablock_addr(dn.inode, dn.node_page, dn.ofs_in_node);
- if (!is_valid_blkaddr(blkaddr)) {
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC)) {
+ err = -EFAULT;
+ goto sync_out;
+ }
+
+ if (!is_valid_data_blkaddr(sbi, blkaddr)) {
if (create) {
if (unlikely(f2fs_cp_error(sbi))) {
err = -EIO;
@@ -1306,7 +1331,11 @@
if (!page)
return -ENOMEM;
- f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return err;
+ }
phys = (__u64)blk_to_logical(inode, ni.blk_addr);
offset = offsetof(struct f2fs_inode, i_addr) +
@@ -1333,7 +1362,11 @@
if (!page)
return -ENOMEM;
- f2fs_get_node_info(sbi, xnid, &ni);
+ err = f2fs_get_node_info(sbi, xnid, &ni);
+ if (err) {
+ f2fs_put_page(page, 1);
+ return err;
+ }
phys = (__u64)blk_to_logical(inode, ni.blk_addr);
len = inode->i_sb->s_blocksize;
@@ -1445,10 +1478,15 @@
/*
* This function was originally taken from fs/mpage.c, and customized for f2fs.
* Major change was from block_size == page_size in f2fs by default.
+ *
+ * Note that the aops->readpages() function is ONLY used for read-ahead. If
+ * this function ever deviates from doing just read-ahead, it should either
+ * use ->readpage() or do the necessary surgery to decouple ->readpages()
+ * from read-ahead.
*/
static int f2fs_mpage_readpages(struct address_space *mapping,
struct list_head *pages, struct page *page,
- unsigned nr_pages)
+ unsigned nr_pages, bool is_readahead)
{
struct bio *bio = NULL;
sector_t last_block_in_bio = 0;
@@ -1521,6 +1559,10 @@
SetPageUptodate(page);
goto confused;
}
+
+ if (!f2fs_is_valid_blkaddr(F2FS_I_SB(inode), block_nr,
+ DATA_GENERIC))
+ goto set_error_page;
} else {
zero_user_segment(page, 0, PAGE_SIZE);
if (!PageUptodate(page))
@@ -1548,7 +1590,8 @@
}
if (bio == NULL) {
- bio = f2fs_grab_read_bio(inode, block_nr, nr_pages);
+ bio = f2fs_grab_read_bio(inode, block_nr, nr_pages,
+ is_readahead ? REQ_RAHEAD : 0);
if (IS_ERR(bio)) {
bio = NULL;
goto set_error_page;
@@ -1593,7 +1636,7 @@
if (f2fs_has_inline_data(inode))
ret = f2fs_read_inline_data(inode, page);
if (ret == -EAGAIN)
- ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1);
+ ret = f2fs_mpage_readpages(page->mapping, NULL, page, 1, false);
return ret;
}
@@ -1610,12 +1653,13 @@
if (f2fs_has_inline_data(inode))
return 0;
- return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages);
+ return f2fs_mpage_readpages(mapping, pages, NULL, nr_pages, true);
}
static int encrypt_one_page(struct f2fs_io_info *fio)
{
struct inode *inode = fio->page->mapping->host;
+ struct page *mpage;
gfp_t gfp_flags = GFP_NOFS;
if (!f2fs_encrypted_file(inode))
@@ -1630,17 +1674,25 @@
fio->encrypted_page = fscrypt_encrypt_page(inode, fio->page,
PAGE_SIZE, 0, fio->page->index, gfp_flags);
- if (!IS_ERR(fio->encrypted_page))
- return 0;
-
- /* flush pending IOs and wait for a while in the ENOMEM case */
- if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
- f2fs_flush_merged_writes(fio->sbi);
- congestion_wait(BLK_RW_ASYNC, HZ/50);
- gfp_flags |= __GFP_NOFAIL;
- goto retry_encrypt;
+ if (IS_ERR(fio->encrypted_page)) {
+ /* flush pending IOs and wait for a while in the ENOMEM case */
+ if (PTR_ERR(fio->encrypted_page) == -ENOMEM) {
+ f2fs_flush_merged_writes(fio->sbi);
+ congestion_wait(BLK_RW_ASYNC, HZ/50);
+ gfp_flags |= __GFP_NOFAIL;
+ goto retry_encrypt;
+ }
+ return PTR_ERR(fio->encrypted_page);
}
- return PTR_ERR(fio->encrypted_page);
+
+ mpage = find_lock_page(META_MAPPING(fio->sbi), fio->old_blkaddr);
+ if (mpage) {
+ if (PageUptodate(mpage))
+ memcpy(page_address(mpage),
+ page_address(fio->encrypted_page), PAGE_SIZE);
+ f2fs_put_page(mpage, 1);
+ }
+ return 0;
}
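
The new tail of encrypt_one_page() keeps META_MAPPING coherent: if GC left a cached copy of the block being rewritten, the fresh ciphertext is copied over it before the write is issued. The idea in miniature, using a one-slot toy cache in place of the page cache (names and sizes here are invented):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SZ 16	/* toy page size */

/* one-slot stand-in for META_MAPPING */
static struct {
	bool present, uptodate;
	unsigned char data[PAGE_SZ];
} meta_cache;

/* mirror of the added step: overwrite a stale cached copy, if any */
static void refresh_cached_block(const unsigned char *ciphertext)
{
	if (meta_cache.present && meta_cache.uptodate)
		memcpy(meta_cache.data, ciphertext, PAGE_SZ);
}

int main(void)
{
	unsigned char oldct[PAGE_SZ] = "old ciphertext";
	unsigned char newct[PAGE_SZ] = "new ciphertext";

	meta_cache.present = meta_cache.uptodate = true;
	memcpy(meta_cache.data, oldct, PAGE_SZ);

	refresh_cached_block(newct);
	printf("%s\n", meta_cache.data);	/* -> new ciphertext */
	return 0;
}
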
static inline bool check_inplace_update_policy(struct inode *inode,
@@ -1724,6 +1776,7 @@
struct inode *inode = page->mapping->host;
struct dnode_of_data dn;
struct extent_info ei = {0,0,0};
+ struct node_info ni;
bool ipu_force = false;
int err = 0;
@@ -1732,11 +1785,13 @@
f2fs_lookup_extent_cache(inode, page->index, &ei)) {
fio->old_blkaddr = ei.blk + page->index - ei.fofs;
- if (is_valid_blkaddr(fio->old_blkaddr)) {
- ipu_force = true;
- fio->need_lock = LOCK_DONE;
- goto got_it;
- }
+ if (!f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+ DATA_GENERIC))
+ return -EFAULT;
+
+ ipu_force = true;
+ fio->need_lock = LOCK_DONE;
+ goto got_it;
}
/* Deadlock due to between page->lock and f2fs_lock_op */
@@ -1755,11 +1810,17 @@
goto out_writepage;
}
got_it:
+ if (__is_valid_data_blkaddr(fio->old_blkaddr) &&
+ !f2fs_is_valid_blkaddr(fio->sbi, fio->old_blkaddr,
+ DATA_GENERIC)) {
+ err = -EFAULT;
+ goto out_writepage;
+ }
/*
* If the current allocation needs SSR,
* it is better to do in-place writes for the updated data.
*/
- if (ipu_force || (is_valid_blkaddr(fio->old_blkaddr) &&
+ if (ipu_force || (is_valid_data_blkaddr(fio->sbi, fio->old_blkaddr) &&
need_inplace_update(fio))) {
err = encrypt_one_page(fio);
if (err)
@@ -1784,6 +1845,12 @@
fio->need_lock = LOCK_REQ;
}
+ err = f2fs_get_node_info(fio->sbi, dn.nid, &ni);
+ if (err)
+ goto out_writepage;
+
+ fio->version = ni.version;
+
err = encrypt_one_page(fio);
if (err)
goto out_writepage;
@@ -2207,10 +2274,14 @@
loff_t i_size = i_size_read(inode);
if (to > i_size) {
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
down_write(&F2FS_I(inode)->i_mmap_sem);
+
truncate_pagecache(inode, i_size);
f2fs_truncate_blocks(inode, i_size, true);
+
up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
}
}
@@ -2315,8 +2386,9 @@
}
trace_f2fs_write_begin(inode, pos, len, flags);
- if (f2fs_is_atomic_file(inode) &&
- !f2fs_available_free_memory(sbi, INMEM_PAGES)) {
+ if ((f2fs_is_atomic_file(inode) &&
+ !f2fs_available_free_memory(sbi, INMEM_PAGES)) ||
+ is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST)) {
err = -ENOMEM;
drop_atomic = true;
goto fail;
@@ -2441,14 +2513,20 @@
static int check_direct_IO(struct inode *inode, struct iov_iter *iter,
loff_t offset)
{
- unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;
+ unsigned i_blkbits = READ_ONCE(inode->i_blkbits);
+ unsigned blkbits = i_blkbits;
+ unsigned blocksize_mask = (1 << blkbits) - 1;
+ unsigned long align = offset | iov_iter_alignment(iter);
+ struct block_device *bdev = inode->i_sb->s_bdev;
- if (offset & blocksize_mask)
- return -EINVAL;
-
- if (iov_iter_alignment(iter) & blocksize_mask)
- return -EINVAL;
-
+ if (align & blocksize_mask) {
+ if (bdev)
+ blkbits = blksize_bits(bdev_logical_block_size(bdev));
+ blocksize_mask = (1 << blkbits) - 1;
+ if (align & blocksize_mask)
+ return -EINVAL;
+ return 1;
+ }
return 0;
}
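
The rewritten check distinguishes three outcomes instead of two: aligned to the filesystem block (0, direct I/O proceeds), misaligned for the filesystem but still aligned to the device's logical block (1, which the caller in the next hunk maps to 0 so the request silently takes the buffered path), and misaligned for both (-EINVAL). A hedged sketch of that decision, with plain integers standing in for the iov iterator and block device:

#include <errno.h>

/*
 * Returns 0  -> direct I/O is fine,
 *         1  -> fall back to buffered I/O,
 *        <0  -> reject the request outright.
 * fs_blkbits/dev_blkbits stand in for inode->i_blkbits and
 * blksize_bits(bdev_logical_block_size(bdev)).
 */
int classify_dio_alignment(unsigned long offset, unsigned long iov_align,
			   unsigned fs_blkbits, unsigned dev_blkbits)
{
	unsigned long align = offset | iov_align;

	if (!(align & ((1UL << fs_blkbits) - 1)))
		return 0;		/* fs-block aligned: go direct */
	if (align & ((1UL << dev_blkbits) - 1))
		return -EINVAL;		/* not even device aligned */
	return 1;			/* buffered fallback */
}
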
@@ -2466,7 +2544,7 @@
err = check_direct_IO(inode, iter, offset);
if (err)
- return err;
+ return err < 0 ? err : 0;
if (f2fs_force_buffered_io(inode, rw))
return 0;
@@ -2588,6 +2666,10 @@
if (!PageUptodate(page))
SetPageUptodate(page);
+ /* don't retain the PG_checked flag that was set during GC */
+ if (is_cold_data(page))
+ clear_cold_data(page);
+
if (f2fs_is_atomic_file(inode) && !f2fs_is_commit_atomic_write(inode)) {
if (!IS_ATOMIC_WRITTEN_PAGE(page)) {
f2fs_register_inmem_page(inode, page);
diff --git a/fs/f2fs/debug.c b/fs/f2fs/debug.c
index 2d65e77..214a968 100644
--- a/fs/f2fs/debug.c
+++ b/fs/f2fs/debug.c
@@ -215,7 +215,8 @@
si->base_mem += sizeof(struct f2fs_nm_info);
si->base_mem += __bitmap_size(sbi, NAT_BITMAP);
si->base_mem += (NM_I(sbi)->nat_bits_blocks << F2FS_BLKSIZE_BITS);
- si->base_mem += NM_I(sbi)->nat_blocks * NAT_ENTRY_BITMAP_SIZE;
+ si->base_mem += NM_I(sbi)->nat_blocks *
+ f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK);
si->base_mem += NM_I(sbi)->nat_blocks / 8;
si->base_mem += NM_I(sbi)->nat_blocks * sizeof(unsigned short);
diff --git a/fs/f2fs/dir.c b/fs/f2fs/dir.c
index 63c0732..56cc274 100644
--- a/fs/f2fs/dir.c
+++ b/fs/f2fs/dir.c
@@ -517,12 +517,11 @@
}
start:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_I_SB(dir), FAULT_DIR_DEPTH)) {
f2fs_show_injection_info(FAULT_DIR_DEPTH);
return -ENOSPC;
}
-#endif
+
if (unlikely(current_depth == MAX_DIR_HASH_DEPTH))
return -ENOSPC;
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index b696bc8..5bcbdce 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -43,7 +43,6 @@
} while (0)
#endif
-#ifdef CONFIG_F2FS_FAULT_INJECTION
enum {
FAULT_KMALLOC,
FAULT_KVMALLOC,
@@ -58,16 +57,20 @@
FAULT_TRUNCATE,
FAULT_IO,
FAULT_CHECKPOINT,
+ FAULT_DISCARD,
FAULT_MAX,
};
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+#define F2FS_ALL_FAULT_TYPE ((1 << FAULT_MAX) - 1)
+
struct f2fs_fault_info {
atomic_t inject_ops;
unsigned int inject_rate;
unsigned int inject_type;
};
-extern char *fault_name[FAULT_MAX];
+extern char *f2fs_fault_name[FAULT_MAX];
#define IS_FAULT_SET(fi, type) ((fi)->inject_type & (1 << (type)))
#endif
@@ -180,7 +183,6 @@
#define MAX_DISCARD_BLOCKS(sbi) BLKS_PER_SEC(sbi)
#define DEF_MAX_DISCARD_REQUEST 8 /* issue 8 discards per round */
-#define DEF_MAX_DISCARD_LEN 512 /* Max. 2MB per discard */
#define DEF_MIN_DISCARD_ISSUE_TIME 50 /* 50 ms, if candidates exist */
#define DEF_MID_DISCARD_ISSUE_TIME 500 /* 500 ms, if device busy */
#define DEF_MAX_DISCARD_ISSUE_TIME 60000 /* 60 s, if no candidates */
@@ -196,7 +198,7 @@
};
/*
- * For CP/NAT/SIT/SSA readahead
+ * indicate meta/data type
*/
enum {
META_CP,
@@ -204,6 +206,8 @@
META_SIT,
META_SSA,
META_POR,
+ DATA_GENERIC,
+ META_GENERIC,
};
/* for the list of ino */
@@ -228,6 +232,12 @@
struct inode *inode; /* vfs inode pointer */
};
+struct fsync_node_entry {
+ struct list_head list; /* list head */
+ struct page *page; /* warm node page pointer */
+ unsigned int seq_id; /* sequence id */
+};
+
/* for the bitmap indicate blocks to be discarded */
struct discard_entry {
struct list_head list; /* list head */
@@ -244,9 +254,10 @@
(MAX_PLIST_NUM - 1) : (blk_num - 1))
enum {
- D_PREP,
- D_SUBMIT,
- D_DONE,
+ D_PREP, /* initial */
+ D_PARTIAL, /* partially submitted */
+ D_SUBMIT, /* all submitted */
+ D_DONE, /* finished */
};
struct discard_info {
@@ -271,7 +282,10 @@
struct block_device *bdev; /* bdev */
unsigned short ref; /* reference count */
unsigned char state; /* state */
+ unsigned char issuing; /* issuing discard */
int error; /* bio error */
+ spinlock_t lock; /* for state/bio_ref updating */
+ unsigned short bio_ref; /* bio reference count */
};
enum {
@@ -291,6 +305,7 @@
unsigned int io_aware_gran; /* minimum discard granularity not subject to I/O awareness */
bool io_aware; /* issue discard in idle time */
bool sync; /* submit discard with REQ_SYNC flag */
+ bool ordered; /* issue discard by lba order */
unsigned int granularity; /* discard granularity */
};
@@ -307,10 +322,12 @@
unsigned int max_discards; /* max. discards to be issued */
unsigned int discard_granularity; /* discard granularity */
unsigned int undiscard_blks; /* # of undiscard blocks */
+ unsigned int next_pos; /* next discard position */
atomic_t issued_discard; /* # of issued discard */
atomic_t issing_discard; /* # of issing discard */
atomic_t discard_cmd_cnt; /* # of cached cmd count */
struct rb_root root; /* root of discard rb-tree */
+ bool rbtree_check; /* config for consistency check */
};
/* for the list of fsync inodes, used only during recovery */
@@ -507,13 +524,12 @@
*/
};
+#define DEFAULT_RETRY_IO_COUNT 8 /* maximum retry read IO count */
+
#define F2FS_LINK_MAX 0xffffffff /* maximum link count per file */
#define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */
-/* vector size for gang look-up from extent cache that consists of radix tree */
-#define EXT_TREE_VEC_SIZE 64
-
/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */
@@ -599,6 +615,8 @@
#define FADVISE_HOT_BIT 0x20
#define FADVISE_VERITY_BIT 0x40 /* reserved */
+#define FADVISE_MODIFIABLE_BITS (FADVISE_COLD_BIT | FADVISE_HOT_BIT)
+
#define file_is_cold(inode) is_file(inode, FADVISE_COLD_BIT)
#define file_wrong_pino(inode) is_file(inode, FADVISE_LOST_PINO_BIT)
#define file_set_cold(inode) set_file(inode, FADVISE_COLD_BIT)
@@ -697,22 +715,22 @@
}
static inline bool __is_discard_mergeable(struct discard_info *back,
- struct discard_info *front)
+ struct discard_info *front, unsigned int max_len)
{
return (back->lstart + back->len == front->lstart) &&
- (back->len + front->len < DEF_MAX_DISCARD_LEN);
+ (back->len + front->len <= max_len);
}
static inline bool __is_discard_back_mergeable(struct discard_info *cur,
- struct discard_info *back)
+ struct discard_info *back, unsigned int max_len)
{
- return __is_discard_mergeable(back, cur);
+ return __is_discard_mergeable(back, cur, max_len);
}
static inline bool __is_discard_front_mergeable(struct discard_info *cur,
- struct discard_info *front)
+ struct discard_info *front, unsigned int max_len)
{
- return __is_discard_mergeable(cur, front);
+ return __is_discard_mergeable(cur, front, max_len);
}
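
Passing max_len into the merge predicates (instead of the removed DEF_MAX_DISCARD_LEN constant) lets each caller cap merged extents at its device's discard limit. The adjacency-plus-cap test reduces to a couple of comparisons; the start/len pairs below are hypothetical stand-ins for struct discard_info:

#include <stdbool.h>

struct extent {
	unsigned long long start;	/* lstart */
	unsigned long long len;
};

/* back and front merge iff they touch and the merged size stays capped */
bool can_merge(const struct extent *back, const struct extent *front,
	       unsigned long long max_len)
{
	return back->start + back->len == front->start &&
	       back->len + front->len <= max_len;
}
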
static inline bool __is_extent_mergeable(struct extent_info *back,
@@ -767,6 +785,7 @@
struct radix_tree_root nat_set_root;/* root of the nat set cache */
struct rw_semaphore nat_tree_lock; /* protect nat_tree_lock */
struct list_head nat_entries; /* cached nat entry list (clean) */
+ spinlock_t nat_list_lock; /* protect clean nat entry list */
unsigned int nat_cnt; /* the # of cached nat entries */
unsigned int dirty_nat_cnt; /* total num of nat entries in set */
unsigned int nat_blocks; /* # of nat blocks */
@@ -1015,6 +1034,7 @@
bool retry; /* need to reallocate block address */
enum iostat_type io_type; /* io type */
struct writeback_control *io_wbc; /* writeback control */
+ unsigned char version; /* version of the node */
};
#define is_read_io(rw) ((rw) == READ)
@@ -1066,6 +1086,7 @@
SBI_POR_DOING, /* recovery is doing or not */
SBI_NEED_SB_WRITE, /* need to recover superblock */
SBI_NEED_CP, /* need to checkpoint */
+ SBI_IS_SHUTDOWN, /* shutdown by ioctl */
};
enum {
@@ -1149,6 +1170,11 @@
struct inode_management im[MAX_INO_ENTRY]; /* manage inode cache */
+ spinlock_t fsync_node_lock; /* for node entry lock */
+ struct list_head fsync_node_list; /* node list head */
+ unsigned int fsync_seg_id; /* sequence id */
+ unsigned int fsync_node_num; /* number of node entries */
+
/* for orphan inode, use 0'th array */
unsigned int max_orphans; /* max orphan inodes */
@@ -1216,6 +1242,7 @@
unsigned int gc_mode; /* current GC state */
/* for skip statistic */
unsigned long long skipped_atomic_files[2]; /* FG_GC and BG_GC */
+ unsigned long long skipped_gc_rwsem; /* FG_GC only */
/* threshold for gc trials on pinned files */
u64 gc_pin_file_threshold;
@@ -1280,7 +1307,7 @@
#ifdef CONFIG_F2FS_FAULT_INJECTION
#define f2fs_show_injection_info(type) \
printk("%sF2FS-fs : inject %s in %s of %pF\n", \
- KERN_INFO, fault_name[type], \
+ KERN_INFO, f2fs_fault_name[type], \
__func__, __builtin_return_address(0))
static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
{
@@ -1299,6 +1326,12 @@
}
return false;
}
+#else
+#define f2fs_show_injection_info(type) do { } while (0)
+static inline bool time_to_inject(struct f2fs_sb_info *sbi, int type)
+{
+ return false;
+}
#endif
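
Moving the fault enum out of the #ifdef and supplying no-op fallbacks is what lets every call site in this series drop its own #ifdef CONFIG_F2FS_FAULT_INJECTION pair. The shape of the pattern, sketched with a hypothetical MY_FAULT_INJECTION switch rather than the kernel's Kconfig machinery:

#include <stdbool.h>
#include <stdio.h>

/* the enum stays visible in both configurations */
enum { FAULT_ALLOC, FAULT_IO, FAULT_MAX };

#ifdef MY_FAULT_INJECTION
static bool time_to_inject(int type)
{
	/* a real build would roll dice against an inject_rate here */
	return type == FAULT_IO;
}
#define show_injection_info(type) \
	fprintf(stderr, "inject fault %d in %s\n", (type), __func__)
#else
/* no-op stubs: callers need no #ifdef of their own */
static bool time_to_inject(int type) { (void)type; return false; }
#define show_injection_info(type) do { } while (0)
#endif

int do_io(void)
{
	if (time_to_inject(FAULT_IO)) {		/* always compiles, */
		show_injection_info(FAULT_IO);	/* costs nothing when off */
		return -5;			/* -EIO */
	}
	return 0;
}
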
/* For write statistics. Suppose sector size is 512 bytes,
@@ -1327,7 +1360,7 @@
struct request_list *rl = &q->root_rl;
if (rl->count[BLK_RW_SYNC] || rl->count[BLK_RW_ASYNC])
- return 0;
+ return false;
return f2fs_time_over(sbi, REQ_TIME);
}
@@ -1651,13 +1684,12 @@
if (ret)
return ret;
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_BLOCK)) {
f2fs_show_injection_info(FAULT_BLOCK);
release = *count;
goto enospc;
}
-#endif
+
/*
+ * let's increase this prior to the actual block count change in order
+ * for f2fs_sync_file to avoid data races when deciding checkpoint.
@@ -1681,18 +1713,20 @@
sbi->total_valid_block_count -= diff;
if (!*count) {
spin_unlock(&sbi->stat_lock);
- percpu_counter_sub(&sbi->alloc_valid_block_count, diff);
goto enospc;
}
}
spin_unlock(&sbi->stat_lock);
- if (unlikely(release))
+ if (unlikely(release)) {
+ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
dquot_release_reservation_block(inode, release);
+ }
f2fs_i_blocks_write(inode, *count, true, true);
return 0;
enospc:
+ percpu_counter_sub(&sbi->alloc_valid_block_count, release);
dquot_release_reservation_block(inode, release);
return -ENOSPC;
}
@@ -1864,12 +1898,10 @@
return ret;
}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_BLOCK)) {
f2fs_show_injection_info(FAULT_BLOCK);
goto enospc;
}
-#endif
spin_lock(&sbi->stat_lock);
@@ -1954,17 +1986,23 @@
static inline struct page *f2fs_grab_cache_page(struct address_space *mapping,
pgoff_t index, bool for_write)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- struct page *page = find_lock_page(mapping, index);
+ struct page *page;
- if (page)
- return page;
+ if (IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION)) {
+ if (!for_write)
+ page = find_get_page_flags(mapping, index,
+ FGP_LOCK | FGP_ACCESSED);
+ else
+ page = find_lock_page(mapping, index);
+ if (page)
+ return page;
- if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
- f2fs_show_injection_info(FAULT_PAGE_ALLOC);
- return NULL;
+ if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_ALLOC)) {
+ f2fs_show_injection_info(FAULT_PAGE_ALLOC);
+ return NULL;
+ }
}
-#endif
+
if (!for_write)
return grab_cache_page(mapping, index);
return grab_cache_page_write_begin(mapping, index, AOP_FLAG_NOFS);
@@ -1974,12 +2012,11 @@
struct address_space *mapping, pgoff_t index,
int fgp_flags, gfp_t gfp_mask)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_M_SB(mapping), FAULT_PAGE_GET)) {
f2fs_show_injection_info(FAULT_PAGE_GET);
return NULL;
}
-#endif
+
return pagecache_get_page(mapping, index, fgp_flags, gfp_mask);
}
@@ -2044,12 +2081,11 @@
bio = bio_alloc(GFP_NOIO | __GFP_NOFAIL, npages);
return bio;
}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_ALLOC_BIO)) {
f2fs_show_injection_info(FAULT_ALLOC_BIO);
return NULL;
}
-#endif
+
return bio_alloc(GFP_KERNEL, npages);
}
@@ -2584,12 +2620,11 @@
static inline void *f2fs_kmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_KMALLOC)) {
f2fs_show_injection_info(FAULT_KMALLOC);
return NULL;
}
-#endif
+
return kmalloc(size, flags);
}
@@ -2632,12 +2667,11 @@
static inline void *f2fs_kvmalloc(struct f2fs_sb_info *sbi,
size_t size, gfp_t flags)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_KVMALLOC)) {
f2fs_show_injection_info(FAULT_KVMALLOC);
return NULL;
}
-#endif
+
return kvmalloc(size, flags);
}
@@ -2696,13 +2730,39 @@
spin_unlock(&sbi->iostat_lock);
}
-static inline bool is_valid_blkaddr(block_t blkaddr)
+#define __is_meta_io(fio) (PAGE_TYPE_OF_BIO(fio->type) == META && \
+ (!is_read_io(fio->op) || fio->is_meta))
+
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type);
+void f2fs_msg(struct super_block *sb, const char *level, const char *fmt, ...);
+static inline void verify_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type)
+{
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, type)) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "invalid blkaddr: %u, type: %d, run fsck to fix.",
+ blkaddr, type);
+ f2fs_bug_on(sbi, 1);
+ }
+}
+
+static inline bool __is_valid_data_blkaddr(block_t blkaddr)
{
if (blkaddr == NEW_ADDR || blkaddr == NULL_ADDR)
return false;
return true;
}
+static inline bool is_valid_data_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr)
+{
+ if (!__is_valid_data_blkaddr(blkaddr))
+ return false;
+ verify_blkaddr(sbi, blkaddr, DATA_GENERIC);
+ return true;
+}
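
The validation is deliberately two-layered: __is_valid_data_blkaddr() filters only the NEW_ADDR/NULL_ADDR sentinels, while f2fs_is_valid_blkaddr() range-checks the address against the region its type implies. A toy model of that layering; the region bounds here are invented, where the kernel derives them from the superblock geometry:

#include <stdbool.h>
#include <stdint.h>

typedef uint32_t block_t;

#define NULL_ADDR	((block_t)0)
#define NEW_ADDR	((block_t)-1)

enum { DATA_GENERIC, META_GENERIC };

/* layer 1: reject the two in-memory sentinels only */
bool valid_data_blkaddr(block_t blkaddr)
{
	return blkaddr != NEW_ADDR && blkaddr != NULL_ADDR;
}

/* layer 2: range-check against the area the type implies */
bool blkaddr_in_range(block_t blkaddr, int type)
{
	const block_t meta_start = 0, meta_end = 512;	/* invented bounds */
	const block_t main_start = 512, main_end = 4096;

	if (type == DATA_GENERIC)
		return valid_data_blkaddr(blkaddr) &&
		       blkaddr >= main_start && blkaddr < main_end;
	return blkaddr >= meta_start && blkaddr < meta_end;
}
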
+
/*
* file.c
*/
@@ -2817,16 +2877,21 @@
int f2fs_check_nid_range(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_available_free_memory(struct f2fs_sb_info *sbi, int type);
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi);
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page);
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi);
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_is_checkpointed_node(struct f2fs_sb_info *sbi, nid_t nid);
bool f2fs_need_inode_block_update(struct f2fs_sb_info *sbi, nid_t ino);
-void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
struct node_info *ni);
pgoff_t f2fs_get_next_page_offset(struct dnode_of_data *dn, pgoff_t pgofs);
int f2fs_get_dnode_of_data(struct dnode_of_data *dn, pgoff_t index, int mode);
int f2fs_truncate_inode_blocks(struct inode *inode, pgoff_t from);
int f2fs_truncate_xattr_node(struct inode *inode);
-int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino);
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+ unsigned int seq_id);
int f2fs_remove_inode_page(struct inode *inode);
struct page *f2fs_new_inode_page(struct inode *inode);
struct page *f2fs_new_node_page(struct dnode_of_data *dn, unsigned int ofs);
@@ -2835,11 +2900,12 @@
struct page *f2fs_get_node_page_ra(struct page *parent, int start);
void f2fs_move_node_page(struct page *node_page, int gc_type);
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
- struct writeback_control *wbc, bool atomic);
+ struct writeback_control *wbc, bool atomic,
+ unsigned int *seq_id);
int f2fs_sync_node_pages(struct f2fs_sb_info *sbi,
struct writeback_control *wbc,
bool do_balance, enum iostat_type io_type);
-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount);
bool f2fs_alloc_nid(struct f2fs_sb_info *sbi, nid_t *nid);
void f2fs_alloc_nid_done(struct f2fs_sb_info *sbi, nid_t nid);
void f2fs_alloc_nid_failed(struct f2fs_sb_info *sbi, nid_t nid);
@@ -2847,7 +2913,7 @@
void f2fs_recover_inline_xattr(struct inode *inode, struct page *page);
int f2fs_recover_xattr_data(struct inode *inode, struct page *page);
int f2fs_recover_inode_page(struct f2fs_sb_info *sbi, struct page *page);
-void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum);
void f2fs_flush_nat_entries(struct f2fs_sb_info *sbi, struct cp_control *cpc);
int f2fs_build_node_manager(struct f2fs_sb_info *sbi);
@@ -2925,9 +2991,10 @@
void f2fs_stop_checkpoint(struct f2fs_sb_info *sbi, bool end_io);
struct page *f2fs_grab_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_meta_page(struct f2fs_sb_info *sbi, pgoff_t index);
+struct page *f2fs_get_meta_page_nofail(struct f2fs_sb_info *sbi, pgoff_t index);
struct page *f2fs_get_tmp_page(struct f2fs_sb_info *sbi, pgoff_t index);
-bool f2fs_is_valid_meta_blkaddr(struct f2fs_sb_info *sbi,
- block_t blkaddr, int type);
+bool f2fs_is_valid_blkaddr(struct f2fs_sb_info *sbi,
+ block_t blkaddr, int type);
int f2fs_ra_meta_pages(struct f2fs_sb_info *sbi, block_t start, int nrpages,
int type, bool sync);
void f2fs_ra_meta_pages_cond(struct f2fs_sb_info *sbi, pgoff_t index);
@@ -2951,6 +3018,7 @@
void f2fs_update_dirty_page(struct inode *inode, struct page *page);
void f2fs_remove_dirty_inode(struct inode *inode);
int f2fs_sync_dirty_inodes(struct f2fs_sb_info *sbi, enum inode_type type);
+void f2fs_wait_on_all_pages_writeback(struct f2fs_sb_info *sbi);
int f2fs_write_checkpoint(struct f2fs_sb_info *sbi, struct cp_control *cpc);
void f2fs_init_ino_entry_info(struct f2fs_sb_info *sbi);
int __init f2fs_create_checkpoint_caches(void);
@@ -3389,7 +3457,7 @@
return (S_ISREG(mode) || S_ISDIR(mode) || S_ISLNK(mode));
#else
- return 0;
+ return false;
#endif
}
@@ -3411,4 +3479,11 @@
fscrypt_using_hardware_encryption(inode));
}
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+extern void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+ unsigned int type);
+#else
+#define f2fs_build_fault_attr(sbi, rate, type) do { } while (0)
+#endif
+
#endif
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index 18f415a..4636b01 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -215,6 +215,7 @@
.nr_to_write = LONG_MAX,
.for_reclaim = 0,
};
+ unsigned int seq_id = 0;
if (unlikely(f2fs_readonly(inode->i_sb)))
return 0;
@@ -277,7 +278,7 @@
}
sync_nodes:
atomic_inc(&sbi->wb_sync_req[NODE]);
- ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic);
+ ret = f2fs_fsync_node_pages(sbi, inode, &wbc, atomic, &seq_id);
atomic_dec(&sbi->wb_sync_req[NODE]);
if (ret)
goto out;
@@ -303,7 +304,7 @@
* given fsync mark.
*/
if (!atomic) {
- ret = f2fs_wait_on_node_pages_writeback(sbi, ino);
+ ret = f2fs_wait_on_node_pages_writeback(sbi, seq_id);
if (ret)
goto out;
}
@@ -352,13 +353,13 @@
return pgofs;
}
-static bool __found_offset(block_t blkaddr, pgoff_t dirty, pgoff_t pgofs,
- int whence)
+static bool __found_offset(struct f2fs_sb_info *sbi, block_t blkaddr,
+ pgoff_t dirty, pgoff_t pgofs, int whence)
{
switch (whence) {
case SEEK_DATA:
if ((blkaddr == NEW_ADDR && dirty == pgofs) ||
- is_valid_blkaddr(blkaddr))
+ is_valid_data_blkaddr(sbi, blkaddr))
return true;
break;
case SEEK_HOLE:
@@ -422,7 +423,15 @@
blkaddr = datablock_addr(dn.inode,
dn.node_page, dn.ofs_in_node);
- if (__found_offset(blkaddr, dirty, pgofs, whence)) {
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(F2FS_I_SB(inode),
+ blkaddr, DATA_GENERIC)) {
+ f2fs_put_dnode(&dn);
+ goto fail;
+ }
+
+ if (__found_offset(F2FS_I_SB(inode), blkaddr, dirty,
+ pgofs, whence)) {
f2fs_put_dnode(&dn);
goto found;
}
@@ -515,6 +524,11 @@
dn->data_blkaddr = NULL_ADDR;
f2fs_set_data_blkaddr(dn);
+
+ if (__is_valid_data_blkaddr(blkaddr) &&
+ !f2fs_is_valid_blkaddr(sbi, blkaddr, DATA_GENERIC))
+ continue;
+
f2fs_invalidate_blocks(sbi, blkaddr);
if (dn->ofs_in_node == 0 && IS_INODE(dn->node_page))
clear_inode_flag(dn->inode, FI_FIRST_BLOCK_WRITTEN);
@@ -656,12 +670,11 @@
trace_f2fs_truncate(inode);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(F2FS_I_SB(inode), FAULT_TRUNCATE)) {
f2fs_show_injection_info(FAULT_TRUNCATE);
return -EIO;
}
-#endif
+
/* we should check inline_data size */
if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
@@ -785,22 +798,26 @@
}
if (attr->ia_valid & ATTR_SIZE) {
- if (attr->ia_size <= i_size_read(inode)) {
- down_write(&F2FS_I(inode)->i_mmap_sem);
- truncate_setsize(inode, attr->ia_size);
- err = f2fs_truncate(inode);
- up_write(&F2FS_I(inode)->i_mmap_sem);
- if (err)
- return err;
- } else {
- /*
- * do not trim all blocks after i_size if target size is
- * larger than i_size.
- */
- down_write(&F2FS_I(inode)->i_mmap_sem);
- truncate_setsize(inode, attr->ia_size);
- up_write(&F2FS_I(inode)->i_mmap_sem);
+ bool to_smaller = (attr->ia_size <= i_size_read(inode));
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+
+ truncate_setsize(inode, attr->ia_size);
+
+ if (to_smaller)
+ err = f2fs_truncate(inode);
+ /*
+ * do not trim all blocks after i_size if target size is
+ * larger than i_size.
+ */
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
+ if (err)
+ return err;
+
+ if (!to_smaller) {
/* should convert inline inode here */
if (!f2fs_may_inline_data(inode)) {
err = f2fs_convert_inline_inode(inode);
@@ -947,14 +964,19 @@
blk_start = (loff_t)pg_start << PAGE_SHIFT;
blk_end = (loff_t)pg_end << PAGE_SHIFT;
+
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
down_write(&F2FS_I(inode)->i_mmap_sem);
+
truncate_inode_pages_range(mapping, blk_start,
blk_end - 1);
f2fs_lock_op(sbi);
ret = f2fs_truncate_hole(inode, pg_start, pg_end);
f2fs_unlock_op(sbi);
+
up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
}
}
@@ -1057,7 +1079,12 @@
if (ret)
return ret;
- f2fs_get_node_info(sbi, dn.nid, &ni);
+ ret = f2fs_get_node_info(sbi, dn.nid, &ni);
+ if (ret) {
+ f2fs_put_dnode(&dn);
+ return ret;
+ }
+
ilen = min((pgoff_t)
ADDRS_PER_PAGE(dn.node_page, dst_inode) -
dn.ofs_in_node, len - i);
@@ -1164,25 +1191,33 @@
return ret;
}
-static int f2fs_do_collapse(struct inode *inode, pgoff_t start, pgoff_t end)
+static int f2fs_do_collapse(struct inode *inode, loff_t offset, loff_t len)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
pgoff_t nrpages = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+ pgoff_t start = offset >> PAGE_SHIFT;
+ pgoff_t end = (offset + len) >> PAGE_SHIFT;
int ret;
f2fs_balance_fs(sbi, true);
+
+ /* avoid gc operation during block exchange */
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+
f2fs_lock_op(sbi);
-
f2fs_drop_extent_tree(inode);
-
+ truncate_pagecache(inode, offset);
ret = __exchange_data_block(inode, inode, end, start, nrpages - end, true);
f2fs_unlock_op(sbi);
+
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
return ret;
}
static int f2fs_collapse_range(struct inode *inode, loff_t offset, loff_t len)
{
- pgoff_t pg_start, pg_end;
loff_t new_size;
int ret;
@@ -1197,25 +1232,17 @@
if (ret)
return ret;
- pg_start = offset >> PAGE_SHIFT;
- pg_end = (offset + len) >> PAGE_SHIFT;
-
- /* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
- down_write(&F2FS_I(inode)->i_mmap_sem);
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
if (ret)
- goto out_unlock;
+ return ret;
- truncate_pagecache(inode, offset);
-
- ret = f2fs_do_collapse(inode, pg_start, pg_end);
+ ret = f2fs_do_collapse(inode, offset, len);
if (ret)
- goto out_unlock;
+ return ret;
/* write out all moved pages, if possible */
+ down_write(&F2FS_I(inode)->i_mmap_sem);
filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
truncate_pagecache(inode, offset);
@@ -1223,11 +1250,9 @@
truncate_pagecache(inode, new_size);
ret = f2fs_truncate_blocks(inode, new_size, true);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
f2fs_i_size_write(inode, new_size);
-out_unlock:
- up_write(&F2FS_I(inode)->i_mmap_sem);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
return ret;
}
@@ -1293,12 +1318,9 @@
if (ret)
return ret;
- down_write(&F2FS_I(inode)->i_mmap_sem);
ret = filemap_write_and_wait_range(mapping, offset, offset + len - 1);
if (ret)
- goto out_sem;
-
- truncate_pagecache_range(inode, offset, offset + len - 1);
+ return ret;
pg_start = ((unsigned long long) offset) >> PAGE_SHIFT;
pg_end = ((unsigned long long) offset + len) >> PAGE_SHIFT;
@@ -1310,7 +1332,7 @@
ret = fill_zero(inode, pg_start, off_start,
off_end - off_start);
if (ret)
- goto out_sem;
+ return ret;
new_size = max_t(loff_t, new_size, offset + len);
} else {
@@ -1318,7 +1340,7 @@
ret = fill_zero(inode, pg_start++, off_start,
PAGE_SIZE - off_start);
if (ret)
- goto out_sem;
+ return ret;
new_size = max_t(loff_t, new_size,
(loff_t)pg_start << PAGE_SHIFT);
@@ -1329,12 +1351,21 @@
unsigned int end_offset;
pgoff_t end;
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+
+ truncate_pagecache_range(inode,
+ (loff_t)index << PAGE_SHIFT,
+ ((loff_t)pg_end << PAGE_SHIFT) - 1);
+
f2fs_lock_op(sbi);
set_new_dnode(&dn, inode, NULL, NULL, 0);
ret = f2fs_get_dnode_of_data(&dn, index, ALLOC_NODE);
if (ret) {
f2fs_unlock_op(sbi);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
}
@@ -1343,7 +1374,10 @@
ret = f2fs_do_zero_range(&dn, index, end);
f2fs_put_dnode(&dn);
+
f2fs_unlock_op(sbi);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
f2fs_balance_fs(sbi, dn.node_changed);
@@ -1371,9 +1405,6 @@
else
f2fs_i_size_write(inode, new_size);
}
-out_sem:
- up_write(&F2FS_I(inode)->i_mmap_sem);
-
return ret;
}
@@ -1402,26 +1433,27 @@
f2fs_balance_fs(sbi, true);
- /* avoid gc operation during block exchange */
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
down_write(&F2FS_I(inode)->i_mmap_sem);
ret = f2fs_truncate_blocks(inode, i_size_read(inode), true);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
if (ret)
- goto out;
+ return ret;
/* write out all dirty pages from offset */
ret = filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
if (ret)
- goto out;
-
- truncate_pagecache(inode, offset);
+ return ret;
pg_start = offset >> PAGE_SHIFT;
pg_end = (offset + len) >> PAGE_SHIFT;
delta = pg_end - pg_start;
idx = (i_size_read(inode) + PAGE_SIZE - 1) / PAGE_SIZE;
+ /* avoid gc operation during block exchange */
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ down_write(&F2FS_I(inode)->i_mmap_sem);
+ truncate_pagecache(inode, offset);
+
while (!ret && idx > pg_start) {
nr = idx - pg_start;
if (nr > delta)
@@ -1435,16 +1467,17 @@
idx + delta, nr, false);
f2fs_unlock_op(sbi);
}
+ up_write(&F2FS_I(inode)->i_mmap_sem);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
/* write out all moved pages, if possible */
+ down_write(&F2FS_I(inode)->i_mmap_sem);
filemap_write_and_wait_range(inode->i_mapping, offset, LLONG_MAX);
truncate_pagecache(inode, offset);
+ up_write(&F2FS_I(inode)->i_mmap_sem);
if (!ret)
f2fs_i_size_write(inode, new_size);
-out:
- up_write(&F2FS_I(inode)->i_mmap_sem);
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
return ret;
}
@@ -1600,7 +1633,7 @@
struct f2fs_inode_info *fi = F2FS_I(inode);
unsigned int flags = fi->i_flags;
- if (file_is_encrypt(inode))
+ if (f2fs_encrypted_inode(inode))
flags |= F2FS_ENCRYPT_FL;
if (f2fs_has_inline_data(inode) || f2fs_has_inline_dentry(inode))
flags |= F2FS_INLINE_DATA_FL;
@@ -1684,15 +1717,18 @@
inode_lock(inode);
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
-
- if (f2fs_is_atomic_file(inode))
+ if (f2fs_is_atomic_file(inode)) {
+ if (is_inode_flag_set(inode, FI_ATOMIC_REVOKE_REQUEST))
+ ret = -EINVAL;
goto out;
+ }
ret = f2fs_convert_inline_inode(inode);
if (ret)
goto out;
+ down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+
if (!get_dirty_pages(inode))
goto skip_flush;
@@ -1700,18 +1736,20 @@
"Unexpected flush for atomic writes: ino=%lu, npages=%u",
inode->i_ino, get_dirty_pages(inode));
ret = filemap_write_and_wait_range(inode->i_mapping, 0, LLONG_MAX);
- if (ret)
+ if (ret) {
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
goto out;
+ }
skip_flush:
set_inode_flag(inode, FI_ATOMIC_FILE);
clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
- f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ f2fs_update_time(F2FS_I_SB(inode), REQ_TIME);
F2FS_I(inode)->inmem_task = current;
stat_inc_atomic_write(inode);
stat_update_max_atomic_write(inode);
out:
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
@@ -1729,9 +1767,9 @@
if (ret)
return ret;
- inode_lock(inode);
+ f2fs_balance_fs(F2FS_I_SB(inode), true);
- down_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ inode_lock(inode);
if (f2fs_is_volatile_file(inode)) {
ret = -EINVAL;
@@ -1757,7 +1795,6 @@
clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
ret = -EINVAL;
}
- up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
inode_unlock(inode);
mnt_drop_write_file(filp);
return ret;
@@ -1849,6 +1886,8 @@
ret = f2fs_do_sync_file(filp, 0, LLONG_MAX, 0, true);
}
+ clear_inode_flag(inode, FI_ATOMIC_REVOKE_REQUEST);
+
inode_unlock(inode);
mnt_drop_write_file(filp);
@@ -1885,6 +1924,7 @@
}
if (sb) {
f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
thaw_bdev(sb->s_bdev, sb);
}
break;
@@ -1894,13 +1934,16 @@
if (ret)
goto out;
f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_NOSYNC:
f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
case F2FS_GOING_DOWN_METAFLUSH:
f2fs_sync_meta_pages(sbi, META, LONG_MAX, FS_META_IO);
f2fs_stop_checkpoint(sbi, false);
+ set_sbi_flag(sbi, SBI_IS_SHUTDOWN);
break;
default:
ret = -EINVAL;
@@ -2103,7 +2146,7 @@
return ret;
}
-static int f2fs_ioc_f2fs_write_checkpoint(struct file *filp, unsigned long arg)
+static int f2fs_ioc_write_checkpoint(struct file *filp, unsigned long arg)
{
struct inode *inode = file_inode(filp);
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
@@ -2347,15 +2390,10 @@
}
inode_lock(src);
- down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
if (src != dst) {
ret = -EBUSY;
if (!inode_trylock(dst))
goto out;
- if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE])) {
- inode_unlock(dst);
- goto out;
- }
}
ret = -EINVAL;
@@ -2400,6 +2438,14 @@
goto out_unlock;
f2fs_balance_fs(sbi, true);
+
+ down_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+ if (src != dst) {
+ ret = -EBUSY;
+ if (!down_write_trylock(&F2FS_I(dst)->i_gc_rwsem[WRITE]))
+ goto out_src;
+ }
+
f2fs_lock_op(sbi);
ret = __exchange_data_block(src, dst, pos_in >> F2FS_BLKSIZE_BITS,
pos_out >> F2FS_BLKSIZE_BITS,
@@ -2412,13 +2458,15 @@
f2fs_i_size_write(dst, dst_osize);
}
f2fs_unlock_op(sbi);
-out_unlock:
- if (src != dst) {
+
+ if (src != dst)
up_write(&F2FS_I(dst)->i_gc_rwsem[WRITE]);
- inode_unlock(dst);
- }
-out:
+out_src:
up_write(&F2FS_I(src)->i_gc_rwsem[WRITE]);
+out_unlock:
+ if (src != dst)
+ inode_unlock(dst);
+out:
inode_unlock(src);
return ret;
}
@@ -2590,7 +2638,7 @@
if (!pin) {
clear_inode_flag(inode, FI_PIN_FILE);
- F2FS_I(inode)->i_gc_failures[GC_FAILURE_PIN] = 1;
+ f2fs_i_gc_failures_write(inode, 0);
goto done;
}
@@ -2696,7 +2744,7 @@
case F2FS_IOC_GARBAGE_COLLECT_RANGE:
return f2fs_ioc_gc_range(filp, arg);
case F2FS_IOC_WRITE_CHECKPOINT:
- return f2fs_ioc_f2fs_write_checkpoint(filp, arg);
+ return f2fs_ioc_write_checkpoint(filp, arg);
case F2FS_IOC_DEFRAGMENT:
return f2fs_ioc_defragment(filp, arg);
case F2FS_IOC_MOVE_RANGE:
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 9be5b50..c6322ef 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -53,12 +53,10 @@
continue;
}
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
f2fs_show_injection_info(FAULT_CHECKPOINT);
f2fs_stop_checkpoint(sbi, false);
}
-#endif
if (!sb_start_write_trylock(sbi->sb))
continue;
@@ -517,7 +515,11 @@
continue;
}
- f2fs_get_node_info(sbi, nid, &ni);
+ if (f2fs_get_node_info(sbi, nid, &ni)) {
+ f2fs_put_page(node_page, 1);
+ continue;
+ }
+
if (ni.blk_addr != start_addr + off) {
f2fs_put_page(node_page, 1);
continue;
@@ -576,7 +578,10 @@
if (IS_ERR(node_page))
return false;
- f2fs_get_node_info(sbi, nid, dni);
+ if (f2fs_get_node_info(sbi, nid, dni)) {
+ f2fs_put_page(node_page, 1);
+ return false;
+ }
if (sum->version != dni->version) {
f2fs_msg(sbi->sb, KERN_WARNING,
@@ -594,6 +599,72 @@
return true;
}
+static int ra_data_block(struct inode *inode, pgoff_t index)
+{
+ struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct address_space *mapping = inode->i_mapping;
+ struct dnode_of_data dn;
+ struct page *page;
+ struct extent_info ei = {0, 0, 0};
+ struct f2fs_io_info fio = {
+ .sbi = sbi,
+ .ino = inode->i_ino,
+ .type = DATA,
+ .temp = COLD,
+ .op = REQ_OP_READ,
+ .op_flags = 0,
+ .encrypted_page = NULL,
+ .in_list = false,
+ .retry = false,
+ };
+ int err;
+
+ page = f2fs_grab_cache_page(mapping, index, true);
+ if (!page)
+ return -ENOMEM;
+
+ if (f2fs_lookup_extent_cache(inode, index, &ei)) {
+ dn.data_blkaddr = ei.blk + index - ei.fofs;
+ goto got_it;
+ }
+
+ set_new_dnode(&dn, inode, NULL, NULL, 0);
+ err = f2fs_get_dnode_of_data(&dn, index, LOOKUP_NODE);
+ if (err)
+ goto put_page;
+ f2fs_put_dnode(&dn);
+
+ if (unlikely(!f2fs_is_valid_blkaddr(sbi, dn.data_blkaddr,
+ DATA_GENERIC))) {
+ err = -EFAULT;
+ goto put_page;
+ }
+got_it:
+ /* read page */
+ fio.page = page;
+ fio.new_blkaddr = fio.old_blkaddr = dn.data_blkaddr;
+
+ fio.encrypted_page = f2fs_pagecache_get_page(META_MAPPING(sbi),
+ dn.data_blkaddr,
+ FGP_LOCK | FGP_CREAT, GFP_NOFS);
+ if (!fio.encrypted_page) {
+ err = -ENOMEM;
+ goto put_page;
+ }
+
+ err = f2fs_submit_page_bio(&fio);
+ if (err)
+ goto put_encrypted_page;
+ f2fs_put_page(fio.encrypted_page, 0);
+ f2fs_put_page(page, 1);
+ return 0;
+put_encrypted_page:
+ f2fs_put_page(fio.encrypted_page, 1);
+put_page:
+ f2fs_put_page(page, 1);
+ return err;
+}
+
/*
* Move data block via META_MAPPING while keeping locked data page.
* This can be used to move blocks, aka LBAs, directly on disk.
@@ -615,7 +686,7 @@
struct dnode_of_data dn;
struct f2fs_summary sum;
struct node_info ni;
- struct page *page;
+ struct page *page, *mpage;
block_t newaddr;
int err;
bool lfs_mode = test_opt(fio.sbi, LFS);
@@ -655,7 +726,10 @@
*/
f2fs_wait_on_page_writeback(page, DATA, true);
- f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(fio.sbi, dn.nid, &ni);
+ if (err)
+ goto put_out;
+
set_summary(&sum, dn.nid, dn.ofs_in_node, ni.version);
/* read page */
@@ -675,6 +749,23 @@
goto recover_block;
}
+ mpage = f2fs_pagecache_get_page(META_MAPPING(fio.sbi),
+ fio.old_blkaddr, FGP_LOCK, GFP_NOFS);
+ if (mpage) {
+ bool updated = false;
+
+ if (PageUptodate(mpage)) {
+ memcpy(page_address(fio.encrypted_page),
+ page_address(mpage), PAGE_SIZE);
+ updated = true;
+ }
+ f2fs_put_page(mpage, 1);
+ invalidate_mapping_pages(META_MAPPING(fio.sbi),
+ fio.old_blkaddr, fio.old_blkaddr);
+ if (updated)
+ goto write_page;
+ }
+
err = f2fs_submit_page_bio(&fio);
if (err)
goto put_page_out;
@@ -691,6 +782,7 @@
goto put_page_out;
}
+write_page:
set_page_dirty(fio.encrypted_page);
f2fs_wait_on_page_writeback(fio.encrypted_page, DATA, true);
if (clear_page_dirty_for_io(fio.encrypted_page))
@@ -865,22 +957,30 @@
if (IS_ERR(inode) || is_bad_inode(inode))
continue;
- /* if inode uses special I/O path, let's go phase 3 */
+ if (!down_write_trylock(
+ &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
+ iput(inode);
+ sbi->skipped_gc_rwsem++;
+ continue;
+ }
+
+ start_bidx = f2fs_start_bidx_of_node(nofs, inode) +
+ ofs_in_node;
+
if (f2fs_post_read_required(inode)) {
+ int err = ra_data_block(inode, start_bidx);
+
+ up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
+ if (err) {
+ iput(inode);
+ continue;
+ }
add_gc_inode(gc_list, inode);
continue;
}
- if (!down_write_trylock(
- &F2FS_I(inode)->i_gc_rwsem[WRITE])) {
- iput(inode);
- continue;
- }
-
- start_bidx = f2fs_start_bidx_of_node(nofs, inode);
data_page = f2fs_get_read_data_page(inode,
- start_bidx + ofs_in_node, REQ_RAHEAD,
- true);
+ start_bidx, REQ_RAHEAD, true);
up_write(&F2FS_I(inode)->i_gc_rwsem[WRITE]);
if (IS_ERR(data_page)) {
iput(inode);
@@ -903,6 +1003,7 @@
continue;
if (!down_write_trylock(
&fi->i_gc_rwsem[WRITE])) {
+ sbi->skipped_gc_rwsem++;
up_write(&fi->i_gc_rwsem[READ]);
continue;
}
@@ -1040,6 +1141,7 @@
.iroot = RADIX_TREE_INIT(GFP_NOFS),
};
unsigned long long last_skipped = sbi->skipped_atomic_files[FG_GC];
+ unsigned long long first_skipped;
unsigned int skipped_round = 0, round = 0;
trace_f2fs_gc_begin(sbi->sb, sync, background,
@@ -1052,6 +1154,8 @@
prefree_segments(sbi));
cpc.reason = __get_cp_reason(sbi);
+ sbi->skipped_gc_rwsem = 0;
+ first_skipped = last_skipped;
gc_more:
if (unlikely(!(sbi->sb->s_flags & MS_ACTIVE))) {
ret = -EINVAL;
@@ -1093,7 +1197,8 @@
total_freed += seg_freed;
if (gc_type == FG_GC) {
- if (sbi->skipped_atomic_files[FG_GC] > last_skipped)
+ if (sbi->skipped_atomic_files[FG_GC] > last_skipped ||
+ sbi->skipped_gc_rwsem)
skipped_round++;
last_skipped = sbi->skipped_atomic_files[FG_GC];
round++;
@@ -1102,15 +1207,23 @@
if (gc_type == FG_GC)
sbi->cur_victim_sec = NULL_SEGNO;
- if (!sync) {
- if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
- if (skipped_round > MAX_SKIP_ATOMIC_COUNT &&
- skipped_round * 2 >= round)
- f2fs_drop_inmem_pages_all(sbi, true);
+ if (sync)
+ goto stop;
+
+ if (has_not_enough_free_secs(sbi, sec_freed, 0)) {
+ if (skipped_round <= MAX_SKIP_GC_COUNT ||
+ skipped_round * 2 < round) {
segno = NULL_SEGNO;
goto gc_more;
}
+ if (first_skipped < last_skipped &&
+ (last_skipped - first_skipped) >
+ sbi->skipped_gc_rwsem) {
+ f2fs_drop_inmem_pages_all(sbi, true);
+ segno = NULL_SEGNO;
+ goto gc_more;
+ }
if (gc_type == FG_GC)
ret = f2fs_write_checkpoint(sbi, &cpc);
}
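
The reworked retry policy separates rounds stalled on i_gc_rwsem contention from rounds stalled on atomic files: GC keeps retrying while skips are rare, drops in-memory pages only when atomic-file skips dominate the rwsem skips, and falls through to a checkpoint otherwise. A sketch of that decision; the enum and threshold parameter are mine, not the kernel's:

enum gc_next { GC_RETRY, GC_DROP_INMEM_THEN_RETRY, GC_CHECKPOINT };

enum gc_next pick_next_step(unsigned round, unsigned skipped_round,
			    unsigned long long first_skipped,
			    unsigned long long last_skipped,
			    unsigned long long skipped_gc_rwsem,
			    unsigned max_skip)
{
	if (skipped_round <= max_skip || skipped_round * 2 < round)
		return GC_RETRY;	/* skips still rare: try again */
	if (first_skipped < last_skipped &&
	    last_skipped - first_skipped > skipped_gc_rwsem)
		return GC_DROP_INMEM_THEN_RETRY;	/* atomic files stall */
	return GC_CHECKPOINT;		/* FG_GC: write a checkpoint */
}
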
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index dd4cde8..df71d26 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -139,6 +139,7 @@
.encrypted_page = NULL,
.io_type = FS_DATA_IO,
};
+ struct node_info ni;
int dirty, err;
if (!f2fs_exist_data(dn->inode))
@@ -148,6 +149,14 @@
if (err)
return err;
+ err = f2fs_get_node_info(fio.sbi, dn->nid, &ni);
+ if (err) {
+ f2fs_put_dnode(dn);
+ return err;
+ }
+
+ fio.version = ni.version;
+
if (unlikely(dn->data_blkaddr != NEW_ADDR)) {
f2fs_put_dnode(dn);
set_sbi_flag(fio.sbi, SBI_NEED_FSCK);
@@ -516,6 +525,7 @@
return 0;
recover:
lock_page(ipage);
+ f2fs_wait_on_page_writeback(ipage, NODE, true);
memcpy(inline_dentry, backup_dentry, MAX_INLINE_DATA(dir));
f2fs_i_depth_write(dir, 0);
f2fs_i_size_write(dir, MAX_INLINE_DATA(dir));
@@ -707,7 +717,10 @@
ilen = start + len;
ilen -= start;
- f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+ err = f2fs_get_node_info(F2FS_I_SB(inode), inode->i_ino, &ni);
+ if (err)
+ goto out;
+
byteaddr = (__u64)ni.blk_addr << inode->i_sb->s_blocksize_bits;
byteaddr += (char *)inline_data_addr(inode, ipage) -
(char *)F2FS_INODE(ipage);
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 30a7773..959df22 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -68,13 +68,16 @@
}
}
-static bool __written_first_block(struct f2fs_inode *ri)
+static int __written_first_block(struct f2fs_sb_info *sbi,
+ struct f2fs_inode *ri)
{
block_t addr = le32_to_cpu(ri->i_addr[offset_in_addr(ri)]);
- if (is_valid_blkaddr(addr))
- return true;
- return false;
+ if (!__is_valid_data_blkaddr(addr))
+ return 1;
+ if (!f2fs_is_valid_blkaddr(sbi, addr, DATA_GENERIC))
+ return -EFAULT;
+ return 0;
}
static void __set_inode_rdev(struct inode *inode, struct f2fs_inode *ri)
@@ -121,7 +124,7 @@
if (!f2fs_sb_has_inode_chksum(sbi->sb))
return false;
- if (!RAW_IS_INODE(F2FS_NODE(page)) || !(ri->i_inline & F2FS_EXTRA_ATTR))
+ if (!IS_INODE(page) || !(ri->i_inline & F2FS_EXTRA_ATTR))
return false;
if (!F2FS_FITS_IN_INODE(ri, le16_to_cpu(ri->i_extra_isize),
@@ -159,8 +162,15 @@
struct f2fs_inode *ri;
__u32 provided, calculated;
+ if (unlikely(is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)))
+ return true;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ if (!f2fs_enable_inode_chksum(sbi, page))
+#else
if (!f2fs_enable_inode_chksum(sbi, page) ||
PageDirty(page) || PageWriteback(page))
+#endif
return true;
ri = &F2FS_NODE(page)->i;
@@ -185,9 +195,31 @@
ri->i_inode_checksum = cpu_to_le32(f2fs_inode_chksum(sbi, page));
}
-static bool sanity_check_inode(struct inode *inode)
+static bool sanity_check_inode(struct inode *inode, struct page *node_page)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+ struct f2fs_inode_info *fi = F2FS_I(inode);
+ unsigned long long iblocks;
+
+ iblocks = le64_to_cpu(F2FS_INODE(node_page)->i_blocks);
+ if (!iblocks) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: corrupted inode i_blocks i_ino=%lx iblocks=%llu, "
+ "run fsck to fix.",
+ __func__, inode->i_ino, iblocks);
+ return false;
+ }
+
+ if (ino_of_node(node_page) != nid_of_node(node_page)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: corrupted inode footer i_ino=%lx, ino,nid: "
+ "[%u, %u] run fsck to fix.",
+ __func__, inode->i_ino,
+ ino_of_node(node_page), nid_of_node(node_page));
+ return false;
+ }
if (f2fs_sb_has_flexible_inline_xattr(sbi->sb)
&& !f2fs_has_extra_attr(inode)) {
@@ -197,6 +229,64 @@
__func__, inode->i_ino);
return false;
}
+
+ if (f2fs_has_extra_attr(inode) &&
+ !f2fs_sb_has_extra_attr(sbi->sb)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx) is with extra_attr, "
+ "but extra_attr feature is off",
+ __func__, inode->i_ino);
+ return false;
+ }
+
+ if (fi->i_extra_isize > F2FS_TOTAL_EXTRA_ATTR_SIZE ||
+ fi->i_extra_isize % sizeof(__le32)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx) has corrupted i_extra_isize: %d, "
+ "max: %zu",
+ __func__, inode->i_ino, fi->i_extra_isize,
+ F2FS_TOTAL_EXTRA_ATTR_SIZE);
+ return false;
+ }
+
+ if (F2FS_I(inode)->extent_tree) {
+ struct extent_info *ei = &F2FS_I(inode)->extent_tree->largest;
+
+ if (ei->len &&
+ (!f2fs_is_valid_blkaddr(sbi, ei->blk, DATA_GENERIC) ||
+ !f2fs_is_valid_blkaddr(sbi, ei->blk + ei->len - 1,
+ DATA_GENERIC))) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx) extent info [%u, %u, %u] "
+ "is incorrect, run fsck to fix",
+ __func__, inode->i_ino,
+ ei->blk, ei->fofs, ei->len);
+ return false;
+ }
+ }
+
+ if (f2fs_has_inline_data(inode) &&
+ (!S_ISREG(inode->i_mode) && !S_ISLNK(inode->i_mode))) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx, mode=%u) should not have "
+ "inline_data, run fsck to fix",
+ __func__, inode->i_ino, inode->i_mode);
+ return false;
+ }
+
+ if (f2fs_has_inline_dentry(inode) && !S_ISDIR(inode->i_mode)) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "%s: inode (ino=%lx, mode=%u) should not have "
+ "inline_dentry, run fsck to fix",
+ __func__, inode->i_ino, inode->i_mode);
+ return false;
+ }
+
return true;
}
@@ -207,6 +297,7 @@
struct page *node_page;
struct f2fs_inode *ri;
projid_t i_projid;
+ int err;
/* Check if ino is within scope */
if (f2fs_check_nid_range(sbi, inode->i_ino))
@@ -268,6 +359,11 @@
fi->i_inline_xattr_size = 0;
}
+ if (!sanity_check_inode(inode, node_page)) {
+ f2fs_put_page(node_page, 1);
+ return -EINVAL;
+ }
+
/* check data exist */
if (f2fs_has_inline_data(inode) && !f2fs_exist_data(inode))
__recover_inline_status(inode, node_page);
@@ -275,8 +371,15 @@
/* get rdev by using inline_info */
__get_inode_rdev(inode, ri);
- if (__written_first_block(ri))
- set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ if (S_ISREG(inode->i_mode)) {
+ err = __written_first_block(sbi, ri);
+ if (err < 0) {
+ f2fs_put_page(node_page, 1);
+ return err;
+ }
+ if (!err)
+ set_inode_flag(inode, FI_FIRST_BLOCK_WRITTEN);
+ }
if (!f2fs_need_inode_block_update(sbi, inode->i_ino))
fi->last_disk_size = inode->i_size;
@@ -330,10 +433,6 @@
ret = do_read_inode(inode);
if (ret)
goto bad_inode;
- if (!sanity_check_inode(inode)) {
- ret = -EINVAL;
- goto bad_inode;
- }
make_now:
if (ino == F2FS_NODE_INO(sbi)) {
inode->i_mapping->a_ops = &f2fs_node_aops;
@@ -474,6 +573,10 @@
F2FS_I(inode)->i_disk_time[1] = inode->i_ctime;
F2FS_I(inode)->i_disk_time[2] = inode->i_mtime;
F2FS_I(inode)->i_disk_time[3] = F2FS_I(inode)->i_crtime;
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ f2fs_inode_chksum_set(F2FS_I_SB(inode), node_page);
+#endif
}
void f2fs_update_inode_page(struct inode *inode)
@@ -558,12 +661,11 @@
if (F2FS_HAS_BLOCKS(inode))
err = f2fs_truncate(inode);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_EVICT_INODE)) {
f2fs_show_injection_info(FAULT_EVICT_INODE);
err = -EIO;
}
-#endif
+
if (!err) {
f2fs_lock_op(sbi);
err = f2fs_remove_inode_page(inode);
@@ -626,6 +728,7 @@
{
struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
struct node_info ni;
+ int err;
/*
* clear nlink of inode in order to release resource of inode
@@ -648,10 +751,16 @@
* so we can prevent losing this orphan when encountering checkpoint
* and a following sudden power-off.
*/
- f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ err = f2fs_get_node_info(sbi, inode->i_ino, &ni);
+ if (err) {
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
+ f2fs_msg(sbi->sb, KERN_WARNING,
+ "May loss orphan inode, run fsck to fix.");
+ goto out;
+ }
if (ni.blk_addr != NULL_ADDR) {
- int err = f2fs_acquire_orphan_inode(sbi);
+ err = f2fs_acquire_orphan_inode(sbi);
if (err) {
set_sbi_flag(sbi, SBI_NEED_FSCK);
f2fs_msg(sbi->sb, KERN_WARNING,
@@ -664,6 +773,7 @@
set_inode_flag(inode, FI_FREE_NID);
}
+out:
f2fs_unlock_op(sbi);
/* iput will drop the inode object */
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index c53760e..56593b3 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -246,7 +246,7 @@
return -EINVAL;
if (hot) {
- strncpy(extlist[count], name, strlen(name));
+ memcpy(extlist[count], name, strlen(name));
sbi->raw_super->hot_ext_count = hot_count + 1;
} else {
char buf[F2FS_MAX_EXTENSION][F2FS_EXTENSION_LEN];
@@ -254,7 +254,7 @@
memcpy(buf, &extlist[cold_count],
F2FS_EXTENSION_LEN * hot_count);
memset(extlist[cold_count], 0, F2FS_EXTENSION_LEN);
- strncpy(extlist[cold_count], name, strlen(name));
+ memcpy(extlist[cold_count], name, strlen(name));
memcpy(&extlist[cold_count + 1], buf,
F2FS_EXTENSION_LEN * hot_count);
sbi->raw_super->extension_count = cpu_to_le32(cold_count + 1);
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index aab2dd2..f213a53 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -28,6 +28,7 @@
static struct kmem_cache *nat_entry_slab;
static struct kmem_cache *free_nid_slab;
static struct kmem_cache *nat_entry_set_slab;
+static struct kmem_cache *fsync_node_entry_slab;
/*
* Check whether the given nid is within node id range.
@@ -112,25 +113,22 @@
static struct page *get_current_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
- pgoff_t index = current_nat_addr(sbi, nid);
- return f2fs_get_meta_page(sbi, index);
+ return f2fs_get_meta_page_nofail(sbi, current_nat_addr(sbi, nid));
}
static struct page *get_next_nat_page(struct f2fs_sb_info *sbi, nid_t nid)
{
struct page *src_page;
struct page *dst_page;
- pgoff_t src_off;
pgoff_t dst_off;
void *src_addr;
void *dst_addr;
struct f2fs_nm_info *nm_i = NM_I(sbi);
- src_off = current_nat_addr(sbi, nid);
- dst_off = next_nat_addr(sbi, src_off);
+ dst_off = next_nat_addr(sbi, current_nat_addr(sbi, nid));
/* get current nat block page with lock */
- src_page = f2fs_get_meta_page(sbi, src_off);
+ src_page = get_current_nat_page(sbi, nid);
dst_page = f2fs_grab_meta_page(sbi, dst_off);
f2fs_bug_on(sbi, PageDirty(src_page));
@@ -176,14 +174,30 @@
if (raw_ne)
node_info_from_raw_nat(&ne->ni, raw_ne);
+
+ spin_lock(&nm_i->nat_list_lock);
list_add_tail(&ne->list, &nm_i->nat_entries);
+ spin_unlock(&nm_i->nat_list_lock);
+
nm_i->nat_cnt++;
return ne;
}
static struct nat_entry *__lookup_nat_cache(struct f2fs_nm_info *nm_i, nid_t n)
{
- return radix_tree_lookup(&nm_i->nat_root, n);
+ struct nat_entry *ne;
+
+ ne = radix_tree_lookup(&nm_i->nat_root, n);
+
+ /* for a recently accessed nat entry, move it to the tail of the LRU list */
+ if (ne && !get_nat_flag(ne, IS_DIRTY)) {
+ spin_lock(&nm_i->nat_list_lock);
+ if (!list_empty(&ne->list))
+ list_move_tail(&ne->list, &nm_i->nat_entries);
+ spin_unlock(&nm_i->nat_list_lock);
+ }
+
+ return ne;
}
static unsigned int __gang_lookup_nat_cache(struct f2fs_nm_info *nm_i,
@@ -194,7 +208,6 @@
static void __del_from_nat_cache(struct f2fs_nm_info *nm_i, struct nat_entry *e)
{
- list_del(&e->list);
radix_tree_delete(&nm_i->nat_root, nat_get_nid(e));
nm_i->nat_cnt--;
__free_nat_entry(e);
@@ -245,16 +258,21 @@
nm_i->dirty_nat_cnt++;
set_nat_flag(ne, IS_DIRTY, true);
refresh_list:
+ spin_lock(&nm_i->nat_list_lock);
if (new_ne)
list_del_init(&ne->list);
else
list_move_tail(&ne->list, &head->entry_list);
+ spin_unlock(&nm_i->nat_list_lock);
}
static void __clear_nat_cache_dirty(struct f2fs_nm_info *nm_i,
struct nat_entry_set *set, struct nat_entry *ne)
{
+ spin_lock(&nm_i->nat_list_lock);
list_move_tail(&ne->list, &nm_i->nat_entries);
+ spin_unlock(&nm_i->nat_list_lock);
+
set_nat_flag(ne, IS_DIRTY, false);
set->entry_cnt--;
nm_i->dirty_nat_cnt--;
@@ -267,6 +285,72 @@
start, nr);
}
+bool f2fs_in_warm_node_list(struct f2fs_sb_info *sbi, struct page *page)
+{
+ return NODE_MAPPING(sbi) == page->mapping &&
+ IS_DNODE(page) && is_cold_node(page);
+}
+
+void f2fs_init_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+ spin_lock_init(&sbi->fsync_node_lock);
+ INIT_LIST_HEAD(&sbi->fsync_node_list);
+ sbi->fsync_seg_id = 0;
+ sbi->fsync_node_num = 0;
+}
+
+static unsigned int f2fs_add_fsync_node_entry(struct f2fs_sb_info *sbi,
+ struct page *page)
+{
+ struct fsync_node_entry *fn;
+ unsigned long flags;
+ unsigned int seq_id;
+
+ fn = f2fs_kmem_cache_alloc(fsync_node_entry_slab, GFP_NOFS);
+
+ get_page(page);
+ fn->page = page;
+ INIT_LIST_HEAD(&fn->list);
+
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ list_add_tail(&fn->list, &sbi->fsync_node_list);
+ fn->seq_id = sbi->fsync_seg_id++;
+ seq_id = fn->seq_id;
+ sbi->fsync_node_num++;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+
+ return seq_id;
+}
+
+void f2fs_del_fsync_node_entry(struct f2fs_sb_info *sbi, struct page *page)
+{
+ struct fsync_node_entry *fn;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ list_for_each_entry(fn, &sbi->fsync_node_list, list) {
+ if (fn->page == page) {
+ list_del(&fn->list);
+ sbi->fsync_node_num--;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ kmem_cache_free(fsync_node_entry_slab, fn);
+ put_page(page);
+ return;
+ }
+ }
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ f2fs_bug_on(sbi, 1);
+}
+
+void f2fs_reset_fsync_node_info(struct f2fs_sb_info *sbi)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ sbi->fsync_seg_id = 0;
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+}
+
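
The helpers above keep a per-superblock list of fsync node pages, each stamped with an increasing sequence id taken under fsync_node_lock. A rough user-space model of the queue side, assuming simplified types; the real code additionally takes a page reference and uses spin_lock_irqsave:

#include <pthread.h>
#include <stdlib.h>

struct fsync_entry {
        struct fsync_entry *next;
        void *page;             /* stand-in for struct page * */
        unsigned int seq_id;    /* stamp taken when the write is queued */
};

struct fsync_list {
        pthread_mutex_t lock;   /* models sbi->fsync_node_lock */
        struct fsync_entry *head, *tail;
        unsigned int next_seq;  /* models sbi->fsync_seg_id */
};

static struct fsync_list fl = { .lock = PTHREAD_MUTEX_INITIALIZER };

/* Append a written node page, e.g. add_fsync_entry(&fl, pg); the
 * returned stamp is what fsync later passes to the wait side to
 * bound how far it must wait. */
static unsigned int add_fsync_entry(struct fsync_list *l, void *page)
{
        struct fsync_entry *fn = calloc(1, sizeof(*fn));
        unsigned int seq;

        fn->page = page;
        pthread_mutex_lock(&l->lock);
        fn->seq_id = l->next_seq++;
        seq = fn->seq_id;
        if (l->tail)
                l->tail->next = fn;
        else
                l->head = fn;
        l->tail = fn;
        pthread_mutex_unlock(&l->lock);
        return seq;
}
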
int f2fs_need_dentry_mark(struct f2fs_sb_info *sbi, nid_t nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -371,7 +455,7 @@
new_blkaddr == NULL_ADDR);
f2fs_bug_on(sbi, nat_get_blkaddr(e) == NEW_ADDR &&
new_blkaddr == NEW_ADDR);
- f2fs_bug_on(sbi, is_valid_blkaddr(nat_get_blkaddr(e)) &&
+ f2fs_bug_on(sbi, is_valid_data_blkaddr(sbi, nat_get_blkaddr(e)) &&
new_blkaddr == NEW_ADDR);
/* increment version no as node is removed */
@@ -382,7 +466,7 @@
/* change address */
nat_set_blkaddr(e, new_blkaddr);
- if (!is_valid_blkaddr(new_blkaddr))
+ if (!is_valid_data_blkaddr(sbi, new_blkaddr))
set_nat_flag(e, IS_CHECKPOINTED, false);
__set_nat_cache_dirty(nm_i, e);
@@ -405,13 +489,25 @@
if (!down_write_trylock(&nm_i->nat_tree_lock))
return 0;
- while (nr_shrink && !list_empty(&nm_i->nat_entries)) {
+ spin_lock(&nm_i->nat_list_lock);
+ while (nr_shrink) {
struct nat_entry *ne;
+
+ if (list_empty(&nm_i->nat_entries))
+ break;
+
ne = list_first_entry(&nm_i->nat_entries,
struct nat_entry, list);
+ list_del(&ne->list);
+ spin_unlock(&nm_i->nat_list_lock);
+
__del_from_nat_cache(nm_i, ne);
nr_shrink--;
+
+ spin_lock(&nm_i->nat_list_lock);
}
+ spin_unlock(&nm_i->nat_list_lock);
+
up_write(&nm_i->nat_tree_lock);
return nr - nr_shrink;
}
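
f2fs_try_to_free_nats() now detaches one entry at a time under the new nat_list_lock and drops that lock before the teardown, keeping LRU list lock hold times short. The detach-then-free shape, sketched in plain C with a pthread mutex standing in for the spinlock:

#include <pthread.h>
#include <stdlib.h>

struct entry { struct entry *next; };

struct lru {
        pthread_mutex_t list_lock;      /* models nm_i->nat_list_lock */
        struct entry *head;
};

static int shrink(struct lru *l, int nr)
{
        int freed = 0;

        while (nr--) {
                struct entry *e;

                pthread_mutex_lock(&l->list_lock);
                e = l->head;
                if (!e) {
                        pthread_mutex_unlock(&l->list_lock);
                        break;
                }
                l->head = e->next;              /* list_del() equivalent */
                pthread_mutex_unlock(&l->list_lock);

                free(e);        /* __del_from_nat_cache() stand-in */
                freed++;
        }
        return freed;
}

Note that in the kernel code nat_tree_lock is still taken around the whole loop; only the list manipulation moved under the finer-grained lock.
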
@@ -419,7 +515,7 @@
/*
* This function always returns success
*/
-void f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
+int f2fs_get_node_info(struct f2fs_sb_info *sbi, nid_t nid,
struct node_info *ni)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -443,7 +539,7 @@
ni->blk_addr = nat_get_blkaddr(e);
ni->version = nat_get_version(e);
up_read(&nm_i->nat_tree_lock);
- return;
+ return 0;
}
memset(&ne, 0, sizeof(struct f2fs_nat_entry));
@@ -466,6 +562,9 @@
up_read(&nm_i->nat_tree_lock);
page = f2fs_get_meta_page(sbi, index);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
nat_blk = (struct f2fs_nat_block *)page_address(page);
ne = nat_blk->entries[nid - start_nid];
node_info_from_raw_nat(ni, &ne);
@@ -473,6 +572,7 @@
cache:
/* cache nat entry */
cache_nat_entry(sbi, nid, &ne);
+ return 0;
}
/*
@@ -722,12 +822,15 @@
return err;
}
-static void truncate_node(struct dnode_of_data *dn)
+static int truncate_node(struct dnode_of_data *dn)
{
struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
struct node_info ni;
+ int err;
- f2fs_get_node_info(sbi, dn->nid, &ni);
+ err = f2fs_get_node_info(sbi, dn->nid, &ni);
+ if (err)
+ return err;
/* Deallocate node address */
f2fs_invalidate_blocks(sbi, ni.blk_addr);
@@ -750,11 +853,14 @@
dn->node_page = NULL;
trace_f2fs_truncate_node(dn->inode, dn->nid, ni.blk_addr);
+
+ return 0;
}
static int truncate_dnode(struct dnode_of_data *dn)
{
struct page *page;
+ int err;
if (dn->nid == 0)
return 1;
@@ -770,7 +876,10 @@
dn->node_page = page;
dn->ofs_in_node = 0;
f2fs_truncate_data_blocks(dn);
- truncate_node(dn);
+ err = truncate_node(dn);
+ if (err)
+ return err;
+
return 1;
}
@@ -835,7 +944,9 @@
if (!ofs) {
/* remove current indirect node */
dn->node_page = page;
- truncate_node(dn);
+ ret = truncate_node(dn);
+ if (ret)
+ goto out_err;
freed++;
} else {
f2fs_put_page(page, 1);
@@ -893,7 +1004,9 @@
if (offset[idx + 1] == 0) {
dn->node_page = pages[idx];
dn->nid = nid[idx];
- truncate_node(dn);
+ err = truncate_node(dn);
+ if (err)
+ goto fail;
} else {
f2fs_put_page(pages[idx], 1);
}
@@ -1014,6 +1127,7 @@
nid_t nid = F2FS_I(inode)->i_xattr_nid;
struct dnode_of_data dn;
struct page *npage;
+ int err;
if (!nid)
return 0;
@@ -1022,10 +1136,15 @@
if (IS_ERR(npage))
return PTR_ERR(npage);
+ set_new_dnode(&dn, inode, NULL, npage, nid);
+ err = truncate_node(&dn);
+ if (err) {
+ f2fs_put_page(npage, 1);
+ return err;
+ }
+
f2fs_i_xnid_write(inode, 0);
- set_new_dnode(&dn, inode, NULL, npage, nid);
- truncate_node(&dn);
return 0;
}
@@ -1055,11 +1174,19 @@
f2fs_truncate_data_blocks_range(&dn, 1);
/* 0 is possible, after f2fs_new_inode() has failed */
+ if (unlikely(f2fs_cp_error(F2FS_I_SB(inode)))) {
+ f2fs_put_dnode(&dn);
+ return -EIO;
+ }
f2fs_bug_on(F2FS_I_SB(inode),
inode->i_blocks != 0 && inode->i_blocks != 8);
/* will put inode & node pages */
- truncate_node(&dn);
+ err = truncate_node(&dn);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
+ }
return 0;
}
@@ -1092,7 +1219,11 @@
goto fail;
#ifdef CONFIG_F2FS_CHECK_FS
- f2fs_get_node_info(sbi, dn->nid, &new_ni);
+ err = f2fs_get_node_info(sbi, dn->nid, &new_ni);
+ if (err) {
+ dec_valid_node_count(sbi, dn->inode, !ofs);
+ goto fail;
+ }
f2fs_bug_on(sbi, new_ni.blk_addr != NULL_ADDR);
#endif
new_ni.nid = dn->nid;
@@ -1140,13 +1271,21 @@
.page = page,
.encrypted_page = NULL,
};
+ int err;
- if (PageUptodate(page))
+ if (PageUptodate(page)) {
+#ifdef CONFIG_F2FS_CHECK_FS
+ f2fs_bug_on(sbi, !f2fs_inode_chksum_verify(sbi, page));
+#endif
return LOCKED_PAGE;
+ }
- f2fs_get_node_info(sbi, page->index, &ni);
+ err = f2fs_get_node_info(sbi, page->index, &ni);
+ if (err)
+ return err;
- if (unlikely(ni.blk_addr == NULL_ADDR)) {
+ if (unlikely(ni.blk_addr == NULL_ADDR) ||
+ is_sbi_flag_set(sbi, SBI_IS_SHUTDOWN)) {
ClearPageUptodate(page);
return -ENOENT;
}
@@ -1348,7 +1487,7 @@
static int __write_node_page(struct page *page, bool atomic, bool *submitted,
struct writeback_control *wbc, bool do_balance,
- enum iostat_type io_type)
+ enum iostat_type io_type, unsigned int *seq_id)
{
struct f2fs_sb_info *sbi = F2FS_P_SB(page);
nid_t nid;
@@ -1365,6 +1504,7 @@
.io_type = io_type,
.io_wbc = wbc,
};
+ unsigned int seq;
trace_f2fs_writepage(page, NODE);
@@ -1374,10 +1514,17 @@
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
goto redirty_out;
+ if (wbc->sync_mode == WB_SYNC_NONE &&
+ IS_DNODE(page) && is_cold_node(page))
+ goto redirty_out;
+
/* get old block addr of this node page */
nid = nid_of_node(page);
f2fs_bug_on(sbi, page->index != nid);
+ if (f2fs_get_node_info(sbi, nid, &ni))
+ goto redirty_out;
+
if (wbc->for_reclaim) {
if (!down_read_trylock(&sbi->node_write))
goto redirty_out;
@@ -1385,8 +1532,6 @@
down_read(&sbi->node_write);
}
- f2fs_get_node_info(sbi, nid, &ni);
-
/* This page is already truncated */
if (unlikely(ni.blk_addr == NULL_ADDR)) {
ClearPageUptodate(page);
@@ -1396,11 +1541,22 @@
return 0;
}
+ if (__is_valid_data_blkaddr(ni.blk_addr) &&
+ !f2fs_is_valid_blkaddr(sbi, ni.blk_addr, DATA_GENERIC))
+ goto redirty_out;
+
if (atomic && !test_opt(sbi, NOBARRIER))
fio.op_flags |= REQ_PREFLUSH | REQ_FUA;
set_page_writeback(page);
ClearPageError(page);
+
+ if (f2fs_in_warm_node_list(sbi, page)) {
+ seq = f2fs_add_fsync_node_entry(sbi, page);
+ if (seq_id)
+ *seq_id = seq;
+ }
+
fio.old_blkaddr = ni.blk_addr;
f2fs_do_write_node_page(nid, &fio);
set_node_addr(sbi, &ni, fio.new_blkaddr, is_fsync_dnode(page));
@@ -1448,7 +1604,7 @@
goto out_page;
if (__write_node_page(node_page, false, NULL,
- &wbc, false, FS_GC_NODE_IO))
+ &wbc, false, FS_GC_NODE_IO, NULL))
unlock_page(node_page);
goto release_page;
} else {
@@ -1465,11 +1621,13 @@
static int f2fs_write_node_page(struct page *page,
struct writeback_control *wbc)
{
- return __write_node_page(page, false, NULL, wbc, false, FS_NODE_IO);
+ return __write_node_page(page, false, NULL, wbc, false,
+ FS_NODE_IO, NULL);
}
int f2fs_fsync_node_pages(struct f2fs_sb_info *sbi, struct inode *inode,
- struct writeback_control *wbc, bool atomic)
+ struct writeback_control *wbc, bool atomic,
+ unsigned int *seq_id)
{
pgoff_t index;
pgoff_t last_idx = ULONG_MAX;
@@ -1550,7 +1708,7 @@
ret = __write_node_page(page, atomic &&
page == last_page,
&submitted, wbc, true,
- FS_NODE_IO);
+ FS_NODE_IO, seq_id);
if (ret) {
unlock_page(page);
f2fs_put_page(last_page, 0);
@@ -1667,7 +1825,7 @@
set_dentry_mark(page, 0);
ret = __write_node_page(page, false, &submitted,
- wbc, do_balance, io_type);
+ wbc, do_balance, io_type, NULL);
if (ret)
unlock_page(page);
else if (submitted)
@@ -1686,10 +1844,12 @@
}
if (step < 2) {
+ if (wbc->sync_mode == WB_SYNC_NONE && step == 1)
+ goto out;
step++;
goto next_step;
}
-
+out:
if (nwritten)
f2fs_submit_merged_write(sbi, NODE);
@@ -1698,35 +1858,46 @@
return ret;
}
-int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi, nid_t ino)
+int f2fs_wait_on_node_pages_writeback(struct f2fs_sb_info *sbi,
+ unsigned int seq_id)
{
- pgoff_t index = 0;
- struct pagevec pvec;
+ struct fsync_node_entry *fn;
+ struct page *page;
+ struct list_head *head = &sbi->fsync_node_list;
+ unsigned long flags;
+ unsigned int cur_seq_id = 0;
int ret2, ret = 0;
- int nr_pages;
- pagevec_init(&pvec, 0);
-
- while ((nr_pages = pagevec_lookup_tag(&pvec, NODE_MAPPING(sbi), &index,
- PAGECACHE_TAG_WRITEBACK))) {
- int i;
-
- for (i = 0; i < nr_pages; i++) {
- struct page *page = pvec.pages[i];
-
- if (ino && ino_of_node(page) == ino) {
- f2fs_wait_on_page_writeback(page, NODE, true);
- if (TestClearPageError(page))
- ret = -EIO;
- }
+ while (seq_id && cur_seq_id < seq_id) {
+ spin_lock_irqsave(&sbi->fsync_node_lock, flags);
+ if (list_empty(head)) {
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ break;
}
- pagevec_release(&pvec);
- cond_resched();
+ fn = list_first_entry(head, struct fsync_node_entry, list);
+ if (fn->seq_id > seq_id) {
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+ break;
+ }
+ cur_seq_id = fn->seq_id;
+ page = fn->page;
+ get_page(page);
+ spin_unlock_irqrestore(&sbi->fsync_node_lock, flags);
+
+ f2fs_wait_on_page_writeback(page, NODE, true);
+ if (TestClearPageError(page))
+ ret = -EIO;
+
+ put_page(page);
+
+ if (ret)
+ break;
}
ret2 = filemap_check_errors(NODE_MAPPING(sbi));
if (!ret)
ret = ret2;
+
return ret;
}
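
The rewritten wait path no longer scans the page cache for writeback-tagged pages; it walks the sequence-ordered fsync list and stops at the first entry stamped later than the caller's seq_id. A simplified, self-contained model; unlike the patch, where the writeback end path removes entries via f2fs_del_fsync_node_entry(), this sketch dequeues them itself:

#include <pthread.h>
#include <stdlib.h>

struct fsync_entry {            /* same toy shape as the earlier sketch */
        struct fsync_entry *next;
        void *page;
        unsigned int seq_id;
};

struct fsync_list {
        pthread_mutex_t lock;
        struct fsync_entry *head;
};

/* hypothetical stand-in for f2fs_wait_on_page_writeback() */
static void wait_for_page(void *page)
{
        (void)page;
}

/* Wait only for entries stamped up to seq_id; anything queued later
 * belongs to someone else's fsync and is left alone. */
static void wait_up_to(struct fsync_list *l, unsigned int seq_id)
{
        unsigned int cur = 0;

        while (seq_id && cur < seq_id) {
                struct fsync_entry *fn;

                pthread_mutex_lock(&l->lock);
                fn = l->head;
                if (!fn || fn->seq_id > seq_id) {
                        pthread_mutex_unlock(&l->lock);
                        break;
                }
                cur = fn->seq_id;
                l->head = fn->next;     /* in the patch, the endio path does this */
                pthread_mutex_unlock(&l->lock);

                wait_for_page(fn->page);
                free(fn);
        }
}
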
@@ -1776,6 +1947,10 @@
if (!PageUptodate(page))
SetPageUptodate(page);
+#ifdef CONFIG_F2FS_CHECK_FS
+ if (IS_INODE(page))
+ f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+#endif
if (!PageDirty(page)) {
__set_page_dirty_nobuffers(page);
inc_page_count(F2FS_P_SB(page), F2FS_DIRTY_NODES);
@@ -1970,7 +2145,7 @@
kmem_cache_free(free_nid_slab, i);
}
-static void scan_nat_page(struct f2fs_sb_info *sbi,
+static int scan_nat_page(struct f2fs_sb_info *sbi,
struct page *nat_page, nid_t start_nid)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
@@ -1988,7 +2163,10 @@
break;
blk_addr = le32_to_cpu(nat_blk->entries[i].block_addr);
- f2fs_bug_on(sbi, blk_addr == NEW_ADDR);
+
+ if (blk_addr == NEW_ADDR)
+ return -EINVAL;
+
if (blk_addr == NULL_ADDR) {
add_free_nid(sbi, start_nid, true, true);
} else {
@@ -1997,6 +2175,8 @@
spin_unlock(&NM_I(sbi)->nid_list_lock);
}
}
+
+ return 0;
}
static void scan_curseg_cache(struct f2fs_sb_info *sbi)
@@ -2052,11 +2232,11 @@
up_read(&nm_i->nat_tree_lock);
}
-static void __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
+static int __f2fs_build_free_nids(struct f2fs_sb_info *sbi,
bool sync, bool mount)
{
struct f2fs_nm_info *nm_i = NM_I(sbi);
- int i = 0;
+ int i = 0, ret;
nid_t nid = nm_i->next_scan_nid;
if (unlikely(nid >= nm_i->max_nid))
@@ -2064,17 +2244,17 @@
/* Enough entries */
if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
- return;
+ return 0;
if (!sync && !f2fs_available_free_memory(sbi, FREE_NIDS))
- return;
+ return 0;
if (!mount) {
/* try to find free nids in free_nid_bitmap */
scan_free_nid_bits(sbi);
if (nm_i->nid_cnt[FREE_NID] >= NAT_ENTRY_PER_BLOCK)
- return;
+ return 0;
}
/* readahead nat pages to be scanned */
@@ -2088,8 +2268,16 @@
nm_i->nat_block_bitmap)) {
struct page *page = get_current_nat_page(sbi, nid);
- scan_nat_page(sbi, page, nid);
+ ret = scan_nat_page(sbi, page, nid);
f2fs_put_page(page, 1);
+
+ if (ret) {
+ up_read(&nm_i->nat_tree_lock);
+ f2fs_bug_on(sbi, !mount);
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "NAT is corrupt, run fsck to fix it");
+ return -EINVAL;
+ }
}
nid += (NAT_ENTRY_PER_BLOCK - (nid % NAT_ENTRY_PER_BLOCK));
@@ -2110,13 +2298,19 @@
f2fs_ra_meta_pages(sbi, NAT_BLOCK_OFFSET(nm_i->next_scan_nid),
nm_i->ra_nid_pages, META_NAT, false);
+
+ return 0;
}
-void f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
+int f2fs_build_free_nids(struct f2fs_sb_info *sbi, bool sync, bool mount)
{
+ int ret;
+
mutex_lock(&NM_I(sbi)->build_lock);
- __f2fs_build_free_nids(sbi, sync, mount);
+ ret = __f2fs_build_free_nids(sbi, sync, mount);
mutex_unlock(&NM_I(sbi)->build_lock);
+
+ return ret;
}
/*
@@ -2129,12 +2323,11 @@
struct f2fs_nm_info *nm_i = NM_I(sbi);
struct free_nid *i = NULL;
retry:
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_ALLOC_NID)) {
f2fs_show_injection_info(FAULT_ALLOC_NID);
return false;
}
-#endif
+
spin_lock(&nm_i->nid_list_lock);
if (unlikely(nm_i->available_nids == 0)) {
@@ -2279,12 +2472,16 @@
struct dnode_of_data dn;
struct node_info ni;
struct page *xpage;
+ int err;
if (!prev_xnid)
goto recover_xnid;
/* 1: invalidate the previous xattr nid */
- f2fs_get_node_info(sbi, prev_xnid, &ni);
+ err = f2fs_get_node_info(sbi, prev_xnid, &ni);
+ if (err)
+ return err;
+
f2fs_invalidate_blocks(sbi, ni.blk_addr);
dec_valid_node_count(sbi, inode, false);
set_node_addr(sbi, &ni, NULL_ADDR, false);
@@ -2319,8 +2516,11 @@
nid_t ino = ino_of_node(page);
struct node_info old_ni, new_ni;
struct page *ipage;
+ int err;
- f2fs_get_node_info(sbi, ino, &old_ni);
+ err = f2fs_get_node_info(sbi, ino, &old_ni);
+ if (err)
+ return err;
if (unlikely(old_ni.blk_addr != NULL_ADDR))
return -EINVAL;
@@ -2374,7 +2574,7 @@
return 0;
}
-void f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
+int f2fs_restore_node_summary(struct f2fs_sb_info *sbi,
unsigned int segno, struct f2fs_summary_block *sum)
{
struct f2fs_node *rn;
@@ -2396,6 +2596,9 @@
for (idx = addr; idx < addr + nrpages; idx++) {
struct page *page = f2fs_get_tmp_page(sbi, idx);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
+
rn = F2FS_NODE(page);
sum_entry->nid = rn->footer.nid;
sum_entry->version = 0;
@@ -2407,6 +2610,7 @@
invalidate_mapping_pages(META_MAPPING(sbi), addr,
addr + nrpages);
}
+ return 0;
}
static void remove_nats_in_journal(struct f2fs_sb_info *sbi)
@@ -2584,6 +2788,13 @@
nid_t set_idx = 0;
LIST_HEAD(sets);
+ /* during unmount, let's flush nat_bits before checking dirty_nat_cnt */
+ if (enabled_nat_bits(sbi, cpc)) {
+ down_write(&nm_i->nat_tree_lock);
+ remove_nats_in_journal(sbi);
+ up_write(&nm_i->nat_tree_lock);
+ }
+
if (!nm_i->dirty_nat_cnt)
return;
@@ -2636,7 +2847,13 @@
nat_bits_addr = __start_cp_addr(sbi) + sbi->blocks_per_seg -
nm_i->nat_bits_blocks;
for (i = 0; i < nm_i->nat_bits_blocks; i++) {
- struct page *page = f2fs_get_meta_page(sbi, nat_bits_addr++);
+ struct page *page;
+
+ page = f2fs_get_meta_page(sbi, nat_bits_addr++);
+ if (IS_ERR(page)) {
+ disable_nat_bits(sbi, true);
+ return PTR_ERR(page);
+ }
memcpy(nm_i->nat_bits + (i << F2FS_BLKSIZE_BITS),
page_address(page), F2FS_BLKSIZE);
@@ -2720,6 +2937,7 @@
INIT_RADIX_TREE(&nm_i->nat_root, GFP_NOIO);
INIT_RADIX_TREE(&nm_i->nat_set_root, GFP_NOIO);
INIT_LIST_HEAD(&nm_i->nat_entries);
+ spin_lock_init(&nm_i->nat_list_lock);
mutex_init(&nm_i->build_lock);
spin_lock_init(&nm_i->nid_list_lock);
@@ -2764,8 +2982,8 @@
for (i = 0; i < nm_i->nat_blocks; i++) {
nm_i->free_nid_bitmap[i] = f2fs_kvzalloc(sbi,
- NAT_ENTRY_BITMAP_SIZE_ALIGNED, GFP_KERNEL);
- if (!nm_i->free_nid_bitmap)
+ f2fs_bitmap_size(NAT_ENTRY_PER_BLOCK), GFP_KERNEL);
+ if (!nm_i->free_nid_bitmap[i])
return -ENOMEM;
}
@@ -2803,8 +3021,7 @@
/* load free nid status from nat_bits table */
load_free_nid_bitmap(sbi);
- f2fs_build_free_nids(sbi, true, true);
- return 0;
+ return f2fs_build_free_nids(sbi, true, true);
}
void f2fs_destroy_node_manager(struct f2fs_sb_info *sbi)
@@ -2839,8 +3056,13 @@
unsigned idx;
nid = nat_get_nid(natvec[found - 1]) + 1;
- for (idx = 0; idx < found; idx++)
+ for (idx = 0; idx < found; idx++) {
+ spin_lock(&nm_i->nat_list_lock);
+ list_del(&natvec[idx]->list);
+ spin_unlock(&nm_i->nat_list_lock);
+
__del_from_nat_cache(nm_i, natvec[idx]);
+ }
}
f2fs_bug_on(sbi, nm_i->nat_cnt);
@@ -2895,8 +3117,15 @@
sizeof(struct nat_entry_set));
if (!nat_entry_set_slab)
goto destroy_free_nid;
+
+ fsync_node_entry_slab = f2fs_kmem_cache_create("fsync_node_entry",
+ sizeof(struct fsync_node_entry));
+ if (!fsync_node_entry_slab)
+ goto destroy_nat_entry_set;
return 0;
+destroy_nat_entry_set:
+ kmem_cache_destroy(nat_entry_set_slab);
destroy_free_nid:
kmem_cache_destroy(free_nid_slab);
destroy_nat_entry:
@@ -2907,6 +3136,7 @@
void f2fs_destroy_node_manager_caches(void)
{
+ kmem_cache_destroy(fsync_node_entry_slab);
kmem_cache_destroy(nat_entry_set_slab);
kmem_cache_destroy(free_nid_slab);
kmem_cache_destroy(nat_entry_slab);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index b95e49e..0f4db7a 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -135,6 +135,11 @@
return NM_I(sbi)->nat_cnt >= DEF_NAT_CACHE_THRESHOLD;
}
+static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
+{
+ return get_pages(sbi, F2FS_DIRTY_NODES) >= sbi->blocks_per_seg * 8;
+}
+
enum mem_type {
FREE_NIDS, /* indicates the free nid list */
NAT_ENTRIES, /* indicates the cached nat entry */
@@ -444,6 +449,10 @@
else
flag &= ~(0x1 << type);
rn->footer.flag = cpu_to_le32(flag);
+
+#ifdef CONFIG_F2FS_CHECK_FS
+ f2fs_inode_chksum_set(F2FS_P_SB(page), page);
+#endif
}
#define set_dentry_mark(page, mark) set_mark(page, mark, DENT_BIT_SHIFT)
#define set_fsync_mark(page, mark) set_mark(page, mark, FSYNC_BIT_SHIFT)
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index daf81d4..501bb0f 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -241,8 +241,8 @@
struct page *page = NULL;
block_t blkaddr;
unsigned int loop_cnt = 0;
- unsigned int free_blocks = sbi->user_block_count -
- valid_user_blocks(sbi);
+ unsigned int free_blocks = MAIN_SEGS(sbi) * sbi->blocks_per_seg -
+ valid_user_blocks(sbi);
int err = 0;
/* get node pages in the current segment */
@@ -252,10 +252,14 @@
while (1) {
struct fsync_inode_entry *entry;
- if (!f2fs_is_valid_meta_blkaddr(sbi, blkaddr, META_POR))
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
return 0;
page = f2fs_get_tmp_page(sbi, blkaddr);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ break;
+ }
if (!is_recoverable_dnode(page))
break;
@@ -471,7 +475,10 @@
f2fs_wait_on_page_writeback(dn.node_page, NODE, true);
- f2fs_get_node_info(sbi, dn.nid, &ni);
+ err = f2fs_get_node_info(sbi, dn.nid, &ni);
+ if (err)
+ goto err;
+
f2fs_bug_on(sbi, ni.ino != ino_of_node(page));
f2fs_bug_on(sbi, ofs_of_node(dn.node_page) != ofs_of_node(page));
@@ -507,14 +514,13 @@
}
/* dest is valid block, try to recover from src to dest */
- if (f2fs_is_valid_meta_blkaddr(sbi, dest, META_POR)) {
+ if (f2fs_is_valid_blkaddr(sbi, dest, META_POR)) {
if (src == NULL_ADDR) {
err = f2fs_reserve_new_block(&dn);
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- while (err)
+ while (err &&
+ IS_ENABLED(CONFIG_F2FS_FAULT_INJECTION))
err = f2fs_reserve_new_block(&dn);
-#endif
/* We should not get -ENOSPC */
f2fs_bug_on(sbi, err);
if (err)
@@ -568,12 +574,16 @@
while (1) {
struct fsync_inode_entry *entry;
- if (!f2fs_is_valid_meta_blkaddr(sbi, blkaddr, META_POR))
+ if (!f2fs_is_valid_blkaddr(sbi, blkaddr, META_POR))
break;
f2fs_ra_meta_pages_cond(sbi, blkaddr);
page = f2fs_get_tmp_page(sbi, blkaddr);
+ if (IS_ERR(page)) {
+ err = PTR_ERR(page);
+ break;
+ }
if (!is_recoverable_dnode(page)) {
f2fs_put_page(page, 1);
@@ -628,7 +638,8 @@
#endif
if (s_flags & MS_RDONLY) {
- f2fs_msg(sbi->sb, KERN_INFO, "orphan cleanup on readonly fs");
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "recover fsync data on readonly fs");
sbi->sb->s_flags &= ~MS_RDONLY;
}
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index 52e85a0..1da9a3c 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -250,7 +250,13 @@
err = -EAGAIN;
goto next;
}
- f2fs_get_node_info(sbi, dn.nid, &ni);
+
+ err = f2fs_get_node_info(sbi, dn.nid, &ni);
+ if (err) {
+ f2fs_put_dnode(&dn);
+ return err;
+ }
+
if (cur->old_addr == NEW_ADDR) {
f2fs_invalidate_blocks(sbi, dn.data_blkaddr);
f2fs_update_data_blkaddr(&dn, NEW_ADDR);
@@ -439,8 +445,10 @@
int err;
f2fs_balance_fs(sbi, true);
- f2fs_lock_op(sbi);
+ down_write(&fi->i_gc_rwsem[WRITE]);
+
+ f2fs_lock_op(sbi);
set_inode_flag(inode, FI_ATOMIC_COMMIT);
mutex_lock(&fi->inmem_lock);
@@ -455,6 +463,8 @@
clear_inode_flag(inode, FI_ATOMIC_COMMIT);
f2fs_unlock_op(sbi);
+ up_write(&fi->i_gc_rwsem[WRITE]);
+
return err;
}
@@ -464,12 +474,10 @@
*/
void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
{
-#ifdef CONFIG_F2FS_FAULT_INJECTION
if (time_to_inject(sbi, FAULT_CHECKPOINT)) {
f2fs_show_injection_info(FAULT_CHECKPOINT);
f2fs_stop_checkpoint(sbi, false);
}
-#endif
/* balance_fs_bg is able to be pending */
if (need && excess_cached_nats(sbi))
@@ -503,7 +511,8 @@
else
f2fs_build_free_nids(sbi, false, false);
- if (!is_idle(sbi) && !excess_dirty_nats(sbi))
+ if (!is_idle(sbi) &&
+ (!excess_dirty_nats(sbi) && !excess_dirty_nodes(sbi)))
return;
/* checkpoint is the only way to shrink partial cached entries */
@@ -511,6 +520,7 @@
!f2fs_available_free_memory(sbi, INO_ENTRIES) ||
excess_prefree_segs(sbi) ||
excess_dirty_nats(sbi) ||
+ excess_dirty_nodes(sbi) ||
f2fs_time_over(sbi, CP_TIME)) {
if (test_opt(sbi, DATA_FLUSH)) {
struct blk_plug plug;
@@ -831,9 +841,12 @@
dc->len = len;
dc->ref = 0;
dc->state = D_PREP;
+ dc->issuing = 0;
dc->error = 0;
init_completion(&dc->wait);
list_add_tail(&dc->list, pend_list);
+ spin_lock_init(&dc->lock);
+ dc->bio_ref = 0;
atomic_inc(&dcc->discard_cmd_cnt);
dcc->undiscard_blks += len;
@@ -860,7 +873,7 @@
struct discard_cmd *dc)
{
if (dc->state == D_DONE)
- atomic_dec(&dcc->issing_discard);
+ atomic_sub(dc->issuing, &dcc->issing_discard);
list_del(&dc->list);
rb_erase(&dc->rb_node, &dcc->root);
@@ -875,9 +888,17 @@
struct discard_cmd *dc)
{
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ unsigned long flags;
trace_f2fs_remove_discard(dc->bdev, dc->start, dc->len);
+ spin_lock_irqsave(&dc->lock, flags);
+ if (dc->bio_ref) {
+ spin_unlock_irqrestore(&dc->lock, flags);
+ return;
+ }
+ spin_unlock_irqrestore(&dc->lock, flags);
+
f2fs_bug_on(sbi, dc->ref);
if (dc->error == -EOPNOTSUPP)
@@ -893,10 +914,17 @@
static void f2fs_submit_discard_endio(struct bio *bio)
{
struct discard_cmd *dc = (struct discard_cmd *)bio->bi_private;
+ unsigned long flags;
dc->error = bio->bi_error;
- dc->state = D_DONE;
- complete_all(&dc->wait);
+
+ spin_lock_irqsave(&dc->lock, flags);
+ dc->bio_ref--;
+ if (!dc->bio_ref && dc->state == D_SUBMIT) {
+ dc->state = D_DONE;
+ complete_all(&dc->wait);
+ }
+ spin_unlock_irqrestore(&dc->lock, flags);
bio_put(bio);
}
@@ -934,6 +962,7 @@
/* common policy */
dpolicy->type = discard_type;
dpolicy->sync = true;
+ dpolicy->ordered = false;
dpolicy->granularity = granularity;
dpolicy->max_requests = DEF_MAX_DISCARD_REQUEST;
@@ -945,6 +974,7 @@
dpolicy->max_interval = DEF_MAX_DISCARD_ISSUE_TIME;
dpolicy->io_aware = true;
dpolicy->sync = false;
+ dpolicy->ordered = true;
if (utilization(sbi) > DEF_DISCARD_URGENT_UTIL) {
dpolicy->granularity = 1;
dpolicy->max_interval = DEF_MIN_DISCARD_ISSUE_TIME;
@@ -962,48 +992,115 @@
}
}
-
+static void __update_discard_tree_range(struct f2fs_sb_info *sbi,
+ struct block_device *bdev, block_t lstart,
+ block_t start, block_t len);
/* this function is copied from blkdev_issue_discard from block/blk-lib.c */
-static void __submit_discard_cmd(struct f2fs_sb_info *sbi,
+static int __submit_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy,
- struct discard_cmd *dc)
+ struct discard_cmd *dc,
+ unsigned int *issued)
{
+ struct block_device *bdev = dc->bdev;
+ struct request_queue *q = bdev_get_queue(bdev);
+ unsigned int max_discard_blocks =
+ SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
struct list_head *wait_list = (dpolicy->type == DPOLICY_FSTRIM) ?
&(dcc->fstrim_list) : &(dcc->wait_list);
- struct bio *bio = NULL;
int flag = dpolicy->sync ? REQ_SYNC : 0;
+ block_t lstart, start, len, total_len;
+ int err = 0;
if (dc->state != D_PREP)
- return;
+ return 0;
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK))
- return;
+ return 0;
- trace_f2fs_issue_discard(dc->bdev, dc->start, dc->len);
+ trace_f2fs_issue_discard(bdev, dc->start, dc->len);
- dc->error = __blkdev_issue_discard(dc->bdev,
- SECTOR_FROM_BLOCK(dc->start),
- SECTOR_FROM_BLOCK(dc->len),
- GFP_NOFS, 0, &bio);
- if (!dc->error) {
- /* should keep before submission to avoid D_DONE right away */
- dc->state = D_SUBMIT;
- atomic_inc(&dcc->issued_discard);
- atomic_inc(&dcc->issing_discard);
- if (bio) {
- bio->bi_private = dc;
- bio->bi_end_io = f2fs_submit_discard_endio;
- bio->bi_opf |= flag;
- submit_bio(bio);
- list_move_tail(&dc->list, wait_list);
- __check_sit_bitmap(sbi, dc->start, dc->start + dc->len);
+ lstart = dc->lstart;
+ start = dc->start;
+ len = dc->len;
+ total_len = len;
- f2fs_update_iostat(sbi, FS_DISCARD, 1);
+ dc->len = 0;
+
+ while (total_len && *issued < dpolicy->max_requests && !err) {
+ struct bio *bio = NULL;
+ unsigned long flags;
+ bool last = true;
+
+ if (len > max_discard_blocks) {
+ len = max_discard_blocks;
+ last = false;
}
- } else {
- __remove_discard_cmd(sbi, dc);
+
+ (*issued)++;
+ if (*issued == dpolicy->max_requests)
+ last = true;
+
+ dc->len += len;
+
+ if (time_to_inject(sbi, FAULT_DISCARD)) {
+ f2fs_show_injection_info(FAULT_DISCARD);
+ err = -EIO;
+ goto submit;
+ }
+ err = __blkdev_issue_discard(bdev,
+ SECTOR_FROM_BLOCK(start),
+ SECTOR_FROM_BLOCK(len),
+ GFP_NOFS, 0, &bio);
+submit:
+ if (err) {
+ spin_lock_irqsave(&dc->lock, flags);
+ if (dc->state == D_PARTIAL)
+ dc->state = D_SUBMIT;
+ spin_unlock_irqrestore(&dc->lock, flags);
+
+ break;
+ }
+
+ f2fs_bug_on(sbi, !bio);
+
+ /*
+ * state must be set before submission to avoid
+ * hitting D_DONE right away
+ */
+ spin_lock_irqsave(&dc->lock, flags);
+ if (last)
+ dc->state = D_SUBMIT;
+ else
+ dc->state = D_PARTIAL;
+ dc->bio_ref++;
+ spin_unlock_irqrestore(&dc->lock, flags);
+
+ atomic_inc(&dcc->issing_discard);
+ dc->issuing++;
+ list_move_tail(&dc->list, wait_list);
+
+ /* sanity check on discard range */
+ __check_sit_bitmap(sbi, start, start + len);
+
+ bio->bi_private = dc;
+ bio->bi_end_io = f2fs_submit_discard_endio;
+ bio->bi_opf |= flag;
+ submit_bio(bio);
+
+ atomic_inc(&dcc->issued_discard);
+
+ f2fs_update_iostat(sbi, FS_DISCARD, 1);
+
+ lstart += len;
+ start += len;
+ total_len -= len;
+ len = total_len;
}
+
+ if (!err && len)
+ __update_discard_tree_range(sbi, bdev, lstart, start, len);
+ return err;
}
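
__submit_discard_cmd() now splits one logical discard into pieces no larger than the device's max_discard_sectors and counts them in dc->bio_ref, so the command completes only when the last piece's endio runs. A compact user-space model of that split-and-refcount idea; MAX_DISCARD_BLOCKS and the printf are illustrative stand-ins:

#include <stdio.h>

#define MAX_DISCARD_BLOCKS 128U  /* stands in for q->limits.max_discard_sectors */

struct cmd {
        unsigned int start, len;
        unsigned int bio_ref;    /* pieces still in flight */
        int submitted_all;       /* models D_SUBMIT vs D_PARTIAL */
        int done;                /* models complete_all(&dc->wait) */
};

static void piece_endio(struct cmd *dc)
{
        if (--dc->bio_ref == 0 && dc->submitted_all)
                dc->done = 1;    /* only the final piece completes the cmd */
}

static void submit_chunked(struct cmd *dc)
{
        unsigned int start = dc->start, total = dc->len;

        while (total) {
                unsigned int len = total > MAX_DISCARD_BLOCKS ?
                                        MAX_DISCARD_BLOCKS : total;

                if (len == total)
                        dc->submitted_all = 1;  /* final piece */
                dc->bio_ref++;                  /* taken before submission */
                printf("discard [%u, %u)\n", start, start + len);

                start += len;
                total -= len;
        }
        /* endio normally runs asynchronously from the block layer */
        while (dc->bio_ref)
                piece_endio(dc);
}

int main(void)
{
        struct cmd dc = { .start = 0, .len = 300 };

        submit_chunked(&dc);
        return dc.done ? 0 : 1;
}

The submitted_all flag plays the role of the D_PARTIAL/D_SUBMIT distinction above: without it, an endio arriving between two submissions could see bio_ref hit zero and complete the command early.
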
static struct discard_cmd *__insert_discard_tree(struct f2fs_sb_info *sbi,
@@ -1084,10 +1181,11 @@
struct discard_cmd *dc;
struct discard_info di = {0};
struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ struct request_queue *q = bdev_get_queue(bdev);
+ unsigned int max_discard_blocks =
+ SECTOR_TO_BLOCK(q->limits.max_discard_sectors);
block_t end = lstart + len;
- mutex_lock(&dcc->cmd_lock);
-
dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
NULL, lstart,
(struct rb_entry **)&prev_dc,
@@ -1127,7 +1225,8 @@
if (prev_dc && prev_dc->state == D_PREP &&
prev_dc->bdev == bdev &&
- __is_discard_back_mergeable(&di, &prev_dc->di)) {
+ __is_discard_back_mergeable(&di, &prev_dc->di,
+ max_discard_blocks)) {
prev_dc->di.len += di.len;
dcc->undiscard_blks += di.len;
__relocate_discard_cmd(dcc, prev_dc);
@@ -1138,7 +1237,8 @@
if (next_dc && next_dc->state == D_PREP &&
next_dc->bdev == bdev &&
- __is_discard_front_mergeable(&di, &next_dc->di)) {
+ __is_discard_front_mergeable(&di, &next_dc->di,
+ max_discard_blocks)) {
next_dc->di.lstart = di.lstart;
next_dc->di.len += di.len;
next_dc->di.start = di.start;
@@ -1161,8 +1261,6 @@
node = rb_next(&prev_dc->rb_node);
next_dc = rb_entry_safe(node, struct discard_cmd, rb_node);
}
-
- mutex_unlock(&dcc->cmd_lock);
}
static int __queue_discard_cmd(struct f2fs_sb_info *sbi,
@@ -1177,10 +1275,72 @@
blkstart -= FDEV(devi).start_blk;
}
+ mutex_lock(&SM_I(sbi)->dcc_info->cmd_lock);
__update_discard_tree_range(sbi, bdev, lblkstart, blkstart, blklen);
+ mutex_unlock(&SM_I(sbi)->dcc_info->cmd_lock);
return 0;
}
+static unsigned int __issue_discard_cmd_orderly(struct f2fs_sb_info *sbi,
+ struct discard_policy *dpolicy)
+{
+ struct discard_cmd_control *dcc = SM_I(sbi)->dcc_info;
+ struct discard_cmd *prev_dc = NULL, *next_dc = NULL;
+ struct rb_node **insert_p = NULL, *insert_parent = NULL;
+ struct discard_cmd *dc;
+ struct blk_plug plug;
+ unsigned int pos = dcc->next_pos;
+ unsigned int issued = 0;
+ bool io_interrupted = false;
+
+ mutex_lock(&dcc->cmd_lock);
+ dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
+ NULL, pos,
+ (struct rb_entry **)&prev_dc,
+ (struct rb_entry **)&next_dc,
+ &insert_p, &insert_parent, true);
+ if (!dc)
+ dc = next_dc;
+
+ blk_start_plug(&plug);
+
+ while (dc) {
+ struct rb_node *node;
+ int err = 0;
+
+ if (dc->state != D_PREP)
+ goto next;
+
+ if (dpolicy->io_aware && !is_idle(sbi)) {
+ io_interrupted = true;
+ break;
+ }
+
+ dcc->next_pos = dc->lstart + dc->len;
+ err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
+
+ if (issued >= dpolicy->max_requests)
+ break;
+next:
+ node = rb_next(&dc->rb_node);
+ if (err)
+ __remove_discard_cmd(sbi, dc);
+ dc = rb_entry_safe(node, struct discard_cmd, rb_node);
+ }
+
+ blk_finish_plug(&plug);
+
+ if (!dc)
+ dcc->next_pos = 0;
+
+ mutex_unlock(&dcc->cmd_lock);
+
+ if (!issued && io_interrupted)
+ issued = -1;
+
+ return issued;
+}
+
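
Ordered issuing keeps a cursor, dcc->next_pos, into the address-sorted command tree, so each background round resumes where the previous one stopped instead of re-trimming low addresses first. The same cursor idea over a plain sorted array, as a hedged sketch rather than the rb-tree walk the patch actually performs:

#include <stddef.h>

struct dcmd { unsigned int lstart, len; int prep; };

/* Issue up to max_requests commands in address order, resuming at
 * *next_pos; models __issue_discard_cmd_orderly() over an array. */
static unsigned int issue_ordered(struct dcmd *cmds, size_t n,
                                  unsigned int *next_pos,
                                  unsigned int max_requests)
{
        unsigned int issued = 0;
        size_t i;

        for (i = 0; i < n; i++) {
                struct dcmd *dc = &cmds[i];

                if (dc->lstart < *next_pos || !dc->prep)
                        continue;       /* before the cursor, or already issued */

                *next_pos = dc->lstart + dc->len;       /* advance the cursor */
                dc->prep = 0;                           /* "submit" the command */
                if (++issued >= max_requests)
                        return issued;  /* cursor kept for the next round */
        }
        *next_pos = 0;  /* reached the end: wrap for the next round */
        return issued;
}
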
static int __issue_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy)
{
@@ -1188,19 +1348,24 @@
struct list_head *pend_list;
struct discard_cmd *dc, *tmp;
struct blk_plug plug;
- int i, iter = 0, issued = 0;
+ int i, issued = 0;
bool io_interrupted = false;
for (i = MAX_PLIST_NUM - 1; i >= 0; i--) {
if (i + 1 < dpolicy->granularity)
break;
+
+ if (i < DEFAULT_DISCARD_GRANULARITY && dpolicy->ordered)
+ return __issue_discard_cmd_orderly(sbi, dpolicy);
+
pend_list = &dcc->pend_list[i];
mutex_lock(&dcc->cmd_lock);
if (list_empty(pend_list))
goto next;
- f2fs_bug_on(sbi,
- !f2fs_check_rb_tree_consistence(sbi, &dcc->root));
+ if (unlikely(dcc->rbtree_check))
+ f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+ &dcc->root));
blk_start_plug(&plug);
list_for_each_entry_safe(dc, tmp, pend_list, list) {
f2fs_bug_on(sbi, dc->state != D_PREP);
@@ -1208,20 +1373,19 @@
if (dpolicy->io_aware && i < dpolicy->io_aware_gran &&
!is_idle(sbi)) {
io_interrupted = true;
- goto skip;
+ break;
}
- __submit_discard_cmd(sbi, dpolicy, dc);
- issued++;
-skip:
- if (++iter >= dpolicy->max_requests)
+ __submit_discard_cmd(sbi, dpolicy, dc, &issued);
+
+ if (issued >= dpolicy->max_requests)
break;
}
blk_finish_plug(&plug);
next:
mutex_unlock(&dcc->cmd_lock);
- if (iter >= dpolicy->max_requests)
+ if (issued >= dpolicy->max_requests || io_interrupted)
break;
}
@@ -1319,21 +1483,22 @@
return trimmed;
}
-static void __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
+static unsigned int __wait_all_discard_cmd(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy)
{
struct discard_policy dp;
+ unsigned int discard_blks;
- if (dpolicy) {
- __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
- return;
- }
+ if (dpolicy)
+ return __wait_discard_cmd_range(sbi, dpolicy, 0, UINT_MAX);
/* wait all */
__init_discard_policy(sbi, &dp, DPOLICY_FSTRIM, 1);
- __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+ discard_blks = __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
__init_discard_policy(sbi, &dp, DPOLICY_UMOUNT, 1);
- __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+ discard_blks += __wait_discard_cmd_range(sbi, &dp, 0, UINT_MAX);
+
+ return discard_blks;
}
/* This should be covered by global mutex, &sit_i->sentry_lock */
@@ -1386,6 +1551,8 @@
/* just to make sure there is no pending discard commands */
__wait_all_discard_cmd(sbi, NULL);
+
+ f2fs_bug_on(sbi, atomic_read(&dcc->discard_cmd_cnt));
return dropped;
}
@@ -1649,21 +1816,30 @@
unsigned int start = 0, end = -1;
unsigned int secno, start_segno;
bool force = (cpc->reason & CP_DISCARD);
+ bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
mutex_lock(&dirty_i->seglist_lock);
while (1) {
int i;
+
+ if (need_align && end != -1)
+ end--;
start = find_next_bit(prefree_map, MAIN_SEGS(sbi), end + 1);
if (start >= MAIN_SEGS(sbi))
break;
end = find_next_zero_bit(prefree_map, MAIN_SEGS(sbi),
start + 1);
- for (i = start; i < end; i++)
- clear_bit(i, prefree_map);
+ if (need_align) {
+ start = rounddown(start, sbi->segs_per_sec);
+ end = roundup(end, sbi->segs_per_sec);
+ }
- dirty_i->nr_dirty[PRE] -= end - start;
+ for (i = start; i < end; i++) {
+ if (test_and_clear_bit(i, prefree_map))
+ dirty_i->nr_dirty[PRE]--;
+ }
if (!test_opt(sbi, DISCARD))
continue;
@@ -1757,7 +1933,9 @@
dcc->nr_discards = 0;
dcc->max_discards = MAIN_SEGS(sbi) << sbi->log_blocks_per_seg;
dcc->undiscard_blks = 0;
+ dcc->next_pos = 0;
dcc->root = RB_ROOT;
+ dcc->rbtree_check = false;
init_waitqueue_head(&dcc->discard_wait_queue);
SM_I(sbi)->dcc_info = dcc;
@@ -1907,6 +2085,8 @@
if (addr == NEW_ADDR)
return;
+ invalidate_mapping_pages(META_MAPPING(sbi), addr, addr);
+
/* add it into sit main buffer */
down_write(&sit_i->sentry_lock);
@@ -1925,7 +2105,7 @@
struct seg_entry *se;
bool is_cp = false;
- if (!is_valid_blkaddr(blkaddr))
+ if (!is_valid_data_blkaddr(sbi, blkaddr))
return true;
down_read(&sit_i->sentry_lock);
@@ -1989,7 +2169,7 @@
*/
struct page *f2fs_get_sum_page(struct f2fs_sb_info *sbi, unsigned int segno)
{
- return f2fs_get_meta_page(sbi, GET_SUM_BLOCK(sbi, segno));
+ return f2fs_get_meta_page_nofail(sbi, GET_SUM_BLOCK(sbi, segno));
}
void f2fs_update_meta_page(struct f2fs_sb_info *sbi,
@@ -2372,7 +2552,7 @@
return has_candidate;
}
-static void __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
+static unsigned int __issue_discard_cmd_range(struct f2fs_sb_info *sbi,
struct discard_policy *dpolicy,
unsigned int start, unsigned int end)
{
@@ -2382,12 +2562,15 @@
struct discard_cmd *dc;
struct blk_plug plug;
int issued;
+ unsigned int trimmed = 0;
next:
issued = 0;
mutex_lock(&dcc->cmd_lock);
- f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi, &dcc->root));
+ if (unlikely(dcc->rbtree_check))
+ f2fs_bug_on(sbi, !f2fs_check_rb_tree_consistence(sbi,
+ &dcc->root));
dc = (struct discard_cmd *)f2fs_lookup_rb_tree_ret(&dcc->root,
NULL, start,
@@ -2401,6 +2584,7 @@
while (dc && dc->lstart <= end) {
struct rb_node *node;
+ int err = 0;
if (dc->len < dpolicy->granularity)
goto skip;
@@ -2410,19 +2594,24 @@
goto skip;
}
- __submit_discard_cmd(sbi, dpolicy, dc);
+ err = __submit_discard_cmd(sbi, dpolicy, dc, &issued);
- if (++issued >= dpolicy->max_requests) {
+ if (issued >= dpolicy->max_requests) {
start = dc->lstart + dc->len;
+ if (err)
+ __remove_discard_cmd(sbi, dc);
+
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
- __wait_all_discard_cmd(sbi, NULL);
+ trimmed += __wait_all_discard_cmd(sbi, NULL);
congestion_wait(BLK_RW_ASYNC, HZ/50);
goto next;
}
skip:
node = rb_next(&dc->rb_node);
+ if (err)
+ __remove_discard_cmd(sbi, dc);
dc = rb_entry_safe(node, struct discard_cmd, rb_node);
if (fatal_signal_pending(current))
@@ -2431,6 +2620,8 @@
blk_finish_plug(&plug);
mutex_unlock(&dcc->cmd_lock);
+
+ return trimmed;
}
int f2fs_trim_fs(struct f2fs_sb_info *sbi, struct fstrim_range *range)
@@ -2443,12 +2634,13 @@
struct discard_policy dpolicy;
unsigned long long trimmed = 0;
int err = 0;
+ bool need_align = test_opt(sbi, LFS) && sbi->segs_per_sec > 1;
if (start >= MAX_BLKADDR(sbi) || range->len < sbi->blocksize)
return -EINVAL;
- if (end <= MAIN_BLKADDR(sbi))
- return -EINVAL;
+ if (end < MAIN_BLKADDR(sbi))
+ goto out;
if (is_sbi_flag_set(sbi, SBI_NEED_FSCK)) {
f2fs_msg(sbi->sb, KERN_WARNING,
@@ -2460,6 +2652,10 @@
start_segno = (start <= MAIN_BLKADDR(sbi)) ? 0 : GET_SEGNO(sbi, start);
end_segno = (end >= MAX_BLKADDR(sbi)) ? MAIN_SEGS(sbi) - 1 :
GET_SEGNO(sbi, end);
+ if (need_align) {
+ start_segno = rounddown(start_segno, sbi->segs_per_sec);
+ end_segno = roundup(end_segno + 1, sbi->segs_per_sec) - 1;
+ }
cpc.reason = CP_DISCARD;
cpc.trim_minlen = max_t(__u64, 1, F2FS_BYTES_TO_BLK(range->minlen));
@@ -2475,24 +2671,27 @@
if (err)
goto out;
- start_block = START_BLOCK(sbi, start_segno);
- end_block = START_BLOCK(sbi, end_segno + 1);
-
- __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
- __issue_discard_cmd_range(sbi, &dpolicy, start_block, end_block);
-
/*
* We filed discard candidates, but we don't actually need to wait for
* all of them, since they'll be issued at idle time along with the
* runtime discard option. Users are expected to rely on either runtime
* discard or periodic fstrim rather than both.
*/
- if (!test_opt(sbi, DISCARD)) {
- trimmed = __wait_discard_cmd_range(sbi, &dpolicy,
+ if (test_opt(sbi, DISCARD))
+ goto out;
+
+ start_block = START_BLOCK(sbi, start_segno);
+ end_block = START_BLOCK(sbi, end_segno + 1);
+
+ __init_discard_policy(sbi, &dpolicy, DPOLICY_FSTRIM, cpc.trim_minlen);
+ trimmed = __issue_discard_cmd_range(sbi, &dpolicy,
start_block, end_block);
- range->len = F2FS_BLK_TO_BYTES(trimmed);
- }
+
+ trimmed += __wait_discard_cmd_range(sbi, &dpolicy,
+ start_block, end_block);
out:
+ if (!err)
+ range->len = F2FS_BLK_TO_BYTES(trimmed);
return err;
}
@@ -2645,8 +2844,8 @@
return CURSEG_COLD_DATA;
if (file_is_hot(inode) ||
is_inode_flag_set(inode, FI_HOT_DATA) ||
- is_inode_flag_set(inode, FI_ATOMIC_FILE) ||
- is_inode_flag_set(inode, FI_VOLATILE_FILE))
+ f2fs_is_atomic_file(inode) ||
+ f2fs_is_volatile_file(inode))
return CURSEG_HOT_DATA;
/* f2fs_rw_hint_to_seg_type(inode->i_write_hint); */
return CURSEG_WARM_DATA;
@@ -2788,6 +2987,9 @@
reallocate:
f2fs_allocate_data_block(fio->sbi, fio->page, fio->old_blkaddr,
&fio->new_blkaddr, sum, type, fio, true);
+ if (GET_SEGNO(fio->sbi, fio->old_blkaddr) != NULL_SEGNO)
+ invalidate_mapping_pages(META_MAPPING(fio->sbi),
+ fio->old_blkaddr, fio->old_blkaddr);
/* writeout dirty page into bdev */
f2fs_submit_page_write(fio);
@@ -2843,11 +3045,9 @@
{
struct f2fs_sb_info *sbi = fio->sbi;
struct f2fs_summary sum;
- struct node_info ni;
f2fs_bug_on(sbi, dn->data_blkaddr == NULL_ADDR);
- f2fs_get_node_info(sbi, dn->nid, &ni);
- set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);
+ set_summary(&sum, dn->nid, dn->ofs_in_node, fio->version);
do_write_page(&sum, fio);
f2fs_update_data_blkaddr(dn, fio->new_blkaddr);
@@ -2944,8 +3144,11 @@
if (!recover_curseg || recover_newaddr)
update_sit_entry(sbi, new_blkaddr, 1);
- if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO)
+ if (GET_SEGNO(sbi, old_blkaddr) != NULL_SEGNO) {
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ old_blkaddr, old_blkaddr);
update_sit_entry(sbi, old_blkaddr, -1);
+ }
locate_dirty_segment(sbi, GET_SEGNO(sbi, old_blkaddr));
locate_dirty_segment(sbi, GET_SEGNO(sbi, new_blkaddr));
@@ -2999,7 +3202,7 @@
{
struct page *cpage;
- if (!is_valid_blkaddr(blkaddr))
+ if (!is_valid_data_blkaddr(sbi, blkaddr))
return;
cpage = find_lock_page(META_MAPPING(sbi), blkaddr);
@@ -3009,7 +3212,7 @@
}
}
-static void read_compacted_summaries(struct f2fs_sb_info *sbi)
+static int read_compacted_summaries(struct f2fs_sb_info *sbi)
{
struct f2fs_checkpoint *ckpt = F2FS_CKPT(sbi);
struct curseg_info *seg_i;
@@ -3021,6 +3224,8 @@
start = start_sum_block(sbi);
page = f2fs_get_meta_page(sbi, start++);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
kaddr = (unsigned char *)page_address(page);
/* Step 1: restore nat cache */
@@ -3061,11 +3266,14 @@
page = NULL;
page = f2fs_get_meta_page(sbi, start++);
+ if (IS_ERR(page))
+ return PTR_ERR(page);
kaddr = (unsigned char *)page_address(page);
offset = 0;
}
}
f2fs_put_page(page, 1);
+ return 0;
}
static int read_normal_summaries(struct f2fs_sb_info *sbi, int type)
@@ -3077,6 +3285,7 @@
unsigned short blk_off;
unsigned int segno = 0;
block_t blk_addr = 0;
+ int err = 0;
/* get segment number and block addr */
if (IS_DATASEG(type)) {
@@ -3100,6 +3309,8 @@
}
new = f2fs_get_meta_page(sbi, blk_addr);
+ if (IS_ERR(new))
+ return PTR_ERR(new);
sum = (struct f2fs_summary_block *)page_address(new);
if (IS_NODESEG(type)) {
@@ -3111,7 +3322,9 @@
ns->ofs_in_node = 0;
}
} else {
- f2fs_restore_node_summary(sbi, segno, sum);
+ err = f2fs_restore_node_summary(sbi, segno, sum);
+ if (err)
+ goto out;
}
}
@@ -3131,8 +3344,9 @@
curseg->alloc_type = ckpt->alloc_type[type];
curseg->next_blkoff = blk_off;
mutex_unlock(&curseg->curseg_mutex);
+out:
f2fs_put_page(new, 1);
- return 0;
+ return err;
}
static int restore_curseg_summaries(struct f2fs_sb_info *sbi)
@@ -3150,7 +3364,9 @@
META_CP, true);
/* restore for compacted data summary */
- read_compacted_summaries(sbi);
+ err = read_compacted_summaries(sbi);
+ if (err)
+ return err;
type = CURSEG_HOT_NODE;
}
@@ -3281,7 +3497,7 @@
static struct page *get_current_sit_page(struct f2fs_sb_info *sbi,
unsigned int segno)
{
- return f2fs_get_meta_page(sbi, current_sit_addr(sbi, segno));
+ return f2fs_get_meta_page_nofail(sbi, current_sit_addr(sbi, segno));
}
static struct page *get_next_sit_page(struct f2fs_sb_info *sbi,
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 38c549d..b3d9e31 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -85,7 +85,7 @@
(GET_SEGOFF_FROM_SEG0(sbi, blk_addr) & ((sbi)->blocks_per_seg - 1))
#define GET_SEGNO(sbi, blk_addr) \
- ((!is_valid_blkaddr(blk_addr)) ? \
+ ((!is_valid_data_blkaddr(sbi, blk_addr)) ? \
NULL_SEGNO : GET_L2R_SEGNO(FREE_I(sbi), \
GET_SEGNO_FROM_SEG0(sbi, blk_addr)))
#define BLKS_PER_SEC(sbi) \
@@ -215,7 +215,7 @@
#define IS_DUMMY_WRITTEN_PAGE(page) \
(page_private(page) == (unsigned long)DUMMY_WRITTEN_PAGE)
-#define MAX_SKIP_ATOMIC_COUNT 16
+#define MAX_SKIP_GC_COUNT 16
struct inmem_pages {
struct list_head list;
@@ -648,13 +648,10 @@
{
struct f2fs_sb_info *sbi = fio->sbi;
- if (PAGE_TYPE_OF_BIO(fio->type) == META &&
- (!is_read_io(fio->op) || fio->is_meta))
- BUG_ON(blk_addr < SEG0_BLKADDR(sbi) ||
- blk_addr >= MAIN_BLKADDR(sbi));
+ if (__is_meta_io(fio))
+ verify_blkaddr(sbi, blk_addr, META_GENERIC);
else
- BUG_ON(blk_addr < MAIN_BLKADDR(sbi) ||
- blk_addr >= MAX_BLKADDR(sbi));
+ verify_blkaddr(sbi, blk_addr, DATA_GENERIC);
}
/*
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index 343a5cf..bba0cd4 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -41,7 +41,7 @@
#ifdef CONFIG_F2FS_FAULT_INJECTION
-char *fault_name[FAULT_MAX] = {
+char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_KMALLOC] = "kmalloc",
[FAULT_KVMALLOC] = "kvmalloc",
[FAULT_PAGE_ALLOC] = "page alloc",
@@ -55,20 +55,24 @@
[FAULT_TRUNCATE] = "truncate fail",
[FAULT_IO] = "IO error",
[FAULT_CHECKPOINT] = "checkpoint error",
+ [FAULT_DISCARD] = "discard error",
};
-static void f2fs_build_fault_attr(struct f2fs_sb_info *sbi,
- unsigned int rate)
+void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
+ unsigned int type)
{
struct f2fs_fault_info *ffi = &F2FS_OPTION(sbi).fault_info;
if (rate) {
atomic_set(&ffi->inject_ops, 0);
ffi->inject_rate = rate;
- ffi->inject_type = (1 << FAULT_MAX) - 1;
- } else {
- memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
+
+ if (type)
+ ffi->inject_type = type;
+
+ if (!rate && !type)
+ memset(ffi, 0, sizeof(struct f2fs_fault_info));
}
#endif
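
With the new fault_type= option, the injection rate and the type bitmask are set independently, and passing zero for both clears the whole attribute. A small standalone model of that merge logic; FAULT_MAX and the struct layout are simplified, and the inject_ops counter reset is omitted:

#include <string.h>

#define FAULT_MAX 16U                           /* simplified */
#define ALL_FAULT_TYPE ((1U << FAULT_MAX) - 1)

struct fault_info {
        unsigned int inject_rate;       /* inject once per N operations */
        unsigned int inject_type;       /* bitmask of enabled fault kinds */
};

static void build_fault_attr(struct fault_info *ffi,
                             unsigned int rate, unsigned int type)
{
        if (rate)
                ffi->inject_rate = rate;
        if (type)
                ffi->inject_type = type;
        if (!rate && !type)             /* both zero: disable everything */
                memset(ffi, 0, sizeof(*ffi));
}

fault_injection=%u then maps to build_fault_attr(&ffi, arg, ALL_FAULT_TYPE) and fault_type=%u to build_fault_attr(&ffi, 0, arg), matching the two Opt_ cases handled below.
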
@@ -113,6 +117,7 @@
Opt_mode,
Opt_io_size_bits,
Opt_fault_injection,
+ Opt_fault_type,
Opt_lazytime,
Opt_nolazytime,
Opt_quota,
@@ -170,6 +175,7 @@
{Opt_mode, "mode=%s"},
{Opt_io_size_bits, "io_bits=%u"},
{Opt_fault_injection, "fault_injection=%u"},
+ {Opt_fault_type, "fault_type=%u"},
{Opt_lazytime, "lazytime"},
{Opt_nolazytime, "nolazytime"},
{Opt_quota, "quota"},
@@ -347,12 +353,6 @@
"QUOTA feature is enabled, so ignore jquota_fmt");
F2FS_OPTION(sbi).s_jquota_fmt = 0;
}
- if (f2fs_sb_has_quota_ino(sbi->sb) && f2fs_readonly(sbi->sb)) {
- f2fs_msg(sbi->sb, KERN_INFO,
- "Filesystem with quota feature cannot be mounted RDWR "
- "without CONFIG_QUOTA");
- return -1;
- }
return 0;
}
#endif
@@ -606,7 +606,18 @@
if (args->from && match_int(args, &arg))
return -EINVAL;
#ifdef CONFIG_F2FS_FAULT_INJECTION
- f2fs_build_fault_attr(sbi, arg);
+ f2fs_build_fault_attr(sbi, arg, F2FS_ALL_FAULT_TYPE);
+ set_opt(sbi, FAULT_INJECTION);
+#else
+ f2fs_msg(sb, KERN_INFO,
+ "FAULT_INJECTION was not selected");
+#endif
+ break;
+ case Opt_fault_type:
+ if (args->from && match_int(args, &arg))
+ return -EINVAL;
+#ifdef CONFIG_F2FS_FAULT_INJECTION
+ f2fs_build_fault_attr(sbi, 0, arg);
set_opt(sbi, FAULT_INJECTION);
#else
f2fs_msg(sb, KERN_INFO,
@@ -775,6 +786,19 @@
#ifdef CONFIG_QUOTA
if (f2fs_check_quota_options(sbi))
return -EINVAL;
+#else
+ if (f2fs_sb_has_quota_ino(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+ f2fs_msg(sbi->sb, KERN_INFO,
+ "Filesystem with quota feature cannot be mounted RDWR "
+ "without CONFIG_QUOTA");
+ return -EINVAL;
+ }
+ if (f2fs_sb_has_project_quota(sbi->sb) && !f2fs_readonly(sbi->sb)) {
+ f2fs_msg(sb, KERN_ERR,
+ "Filesystem with project quota feature cannot be "
+ "mounted RDWR without CONFIG_QUOTA");
+ return -EINVAL;
+ }
#endif
if (F2FS_IO_SIZE_BITS(sbi) && !test_opt(sbi, LFS)) {
@@ -1030,6 +1054,10 @@
/* our cp_error case, we can wait for any writeback page */
f2fs_flush_merged_writes(sbi);
+ f2fs_wait_on_all_pages_writeback(sbi);
+
+ f2fs_bug_on(sbi, sbi->fsync_node_num);
+
iput(sbi->node_inode);
iput(sbi->meta_inode);
@@ -1310,9 +1338,12 @@
if (F2FS_IO_SIZE_BITS(sbi))
seq_printf(seq, ",io_size=%uKB", F2FS_IO_SIZE_KB(sbi));
#ifdef CONFIG_F2FS_FAULT_INJECTION
- if (test_opt(sbi, FAULT_INJECTION))
+ if (test_opt(sbi, FAULT_INJECTION)) {
seq_printf(seq, ",fault_injection=%u",
F2FS_OPTION(sbi).fault_info.inject_rate);
+ seq_printf(seq, ",fault_type=%u",
+ F2FS_OPTION(sbi).fault_info.inject_type);
+ }
#endif
#ifdef CONFIG_QUOTA
if (test_opt(sbi, QUOTA))
@@ -1357,7 +1388,8 @@
F2FS_OPTION(sbi).alloc_mode = ALLOC_MODE_DEFAULT;
F2FS_OPTION(sbi).fsync_mode = FSYNC_MODE_POSIX;
F2FS_OPTION(sbi).test_dummy_encryption = false;
- sbi->readdir_ra = 1;
+ F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
+ F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
set_opt(sbi, BG_GC);
set_opt(sbi, INLINE_XATTR);
@@ -1367,12 +1399,12 @@
set_opt(sbi, NOHEAP);
sbi->sb->s_flags |= MS_LAZYTIME;
set_opt(sbi, FLUSH_MERGE);
- if (f2fs_sb_has_blkzoned(sbi->sb)) {
- set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ if (blk_queue_discard(bdev_get_queue(sbi->sb->s_bdev)))
set_opt(sbi, DISCARD);
- } else {
+ if (f2fs_sb_has_blkzoned(sbi->sb))
+ set_opt_mode(sbi, F2FS_MOUNT_LFS);
+ else
set_opt_mode(sbi, F2FS_MOUNT_ADAPTIVE);
- }
#ifdef CONFIG_F2FS_FS_XATTR
set_opt(sbi, XATTR_USER);
@@ -1381,9 +1413,7 @@
set_opt(sbi, POSIX_ACL);
#endif
-#ifdef CONFIG_F2FS_FAULT_INJECTION
- f2fs_build_fault_attr(sbi, 0);
-#endif
+ f2fs_build_fault_attr(sbi, 0, 0);
}
#ifdef CONFIG_QUOTA
@@ -2231,9 +2261,9 @@
return 1;
}
- if (secs_per_zone > total_sections) {
+ if (secs_per_zone > total_sections || !secs_per_zone) {
f2fs_msg(sb, KERN_INFO,
- "Wrong secs_per_zone (%u > %u)",
+ "Wrong secs_per_zone / total_sections (%u, %u)",
secs_per_zone, total_sections);
return 1;
}
@@ -2287,6 +2317,9 @@
unsigned int sit_segs, nat_segs;
unsigned int sit_bitmap_size, nat_bitmap_size;
unsigned int log_blocks_per_seg;
+ unsigned int segment_count_main;
+ unsigned int cp_pack_start_sum, cp_payload;
+ block_t user_block_count;
int i;
total = le32_to_cpu(raw_super->segment_count);
@@ -2311,6 +2344,16 @@
return 1;
}
+ user_block_count = le64_to_cpu(ckpt->user_block_count);
+ segment_count_main = le32_to_cpu(raw_super->segment_count_main);
+ log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
+ if (!user_block_count || user_block_count >=
+ segment_count_main << log_blocks_per_seg) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong user_block_count: %u", user_block_count);
+ return 1;
+ }
+
main_segs = le32_to_cpu(raw_super->segment_count_main);
blocks_per_seg = sbi->blocks_per_seg;
@@ -2327,7 +2370,6 @@
sit_bitmap_size = le32_to_cpu(ckpt->sit_ver_bitmap_bytesize);
nat_bitmap_size = le32_to_cpu(ckpt->nat_ver_bitmap_bytesize);
- log_blocks_per_seg = le32_to_cpu(raw_super->log_blocks_per_seg);
if (sit_bitmap_size != ((sit_segs / 2) << log_blocks_per_seg) / 8 ||
nat_bitmap_size != ((nat_segs / 2) << log_blocks_per_seg) / 8) {
@@ -2337,6 +2379,17 @@
return 1;
}
+ cp_pack_start_sum = __start_sum_addr(sbi);
+ cp_payload = __cp_payload(sbi);
+ if (cp_pack_start_sum < cp_payload + 1 ||
+ cp_pack_start_sum > blocks_per_seg - 1 -
+ NR_CURSEG_TYPE) {
+ f2fs_msg(sbi->sb, KERN_ERR,
+ "Wrong cp_pack_start_sum: %u",
+ cp_pack_start_sum);
+ return 1;
+ }
+
if (unlikely(f2fs_cp_error(sbi))) {
f2fs_msg(sbi->sb, KERN_ERR, "A bug case: need to run fsck");
return 1;
@@ -2674,6 +2727,8 @@
sm_i->dcc_info->discard_granularity = 1;
sm_i->ipu_policy = 1 << F2FS_IPU_FORCE;
}
+
+ sbi->readdir_ra = 1;
}
static int f2fs_fill_super(struct super_block *sb, void *data, int silent)
@@ -2723,9 +2778,6 @@
sb->s_fs_info = sbi;
sbi->raw_super = raw_super;
- F2FS_OPTION(sbi).s_resuid = make_kuid(&init_user_ns, F2FS_DEF_RESUID);
- F2FS_OPTION(sbi).s_resgid = make_kgid(&init_user_ns, F2FS_DEF_RESGID);
-
/* precompute checksum seed for metadata */
if (f2fs_sb_has_inode_chksum(sb))
sbi->s_chksum_seed = f2fs_chksum(sbi, ~0, raw_super->uuid,
@@ -2889,6 +2941,8 @@
f2fs_init_ino_entry_info(sbi);
+ f2fs_init_fsync_node_info(sbi);
+
/* setup f2fs internal modules */
err = f2fs_build_segment_manager(sbi);
if (err) {
@@ -2935,10 +2989,11 @@
err = PTR_ERR(root);
goto free_stats;
}
- if (!S_ISDIR(root->i_mode) || !root->i_blocks || !root->i_size) {
+ if (!S_ISDIR(root->i_mode) || !root->i_blocks ||
+ !root->i_size || !root->i_nlink) {
iput(root);
err = -EINVAL;
- goto free_node_inode;
+ goto free_stats;
}
sb->s_root = d_make_root(root); /* allocate root dentry */
@@ -2952,10 +3007,7 @@
goto free_root_inode;
#ifdef CONFIG_QUOTA
- /*
- * Turn on quotas which were not enabled for read-only mounts if
- * filesystem has quota feature, so that they are updated correctly.
- */
+ /* Enable quota usage during mount */
if (f2fs_sb_has_quota_ino(sb) && !f2fs_readonly(sb)) {
err = f2fs_enable_quotas(sb);
if (err) {
@@ -3113,9 +3165,19 @@
static void kill_f2fs_super(struct super_block *sb)
{
if (sb->s_root) {
- set_sbi_flag(F2FS_SB(sb), SBI_IS_CLOSE);
- f2fs_stop_gc_thread(F2FS_SB(sb));
- f2fs_stop_discard_thread(F2FS_SB(sb));
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+
+ set_sbi_flag(sbi, SBI_IS_CLOSE);
+ f2fs_stop_gc_thread(sbi);
+ f2fs_stop_discard_thread(sbi);
+
+ if (is_sbi_flag_set(sbi, SBI_IS_DIRTY) ||
+ !is_set_ckpt_flags(sbi, CP_UMOUNT_FLAG)) {
+ struct cp_control cpc = {
+ .reason = CP_UMOUNT,
+ };
+ f2fs_write_checkpoint(sbi, &cpc);
+ }
}
kill_block_super(sb);
}
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index e1b1b31..30fd016 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -9,6 +9,7 @@
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
+#include <linux/compiler.h>
#include <linux/proc_fs.h>
#include <linux/f2fs_fs.h>
#include <linux/seq_file.h>
@@ -252,6 +253,7 @@
if (t >= 1) {
sbi->gc_mode = GC_URGENT;
if (sbi->gc_thread) {
+ sbi->gc_thread->gc_wake = 1;
wake_up_interruptible_all(
&sbi->gc_thread->gc_wait_queue_head);
wake_up_discard_thread(sbi, true);
@@ -286,8 +288,10 @@
bool gc_entry = (!strcmp(a->attr.name, "gc_urgent") ||
a->struct_type == GC_THREAD);
- if (gc_entry)
- down_read(&sbi->sb->s_umount);
+ if (gc_entry) {
+ if (!down_read_trylock(&sbi->sb->s_umount))
+ return -EAGAIN;
+ }
ret = __sbi_store(a, sbi, buf, count);
if (gc_entry)
up_read(&sbi->sb->s_umount);
@@ -518,7 +522,8 @@
.kset = &f2fs_kset,
};
-static int segment_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_info_seq_show(struct seq_file *seq,
+ void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -545,7 +550,8 @@
return 0;
}
-static int segment_bits_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused segment_bits_seq_show(struct seq_file *seq,
+ void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -569,7 +575,8 @@
return 0;
}
-static int iostat_info_seq_show(struct seq_file *seq, void *offset)
+static int __maybe_unused iostat_info_seq_show(struct seq_file *seq,
+ void *offset)
{
struct super_block *sb = seq->private;
struct f2fs_sb_info *sbi = F2FS_SB(sb);
@@ -611,6 +618,28 @@
return 0;
}
+static int __maybe_unused victim_bits_seq_show(struct seq_file *seq,
+ void *offset)
+{
+ struct super_block *sb = seq->private;
+ struct f2fs_sb_info *sbi = F2FS_SB(sb);
+ struct dirty_seglist_info *dirty_i = DIRTY_I(sbi);
+ int i;
+
+ seq_puts(seq, "format: victim_secmap bitmaps\n");
+
+ for (i = 0; i < MAIN_SECS(sbi); i++) {
+ if ((i % 10) == 0)
+ seq_printf(seq, "%-10d", i);
+ seq_printf(seq, "%d", test_bit(i, dirty_i->victim_secmap) ? 1 : 0);
+ if ((i % 10) == 9 || i == (MAIN_SECS(sbi) - 1))
+ seq_putc(seq, '\n');
+ else
+ seq_putc(seq, ' ');
+ }
+ return 0;
+}
+
#define F2FS_PROC_FILE_DEF(_name) \
static int _name##_open_fs(struct inode *inode, struct file *file) \
{ \
@@ -627,6 +656,7 @@
F2FS_PROC_FILE_DEF(segment_info);
F2FS_PROC_FILE_DEF(segment_bits);
F2FS_PROC_FILE_DEF(iostat_info);
+F2FS_PROC_FILE_DEF(victim_bits);
int __init f2fs_init_sysfs(void)
{
@@ -677,6 +707,8 @@
&f2fs_seq_segment_bits_fops, sb);
proc_create_data("iostat_info", S_IRUGO, sbi->s_proc,
&f2fs_seq_iostat_info_fops, sb);
+ proc_create_data("victim_bits", S_IRUGO, sbi->s_proc,
+ &f2fs_seq_victim_bits_fops, sb);
}
return 0;
}
@@ -687,6 +719,7 @@
remove_proc_entry("iostat_info", sbi->s_proc);
remove_proc_entry("segment_info", sbi->s_proc);
remove_proc_entry("segment_bits", sbi->s_proc);
+ remove_proc_entry("victim_bits", sbi->s_proc);
remove_proc_entry(sbi->sb->s_id, f2fs_proc_root);
}
kobject_del(&sbi->s_kobj);
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 7082718..77a010e 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -37,9 +37,6 @@
return -EOPNOTSUPP;
break;
case F2FS_XATTR_INDEX_TRUSTED:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- break;
case F2FS_XATTR_INDEX_SECURITY:
break;
default:
@@ -62,9 +59,6 @@
return -EOPNOTSUPP;
break;
case F2FS_XATTR_INDEX_TRUSTED:
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- break;
case F2FS_XATTR_INDEX_SECURITY:
break;
default:
@@ -100,12 +94,22 @@
const char *name, const void *value,
size_t size, int flags)
{
+ unsigned char old_advise = F2FS_I(inode)->i_advise;
+ unsigned char new_advise;
+
if (!inode_owner_or_capable(inode))
return -EPERM;
if (value == NULL)
return -EINVAL;
- F2FS_I(inode)->i_advise |= *(char *)value;
+ new_advise = *(char *)value;
+ if (new_advise & ~FADVISE_MODIFIABLE_BITS)
+ return -EINVAL;
+
+ new_advise = new_advise & FADVISE_MODIFIABLE_BITS;
+ new_advise |= old_advise & ~FADVISE_MODIFIABLE_BITS;
+
+ F2FS_I(inode)->i_advise = new_advise;
f2fs_mark_inode_dirty_sync(inode, true);
return 0;
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index eef0caf..e949551 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -1725,6 +1725,7 @@
if (status) {
op = &args->ops[0];
op->status = status;
+ resp->opcnt = 1;
goto encode_op;
}
diff --git a/fs/ocfs2/buffer_head_io.c b/fs/ocfs2/buffer_head_io.c
index 8f040f8..25c8b32 100644
--- a/fs/ocfs2/buffer_head_io.c
+++ b/fs/ocfs2/buffer_head_io.c
@@ -341,6 +341,7 @@
* for this bh as it's not marked locally
* uptodate. */
status = -EIO;
+ clear_buffer_needs_validate(bh);
put_bh(bh);
bhs[i] = NULL;
continue;
diff --git a/fs/ocfs2/dlm/dlmmaster.c b/fs/ocfs2/dlm/dlmmaster.c
index 3f828a1..0cc30a5 100644
--- a/fs/ocfs2/dlm/dlmmaster.c
+++ b/fs/ocfs2/dlm/dlmmaster.c
@@ -589,9 +589,9 @@
res->last_used = 0;
- spin_lock(&dlm->spinlock);
+ spin_lock(&dlm->track_lock);
list_add_tail(&res->tracking, &dlm->tracking_list);
- spin_unlock(&dlm->spinlock);
+ spin_unlock(&dlm->track_lock);
memset(res->lvb, 0, DLM_LVB_LEN);
memset(res->refmap, 0, sizeof(res->refmap));
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 0f23b3b..37e04a0 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -455,6 +455,20 @@
int err;
int i;
+ /*
+ * The ability to racily run the kernel stack unwinder on a running task
+ * and then observe the unwinder output is scary; while it is useful for
+ * debugging kernel issues, it can also allow an attacker to leak kernel
+ * stack contents.
+ * Doing this in a manner that is at least safe from races would require
+ * some work to ensure that the remote task can not be scheduled; and
+ * even then, this would still expose the unwinder as local attack
+ * surface.
+ * Therefore, this interface is restricted to root.
+ */
+ if (!file_ns_capable(m->file, &init_user_ns, CAP_SYS_ADMIN))
+ return -EACCES;
+
entries = kmalloc(MAX_STACK_TRACE_DEPTH * sizeof(*entries), GFP_KERNEL);
if (!entries)
return -ENOMEM;
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index c2afe39..4298a39 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -457,17 +457,12 @@
return inode;
}
-int proc_fill_super(struct super_block *s, void *data, int silent)
+int proc_fill_super(struct super_block *s)
{
- struct pid_namespace *ns = get_pid_ns(s->s_fs_info);
struct inode *root_inode;
int ret;
- if (!proc_parse_options(data, ns))
- return -EINVAL;
-
- /* User space would break if executables or devices appear on proc */
- s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NOEXEC | SB_I_NODEV;
+ s->s_iflags |= SB_I_USERNS_VISIBLE | SB_I_NODEV;
s->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
s->s_blocksize = 1024;
s->s_blocksize_bits = 10;
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 03d8d14..d960512 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -214,7 +214,7 @@
extern void proc_init_inodecache(void);
extern struct inode *proc_get_inode(struct super_block *, struct proc_dir_entry *);
-extern int proc_fill_super(struct super_block *, void *data, int flags);
+extern int proc_fill_super(struct super_block *);
extern void proc_entry_rundown(struct proc_dir_entry *);
/*
@@ -281,7 +281,6 @@
* root.c
*/
extern struct proc_dir_entry proc_root;
-extern int proc_parse_options(char *options, struct pid_namespace *pid);
extern void proc_self_init(void);
extern int proc_remount(struct super_block *, int *, char *);
diff --git a/fs/proc/root.c b/fs/proc/root.c
index c2f5014..1d68fcd 100644
--- a/fs/proc/root.c
+++ b/fs/proc/root.c
@@ -23,6 +23,21 @@
#include "internal.h"
+static int proc_test_super(struct super_block *sb, void *data)
+{
+ return sb->s_fs_info == data;
+}
+
+static int proc_set_super(struct super_block *sb, void *data)
+{
+ int err = set_anon_super(sb, NULL);
+ if (!err) {
+ struct pid_namespace *ns = (struct pid_namespace *)data;
+ sb->s_fs_info = get_pid_ns(ns);
+ }
+ return err;
+}
+
enum {
Opt_gid, Opt_hidepid, Opt_err,
};
@@ -33,7 +48,7 @@
{Opt_err, NULL},
};
-int proc_parse_options(char *options, struct pid_namespace *pid)
+static int proc_parse_options(char *options, struct pid_namespace *pid)
{
char *p;
substring_t args[MAX_OPT_ARGS];
@@ -85,16 +100,45 @@
static struct dentry *proc_mount(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data)
{
+ int err;
+ struct super_block *sb;
struct pid_namespace *ns;
+ char *options;
if (flags & MS_KERNMOUNT) {
- ns = data;
- data = NULL;
+ ns = (struct pid_namespace *)data;
+ options = NULL;
} else {
ns = task_active_pid_ns(current);
+ options = data;
+
+ /* Does the mounter have privilege over the pid namespace? */
+ if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+ return ERR_PTR(-EPERM);
}
- return mount_ns(fs_type, flags, data, ns, ns->user_ns, proc_fill_super);
+ sb = sget(fs_type, proc_test_super, proc_set_super, flags, ns);
+ if (IS_ERR(sb))
+ return ERR_CAST(sb);
+
+ if (!proc_parse_options(options, ns)) {
+ deactivate_locked_super(sb);
+ return ERR_PTR(-EINVAL);
+ }
+
+ if (!sb->s_root) {
+ err = proc_fill_super(sb);
+ if (err) {
+ deactivate_locked_super(sb);
+ return ERR_PTR(err);
+ }
+
+ sb->s_flags |= MS_ACTIVE;
+ /* User space would break if executables appear on proc */
+ sb->s_iflags |= SB_I_NOEXEC;
+ }
+
+ return dget(sb->s_root);
}
static void proc_kill_sb(struct super_block *sb)
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index d700c42..44475a4 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -21,23 +21,23 @@
#ifdef arch_idle_time
-static cputime64_t get_idle_time(int cpu)
+static u64 get_idle_time(int cpu)
{
- cputime64_t idle;
+ u64 idle;
idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
if (cpu_online(cpu) && !nr_iowait_cpu(cpu))
- idle += arch_idle_time(cpu);
+ idle += cputime_to_nsecs(arch_idle_time(cpu));
return idle;
}
-static cputime64_t get_iowait_time(int cpu)
+static u64 get_iowait_time(int cpu)
{
- cputime64_t iowait;
+ u64 iowait;
iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
if (cpu_online(cpu) && nr_iowait_cpu(cpu))
- iowait += arch_idle_time(cpu);
+ iowait += cputime_to_nsecs(arch_idle_time(cpu));
return iowait;
}
@@ -45,32 +45,32 @@
static u64 get_idle_time(int cpu)
{
- u64 idle, idle_time = -1ULL;
+ u64 idle, idle_usecs = -1ULL;
if (cpu_online(cpu))
- idle_time = get_cpu_idle_time_us(cpu, NULL);
+ idle_usecs = get_cpu_idle_time_us(cpu, NULL);
- if (idle_time == -1ULL)
+ if (idle_usecs == -1ULL)
/* !NO_HZ or cpu offline so we can rely on cpustat.idle */
idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE];
else
- idle = usecs_to_cputime64(idle_time);
+ idle = idle_usecs * NSEC_PER_USEC;
return idle;
}
static u64 get_iowait_time(int cpu)
{
- u64 iowait, iowait_time = -1ULL;
+ u64 iowait, iowait_usecs = -1ULL;
if (cpu_online(cpu))
- iowait_time = get_cpu_iowait_time_us(cpu, NULL);
+ iowait_usecs = get_cpu_iowait_time_us(cpu, NULL);
- if (iowait_time == -1ULL)
+ if (iowait_usecs == -1ULL)
/* !NO_HZ or cpu offline so we can rely on cpustat.iowait */
iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT];
else
- iowait = usecs_to_cputime64(iowait_time);
+ iowait = iowait_usecs * NSEC_PER_USEC;
return iowait;
}
@@ -115,16 +115,16 @@
}
sum += arch_irq_stat();
- seq_put_decimal_ull(p, "cpu ", cputime64_to_clock_t(user));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
+ seq_put_decimal_ull(p, "cpu ", nsec_to_clock_t(user));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
seq_putc(p, '\n');
for_each_online_cpu(i) {
@@ -140,16 +140,16 @@
guest = kcpustat_cpu(i).cpustat[CPUTIME_GUEST];
guest_nice = kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE];
seq_printf(p, "cpu%d", i);
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(user));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(nice));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(system));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(idle));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(iowait));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(irq));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(softirq));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(steal));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest));
- seq_put_decimal_ull(p, " ", cputime64_to_clock_t(guest_nice));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(user));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(nice));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(system));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(idle));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(iowait));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(irq));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(softirq));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(steal));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest));
+ seq_put_decimal_ull(p, " ", nsec_to_clock_t(guest_nice));
seq_putc(p, '\n');
}
seq_put_decimal_ull(p, "intr ", (unsigned long long)sum);
diff --git a/fs/proc/uptime.c b/fs/proc/uptime.c
index 33de567..7981c4f 100644
--- a/fs/proc/uptime.c
+++ b/fs/proc/uptime.c
@@ -5,23 +5,20 @@
#include <linux/seq_file.h>
#include <linux/time.h>
#include <linux/kernel_stat.h>
-#include <linux/cputime.h>
static int uptime_proc_show(struct seq_file *m, void *v)
{
struct timespec uptime;
struct timespec idle;
- u64 idletime;
u64 nsec;
u32 rem;
int i;
- idletime = 0;
+ nsec = 0;
for_each_possible_cpu(i)
- idletime += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
+ nsec += (__force u64) kcpustat_cpu(i).cpustat[CPUTIME_IDLE];
get_monotonic_boottime(&uptime);
- nsec = cputime64_to_jiffies64(idletime) * TICK_NSEC;
idle.tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem);
idle.tv_nsec = rem;
seq_printf(m, "%lu.%02lu %lu.%02lu\n",
diff --git a/fs/sdcardfs/file.c b/fs/sdcardfs/file.c
index 1461254..271c4c4 100644
--- a/fs/sdcardfs/file.c
+++ b/fs/sdcardfs/file.c
@@ -118,7 +118,11 @@
goto out;
/* save current_cred and override it */
- OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+ saved_cred = override_fsids(sbi, SDCARDFS_I(file_inode(file))->data);
+ if (!saved_cred) {
+ err = -ENOMEM;
+ goto out;
+ }
if (lower_file->f_op->unlocked_ioctl)
err = lower_file->f_op->unlocked_ioctl(lower_file, cmd, arg);
@@ -127,7 +131,7 @@
if (!err)
sdcardfs_copy_and_fix_attrs(file_inode(file),
file_inode(lower_file));
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out:
return err;
}
@@ -149,12 +153,16 @@
goto out;
/* save current_cred and override it */
- OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(file_inode(file)));
+ saved_cred = override_fsids(sbi, SDCARDFS_I(file_inode(file))->data);
+ if (!saved_cred) {
+ err = -ENOMEM;
+ goto out;
+ }
if (lower_file->f_op->compat_ioctl)
err = lower_file->f_op->compat_ioctl(lower_file, cmd, arg);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out:
return err;
}
@@ -241,7 +249,11 @@
}
/* save current_cred and override it */
- OVERRIDE_CRED(sbi, saved_cred, SDCARDFS_I(inode));
+ saved_cred = override_fsids(sbi, SDCARDFS_I(inode)->data);
+ if (!saved_cred) {
+ err = -ENOMEM;
+ goto out_err;
+ }
file->private_data =
kzalloc(sizeof(struct sdcardfs_file_info), GFP_KERNEL);
@@ -271,7 +283,7 @@
sdcardfs_copy_and_fix_attrs(inode, sdcardfs_lower_inode(inode));
out_revert_cred:
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_err:
dput(parent);
return err;
diff --git a/fs/sdcardfs/inode.c b/fs/sdcardfs/inode.c
index 30d4db2..7c08ffe 100644
--- a/fs/sdcardfs/inode.c
+++ b/fs/sdcardfs/inode.c
@@ -22,7 +22,6 @@
#include <linux/fs_struct.h>
#include <linux/ratelimit.h>
-/* Do not directly use this function. Use OVERRIDE_CRED() instead. */
const struct cred *override_fsids(struct sdcardfs_sb_info *sbi,
struct sdcardfs_inode_data *data)
{
@@ -50,7 +49,6 @@
return old_cred;
}
-/* Do not directly use this function, use REVERT_CRED() instead. */
void revert_fsids(const struct cred *old_cred)
{
const struct cred *cur_cred;
@@ -78,7 +76,10 @@
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
@@ -95,8 +96,11 @@
err = -ENOMEM;
goto out_unlock;
}
+ copied_fs->umask = 0;
+ task_lock(current);
current->fs = copied_fs;
- current->fs->umask = 0;
+ task_unlock(current);
+
err = vfs_create2(lower_dentry_mnt, d_inode(lower_parent_dentry), lower_dentry, mode, want_excl);
if (err)
goto out;
@@ -110,58 +114,18 @@
fixup_lower_ownership(dentry, dentry->d_name.name);
out:
+ task_lock(current);
current->fs = saved_fs;
+ task_unlock(current);
free_fs_struct(copied_fs);
out_unlock:
unlock_dir(lower_parent_dentry);
sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
-#if 0
-static int sdcardfs_link(struct dentry *old_dentry, struct inode *dir,
- struct dentry *new_dentry)
-{
- struct dentry *lower_old_dentry;
- struct dentry *lower_new_dentry;
- struct dentry *lower_dir_dentry;
- u64 file_size_save;
- int err;
- struct path lower_old_path, lower_new_path;
-
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
- file_size_save = i_size_read(d_inode(old_dentry));
- sdcardfs_get_lower_path(old_dentry, &lower_old_path);
- sdcardfs_get_lower_path(new_dentry, &lower_new_path);
- lower_old_dentry = lower_old_path.dentry;
- lower_new_dentry = lower_new_path.dentry;
- lower_dir_dentry = lock_parent(lower_new_dentry);
-
- err = vfs_link(lower_old_dentry, d_inode(lower_dir_dentry),
- lower_new_dentry, NULL);
- if (err || !d_inode(lower_new_dentry))
- goto out;
-
- err = sdcardfs_interpose(new_dentry, dir->i_sb, &lower_new_path);
- if (err)
- goto out;
- fsstack_copy_attr_times(dir, d_inode(lower_new_dentry));
- fsstack_copy_inode_size(dir, d_inode(lower_new_dentry));
- set_nlink(d_inode(old_dentry),
- sdcardfs_lower_inode(d_inode(old_dentry))->i_nlink);
- i_size_write(d_inode(new_dentry), file_size_save);
-out:
- unlock_dir(lower_dir_dentry);
- sdcardfs_put_lower_path(old_dentry, &lower_old_path);
- sdcardfs_put_lower_path(new_dentry, &lower_new_path);
- REVERT_CRED();
- return err;
-}
-#endif
-
static int sdcardfs_unlink(struct inode *dir, struct dentry *dentry)
{
int err;
@@ -178,7 +142,10 @@
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
@@ -209,43 +176,11 @@
unlock_dir(lower_dir_dentry);
dput(lower_dentry);
sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
-#if 0
-static int sdcardfs_symlink(struct inode *dir, struct dentry *dentry,
- const char *symname)
-{
- int err;
- struct dentry *lower_dentry;
- struct dentry *lower_parent_dentry = NULL;
- struct path lower_path;
-
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
- sdcardfs_get_lower_path(dentry, &lower_path);
- lower_dentry = lower_path.dentry;
- lower_parent_dentry = lock_parent(lower_dentry);
-
- err = vfs_symlink(d_inode(lower_parent_dentry), lower_dentry, symname);
- if (err)
- goto out;
- err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
- if (err)
- goto out;
- fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
- fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
-
-out:
- unlock_dir(lower_parent_dentry);
- sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED();
- return err;
-}
-#endif
-
static int touch(char *abs_path, mode_t mode)
{
struct file *filp = filp_open(abs_path, O_RDWR|O_CREAT|O_EXCL|O_NOFOLLOW, mode);
@@ -287,7 +222,10 @@
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
/* check disk space */
parent_dentry = dget_parent(dentry);
@@ -316,8 +254,11 @@
unlock_dir(lower_parent_dentry);
goto out_unlock;
}
+ copied_fs->umask = 0;
+ task_lock(current);
current->fs = copied_fs;
- current->fs->umask = 0;
+ task_unlock(current);
+
err = vfs_mkdir2(lower_mnt, d_inode(lower_parent_dentry), lower_dentry, mode);
if (err) {
@@ -366,23 +307,34 @@
if (make_nomedia_in_obb ||
((pd->perm == PERM_ANDROID)
&& (qstr_case_eq(&dentry->d_name, &q_data)))) {
- REVERT_CRED(saved_cred);
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(d_inode(dentry)));
+ revert_fsids(saved_cred);
+ saved_cred = override_fsids(sbi,
+ SDCARDFS_I(d_inode(dentry))->data);
+ if (!saved_cred) {
+ pr_err("sdcardfs: failed to set up .nomedia in %s: %d\n",
+ lower_path.dentry->d_name.name,
+ -ENOMEM);
+ goto out;
+ }
set_fs_pwd(current->fs, &lower_path);
touch_err = touch(".nomedia", 0664);
if (touch_err) {
pr_err("sdcardfs: failed to create .nomedia in %s: %d\n",
- lower_path.dentry->d_name.name, touch_err);
+ lower_path.dentry->d_name.name,
+ touch_err);
goto out;
}
}
out:
+ task_lock(current);
current->fs = saved_fs;
+ task_unlock(current);
+
free_fs_struct(copied_fs);
out_unlock:
sdcardfs_put_lower_path(dentry, &lower_path);
out_revert:
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
@@ -402,7 +354,10 @@
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
/* sdcardfs_get_real_lower(): in case of remove an user's obb dentry
* the dentry on the original path should be deleted.
@@ -427,44 +382,11 @@
out:
unlock_dir(lower_dir_dentry);
sdcardfs_put_real_lower(dentry, &lower_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
-#if 0
-static int sdcardfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode,
- dev_t dev)
-{
- int err;
- struct dentry *lower_dentry;
- struct dentry *lower_parent_dentry = NULL;
- struct path lower_path;
-
- OVERRIDE_CRED(SDCARDFS_SB(dir->i_sb));
-
- sdcardfs_get_lower_path(dentry, &lower_path);
- lower_dentry = lower_path.dentry;
- lower_parent_dentry = lock_parent(lower_dentry);
-
- err = vfs_mknod(d_inode(lower_parent_dentry), lower_dentry, mode, dev);
- if (err)
- goto out;
-
- err = sdcardfs_interpose(dentry, dir->i_sb, &lower_path);
- if (err)
- goto out;
- fsstack_copy_attr_times(dir, sdcardfs_lower_inode(dir));
- fsstack_copy_inode_size(dir, d_inode(lower_parent_dentry));
-
-out:
- unlock_dir(lower_parent_dentry);
- sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED();
- return err;
-}
-#endif
-
/*
* The locking rules in sdcardfs_rename are complex. We could use a simpler
* superblock-level name-space lock for renames and copy-ups.
@@ -493,7 +415,10 @@
}
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(old_dir->i_sb), saved_cred, SDCARDFS_I(new_dir));
+ saved_cred = override_fsids(SDCARDFS_SB(old_dir->i_sb),
+ SDCARDFS_I(new_dir)->data);
+ if (!saved_cred)
+ return -ENOMEM;
sdcardfs_get_real_lower(old_dentry, &lower_old_path);
sdcardfs_get_lower_path(new_dentry, &lower_new_path);
@@ -540,7 +465,7 @@
dput(lower_new_dir_dentry);
sdcardfs_put_real_lower(old_dentry, &lower_old_path);
sdcardfs_put_lower_path(new_dentry, &lower_new_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_eacces:
return err;
}
@@ -659,33 +584,7 @@
if (IS_POSIXACL(inode))
pr_warn("%s: This may be undefined behavior...\n", __func__);
err = generic_permission(&tmp, mask);
- /* XXX
- * Original sdcardfs code calls inode_permission(lower_inode,.. )
- * for checking inode permission. But doing such things here seems
- * duplicated work, because the functions called after this func,
- * such as vfs_create, vfs_unlink, vfs_rename, and etc,
- * does exactly same thing, i.e., they calls inode_permission().
- * So we just let they do the things.
- * If there are any security hole, just uncomment following if block.
- */
-#if 0
- if (!err) {
- /*
- * Permission check on lower_inode(=EXT4).
- * we check it with AID_MEDIA_RW permission
- */
- struct inode *lower_inode;
-
- OVERRIDE_CRED(SDCARDFS_SB(inode->sb));
-
- lower_inode = sdcardfs_lower_inode(inode);
- err = inode_permission(lower_inode, mask);
-
- REVERT_CRED();
- }
-#endif
return err;
-
}
static int sdcardfs_setattr_wrn(struct dentry *dentry, struct iattr *ia)
@@ -763,7 +662,10 @@
goto out_err;
/* save current_cred and override it */
- OVERRIDE_CRED(SDCARDFS_SB(dentry->d_sb), saved_cred, SDCARDFS_I(inode));
+ saved_cred = override_fsids(SDCARDFS_SB(dentry->d_sb),
+ SDCARDFS_I(inode)->data);
+ if (!saved_cred)
+ return -ENOMEM;
sdcardfs_get_lower_path(dentry, &lower_path);
lower_dentry = lower_path.dentry;
@@ -822,7 +724,7 @@
out:
sdcardfs_put_lower_path(dentry, &lower_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_err:
return err;
}
@@ -905,13 +807,6 @@
.setattr = sdcardfs_setattr_wrn,
.setattr2 = sdcardfs_setattr,
.getattr = sdcardfs_getattr,
- /* XXX Following operations are implemented,
- * but FUSE(sdcard) or FAT does not support them
- * These methods are *NOT* perfectly tested.
- .symlink = sdcardfs_symlink,
- .link = sdcardfs_link,
- .mknod = sdcardfs_mknod,
- */
};
const struct inode_operations sdcardfs_main_iops = {
diff --git a/fs/sdcardfs/lookup.c b/fs/sdcardfs/lookup.c
index 98051996..beec63b 100644
--- a/fs/sdcardfs/lookup.c
+++ b/fs/sdcardfs/lookup.c
@@ -426,7 +426,12 @@
}
/* save current_cred and override it */
- OVERRIDE_CRED_PTR(SDCARDFS_SB(dir->i_sb), saved_cred, SDCARDFS_I(dir));
+ saved_cred = override_fsids(SDCARDFS_SB(dir->i_sb),
+ SDCARDFS_I(dir)->data);
+ if (!saved_cred) {
+ ret = ERR_PTR(-ENOMEM);
+ goto out_err;
+ }
sdcardfs_get_lower_path(parent, &lower_parent_path);
@@ -457,7 +462,7 @@
out:
sdcardfs_put_lower_path(parent, &lower_parent_path);
- REVERT_CRED(saved_cred);
+ revert_fsids(saved_cred);
out_err:
dput(parent);
return ret;
diff --git a/fs/sdcardfs/sdcardfs.h b/fs/sdcardfs/sdcardfs.h
index 826afb5..ec2290a 100644
--- a/fs/sdcardfs/sdcardfs.h
+++ b/fs/sdcardfs/sdcardfs.h
@@ -88,31 +88,6 @@
(x)->i_mode = ((x)->i_mode & S_IFMT) | 0775;\
} while (0)
-/* OVERRIDE_CRED() and REVERT_CRED()
- * OVERRIDE_CRED()
- * backup original task->cred
- * and modifies task->cred->fsuid/fsgid to specified value.
- * REVERT_CRED()
- * restore original task->cred->fsuid/fsgid.
- * These two macro should be used in pair, and OVERRIDE_CRED() should be
- * placed at the beginning of a function, right after variable declaration.
- */
-#define OVERRIDE_CRED(sdcardfs_sbi, saved_cred, info) \
- do { \
- saved_cred = override_fsids(sdcardfs_sbi, info->data); \
- if (!saved_cred) \
- return -ENOMEM; \
- } while (0)
-
-#define OVERRIDE_CRED_PTR(sdcardfs_sbi, saved_cred, info) \
- do { \
- saved_cred = override_fsids(sdcardfs_sbi, info->data); \
- if (!saved_cred) \
- return ERR_PTR(-ENOMEM); \
- } while (0)
-
-#define REVERT_CRED(saved_cred) revert_fsids(saved_cred)
-
/* Android 5.0 support */
/* Permission mode for a specific node. Controls how file permissions
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 03dda1c..727a9e3 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -1918,6 +1918,9 @@
int dev, vol;
char *endptr;
+ if (!name || !*name)
+ return ERR_PTR(-EINVAL);
+
/* First, try to open using the device node path method */
ubi = ubi_open_volume_path(name, mode);
if (!IS_ERR(ubi))
diff --git a/fs/xattr.c b/fs/xattr.c
index 2c2c28e..1c91835 100644
--- a/fs/xattr.c
+++ b/fs/xattr.c
@@ -953,17 +953,19 @@
int err = 0;
#ifdef CONFIG_FS_POSIX_ACL
- if (inode->i_acl) {
- err = xattr_list_one(&buffer, &remaining_size,
- XATTR_NAME_POSIX_ACL_ACCESS);
- if (err)
- return err;
- }
- if (inode->i_default_acl) {
- err = xattr_list_one(&buffer, &remaining_size,
- XATTR_NAME_POSIX_ACL_DEFAULT);
- if (err)
- return err;
+ if (IS_POSIXACL(inode)) {
+ if (inode->i_acl) {
+ err = xattr_list_one(&buffer, &remaining_size,
+ XATTR_NAME_POSIX_ACL_ACCESS);
+ if (err)
+ return err;
+ }
+ if (inode->i_default_acl) {
+ err = xattr_list_one(&buffer, &remaining_size,
+ XATTR_NAME_POSIX_ACL_DEFAULT);
+ if (err)
+ return err;
+ }
}
#endif
diff --git a/include/Kbuild b/include/Kbuild
index bab1145..9205b04 100644
--- a/include/Kbuild
+++ b/include/Kbuild
@@ -1,2 +1,6 @@
# Top-level Makefile calls into asm-$(ARCH)
# List only non-arch directories below
+
+ifneq ($(VSERVICES_SUPPORT), "")
+header-y += vservices/
+endif
diff --git a/include/crypto/speck.h b/include/crypto/speck.h
deleted file mode 100644
index 73cfc95..0000000
--- a/include/crypto/speck.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0
-/*
- * Common values for the Speck algorithm
- */
-
-#ifndef _CRYPTO_SPECK_H
-#define _CRYPTO_SPECK_H
-
-#include <linux/types.h>
-
-/* Speck128 */
-
-#define SPECK128_BLOCK_SIZE 16
-
-#define SPECK128_128_KEY_SIZE 16
-#define SPECK128_128_NROUNDS 32
-
-#define SPECK128_192_KEY_SIZE 24
-#define SPECK128_192_NROUNDS 33
-
-#define SPECK128_256_KEY_SIZE 32
-#define SPECK128_256_NROUNDS 34
-
-struct speck128_tfm_ctx {
- u64 round_keys[SPECK128_256_NROUNDS];
- int nrounds;
-};
-
-void crypto_speck128_encrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-void crypto_speck128_decrypt(const struct speck128_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-int crypto_speck128_setkey(struct speck128_tfm_ctx *ctx, const u8 *key,
- unsigned int keysize);
-
-/* Speck64 */
-
-#define SPECK64_BLOCK_SIZE 8
-
-#define SPECK64_96_KEY_SIZE 12
-#define SPECK64_96_NROUNDS 26
-
-#define SPECK64_128_KEY_SIZE 16
-#define SPECK64_128_NROUNDS 27
-
-struct speck64_tfm_ctx {
- u32 round_keys[SPECK64_128_NROUNDS];
- int nrounds;
-};
-
-void crypto_speck64_encrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-void crypto_speck64_decrypt(const struct speck64_tfm_ctx *ctx,
- u8 *out, const u8 *in);
-
-int crypto_speck64_setkey(struct speck64_tfm_ctx *ctx, const u8 *key,
- unsigned int keysize);
-
-#endif /* _CRYPTO_SPECK_H */
diff --git a/include/linux/Kbuild.vservices b/include/linux/Kbuild.vservices
new file mode 100644
index 0000000..392f559
--- /dev/null
+++ b/include/linux/Kbuild.vservices
@@ -0,0 +1,3 @@
+#
+# Virtual Services headers which need to be exported for user-space
+#
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 43072b1..9f721ee 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -201,47 +201,57 @@
#define __declare_arg_0(a0, res) \
struct arm_smccc_res *___res = res; \
- register u32 r0 SMCCC_REG(0) = a0; \
+ register unsigned long r0 SMCCC_REG(0) = (u32)a0; \
register unsigned long r1 SMCCC_REG(1); \
register unsigned long r2 SMCCC_REG(2); \
register unsigned long r3 SMCCC_REG(3)
#define __declare_arg_1(a0, a1, res) \
+ typeof(a1) __a1 = a1; \
struct arm_smccc_res *___res = res; \
- register u32 r0 SMCCC_REG(0) = a0; \
- register typeof(a1) r1 SMCCC_REG(1) = a1; \
+ register unsigned long r0 SMCCC_REG(0) = (u32)a0; \
+ register unsigned long r1 SMCCC_REG(1) = __a1; \
register unsigned long r2 SMCCC_REG(2); \
register unsigned long r3 SMCCC_REG(3)
#define __declare_arg_2(a0, a1, a2, res) \
+ typeof(a1) __a1 = a1; \
+ typeof(a2) __a2 = a2; \
struct arm_smccc_res *___res = res; \
- register u32 r0 SMCCC_REG(0) = a0; \
- register typeof(a1) r1 SMCCC_REG(1) = a1; \
- register typeof(a2) r2 SMCCC_REG(2) = a2; \
+ register unsigned long r0 SMCCC_REG(0) = (u32)a0; \
+ register unsigned long r1 SMCCC_REG(1) = __a1; \
+ register unsigned long r2 SMCCC_REG(2) = __a2; \
register unsigned long r3 SMCCC_REG(3)
#define __declare_arg_3(a0, a1, a2, a3, res) \
+ typeof(a1) __a1 = a1; \
+ typeof(a2) __a2 = a2; \
+ typeof(a3) __a3 = a3; \
struct arm_smccc_res *___res = res; \
- register u32 r0 SMCCC_REG(0) = a0; \
- register typeof(a1) r1 SMCCC_REG(1) = a1; \
- register typeof(a2) r2 SMCCC_REG(2) = a2; \
- register typeof(a3) r3 SMCCC_REG(3) = a3
+ register unsigned long r0 SMCCC_REG(0) = (u32)a0; \
+ register unsigned long r1 SMCCC_REG(1) = __a1; \
+ register unsigned long r2 SMCCC_REG(2) = __a2; \
+ register unsigned long r3 SMCCC_REG(3) = __a3
#define __declare_arg_4(a0, a1, a2, a3, a4, res) \
+ typeof(a4) __a4 = a4; \
__declare_arg_3(a0, a1, a2, a3, res); \
- register typeof(a4) r4 SMCCC_REG(4) = a4
+ register unsigned long r4 SMCCC_REG(4) = __a4
#define __declare_arg_5(a0, a1, a2, a3, a4, a5, res) \
+ typeof(a5) __a5 = a5; \
__declare_arg_4(a0, a1, a2, a3, a4, res); \
- register typeof(a5) r5 SMCCC_REG(5) = a5
+ register unsigned long r5 SMCCC_REG(5) = __a5
#define __declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res) \
+ typeof(a6) __a6 = a6; \
__declare_arg_5(a0, a1, a2, a3, a4, a5, res); \
- register typeof(a6) r6 SMCCC_REG(6) = a6
+ register unsigned long r6 SMCCC_REG(6) = __a6
#define __declare_arg_7(a0, a1, a2, a3, a4, a5, a6, a7, res) \
+ typeof(a7) __a7 = a7; \
__declare_arg_6(a0, a1, a2, a3, a4, a5, a6, res); \
- register typeof(a7) r7 SMCCC_REG(7) = a7
+ register unsigned long r7 SMCCC_REG(7) = __a7
#define ___declare_args(count, ...) __declare_arg_ ## count(__VA_ARGS__)
#define __declare_args(count, ...) ___declare_args(count, __VA_ARGS__)
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index a149671..f0641d3 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -188,6 +188,9 @@
__REQ_FUA, /* forced unit access */
__REQ_PREFLUSH, /* request for cache flush */
__REQ_BARRIER, /* marks flush req as barrier */
+ /* Android specific flags */
+ __REQ_NOENCRYPT, /* ok to not encrypt (already encrypted at fs
+ level) */
/* bio only flags */
__REQ_RAHEAD, /* read ahead, can fail anytime */
@@ -228,6 +231,7 @@
#define REQ_URGENT (1ULL << __REQ_URGENT)
#define REQ_NOIDLE (1ULL << __REQ_NOIDLE)
#define REQ_INTEGRITY (1ULL << __REQ_INTEGRITY)
+#define REQ_NOENCRYPT (1ULL << __REQ_NOENCRYPT)
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index aa5db8b..f70f8ac 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -304,11 +304,6 @@
* For NAT entries
*/
#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
-#define NAT_ENTRY_BITMAP_SIZE ((NAT_ENTRY_PER_BLOCK + 7) / 8)
-#define NAT_ENTRY_BITMAP_SIZE_ALIGNED \
- ((NAT_ENTRY_BITMAP_SIZE + BITS_PER_LONG - 1) / \
- BITS_PER_LONG * BITS_PER_LONG)
-
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index e35e6de..9b9f65d 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -22,7 +22,7 @@
unsigned char *vec);
extern bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush);
+ pmd_t *old_pmd, pmd_t *new_pmd);
extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
unsigned long addr, pgprot_t newprot,
int prot_numa);
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index c2a0f00..734377a 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -292,6 +292,8 @@
return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
}
+extern u64 jiffies64_to_nsecs(u64 j);
+
extern unsigned long __msecs_to_jiffies(const unsigned int m);
#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
/*
diff --git a/include/linux/kernel_stat.h b/include/linux/kernel_stat.h
index d0826f1..44fda64 100644
--- a/include/linux/kernel_stat.h
+++ b/include/linux/kernel_stat.h
@@ -33,7 +33,6 @@
struct kernel_cpustat {
u64 cpustat[NR_STATS];
- u64 softirq_no_ksoftirqd;
};
struct kernel_stat {
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index e1b845a..153ed93 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -2335,6 +2335,13 @@
struct net_device *dev;
};
+struct netdev_notifier_info_ext {
+ struct netdev_notifier_info info; /* must be first */
+ union {
+ u32 mtu;
+ } ext;
+};
+
struct netdev_notifier_change_info {
struct netdev_notifier_info info; /* must be first */
unsigned int flags_changed;
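For context, a minimal sketch of a notifier consuming the new extended info (the handler name is illustrative; netdev_notifier_info_to_dev() and the notifier signature are the standard kernel API):

	static int example_netdev_event(struct notifier_block *nb,
					unsigned long event, void *ptr)
	{
		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
		struct netdev_notifier_info_ext *info_ext = ptr;

		/* ext.mtu carries the MTU associated with the event; in the
		 * related fib change it is the original MTU on NETDEV_CHANGEMTU */
		if (event == NETDEV_CHANGEMTU)
			pr_info("%s: mtu event, ext.mtu=%u\n", dev->name,
				info_ext->ext.mtu);
		return NOTIFY_DONE;
	}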
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 984b211..ea8a977 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -123,4 +123,9 @@
/* True if the target is not a standard target */
#define INVALID_TARGET (info->target < -NUM_STANDARD_TARGETS || info->target >= 0)
+static inline bool ebt_invalid_target(int target)
+{
+ return (target < -NUM_STANDARD_TARGETS || target >= 0);
+}
+
#endif
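For reference, a minimal sketch of a target checkentry hook using the new ebt_invalid_target() helper, modelled on ebt_redirect (the struct and function names other than the helper come from that module, not from this hunk):

	static int ebt_redirect_tg_check(const struct xt_tgchk_param *par)
	{
		const struct ebt_redirect_info *info = par->targinfo;

		/* only standard verdicts (negative targets) are acceptable */
		if (ebt_invalid_target(info->target))
			return -EINVAL;

		return 0;
	}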
diff --git a/include/linux/platform_data/ina2xx.h b/include/linux/platform_data/ina2xx.h
index 9abc0ca..9f0aa1b 100644
--- a/include/linux/platform_data/ina2xx.h
+++ b/include/linux/platform_data/ina2xx.h
@@ -1,7 +1,7 @@
/*
* Driver for Texas Instruments INA219, INA226 power monitor chips
*
- * Copyright (C) 2012 Lothar Felten <l-felten@ti.com>
+ * Copyright (C) 2012 Lothar Felten <lothar.felten@gmail.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 4c07788..a8d1fa5 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -303,6 +303,7 @@
POWER_SUPPLY_PROP_SOH,
POWER_SUPPLY_PROP_QC_OPTI_DISABLE,
POWER_SUPPLY_PROP_FCC_STEPPER_ENABLE,
+ POWER_SUPPLY_PROP_CC_SOC,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
@@ -443,6 +444,7 @@
spinlock_t changed_lock;
bool changed;
bool initialized;
+ bool removing;
atomic_t use_cnt;
#ifdef CONFIG_THERMAL
struct thermal_zone_device *tzd;
diff --git a/include/linux/qpnp/qpnp-adc.h b/include/linux/qpnp/qpnp-adc.h
index 770cd64..308225b 100644
--- a/include/linux/qpnp/qpnp-adc.h
+++ b/include/linux/qpnp/qpnp-adc.h
@@ -406,6 +406,8 @@
* %SCALE_BATT_THERM_TEMP_PU400: Conversion to temperature(decidegC) for 400k
* pullup.
* %SCALE_SMB1390_DIE_TEMP: Conversion for SMB1390 die temp
+ * %SCALE_BATT_THERM_TEMP_QRD_215: Conversion to temperature(decidegC) based on
+ * btm parameters for QRD.
* %SCALE_NONE: Do not use this scaling type.
*/
enum qpnp_adc_scale_fn_type {
@@ -432,6 +434,7 @@
SCALE_SMB1390_DIE_TEMP,
SCALE_BATT_THERM_TEMP_PU30,
SCALE_BATT_THERM_TEMP_PU400,
+ SCALE_BATT_THERM_TEMP_QRD_215,
SCALE_NONE,
};
@@ -1510,6 +1513,23 @@
const struct qpnp_vadc_chan_properties *chan_prop,
struct qpnp_vadc_result *chan_rslt);
/**
+ * qpnp_adc_batt_therm_qrd_215() - Scales the pre-calibrated digital output
+ * of an ADC to the ADC reference and compensates for the
+ * gain and offset. Returns the temperature in decidegC for QRD.
+ * @dev: Structure device for qpnp vadc
+ * @adc_code: pre-calibrated digital output of the ADC.
+ * @adc_prop: adc properties of the pm8xxx adc such as bit resolution,
+ * reference voltage.
+ * @chan_prop: individual channel properties to compensate the i/p scaling,
+ * slope and offset.
+ * @chan_rslt: physical result to be stored.
+ */
+int32_t qpnp_adc_batt_therm_qrd_215(struct qpnp_vadc_chip *dev,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt);
+/**
* qpnp_adc_scale_batt_therm() - Scales the pre-calibrated digital output
* of an ADC to the ADC reference and compensates for the
* gain and offset. Returns the temperature in decidegC.
@@ -2164,6 +2184,12 @@
const struct qpnp_vadc_chan_properties *chan_prop,
struct qpnp_vadc_result *chan_rslt)
{ return -ENXIO; }
+static inline int32_t qpnp_adc_batt_therm_qrd_215(struct qpnp_vadc_chip *vadc,
+ int32_t adc_code,
+ const struct qpnp_adc_properties *adc_prop,
+ const struct qpnp_vadc_chan_properties *chan_prop,
+ struct qpnp_vadc_result *chan_rslt)
+{ return -ENXIO; }
static inline int32_t qpnp_adc_scale_batt_therm(struct qpnp_vadc_chip *vadc,
int32_t adc_code,
const struct qpnp_adc_properties *adc_prop,
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 85d1ffc..4421e5c 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -138,7 +138,6 @@
/**
* struct rhashtable - Hash table handle
* @tbl: Bucket table
- * @nelems: Number of elements in table
* @key_len: Key length for hashfn
* @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
@@ -146,10 +145,10 @@
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
* @lock: Spin lock to protect walker list
+ * @nelems: Number of elements in table
*/
struct rhashtable {
struct bucket_table __rcu *tbl;
- atomic_t nelems;
unsigned int key_len;
unsigned int elasticity;
struct rhashtable_params p;
@@ -157,6 +156,7 @@
struct work_struct run_work;
struct mutex mutex;
spinlock_t lock;
+ atomic_t nelems;
};
/**
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 9b8e0ed..11bb7cb 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -643,9 +643,14 @@
struct skb_mstamp skb_mstamp;
};
};
- struct rb_node rbnode; /* used in netem & tcp stack */
+ struct rb_node rbnode; /* used in netem, ip4 defrag, and tcp stack */
};
- struct sock *sk;
+
+ union {
+ struct sock *sk;
+ int ip_defrag_offset;
+ };
+
struct net_device *dev;
/*
@@ -2417,7 +2422,7 @@
kfree_skb(skb);
}
-void skb_rbtree_purge(struct rb_root *root);
+unsigned int skb_rbtree_purge(struct rb_root *root);
void *netdev_alloc_frag(unsigned int fragsz);
@@ -2953,6 +2958,7 @@
return skb->data;
}
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len);
/**
* pskb_trim_rcsum - trim received skb and update checksum
* @skb: buffer to trim
@@ -2966,9 +2972,7 @@
{
if (likely(len >= skb->len))
return 0;
- if (skb->ip_summed == CHECKSUM_COMPLETE)
- skb->ip_summed = CHECKSUM_NONE;
- return __pskb_trim(skb, len);
+ return pskb_trim_rcsum_slow(skb, len);
}
static inline int __skb_trim_rcsum(struct sk_buff *skb, unsigned int len)
@@ -2988,6 +2992,12 @@
#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)
+#define rb_to_skb(rb) rb_entry_safe(rb, struct sk_buff, rbnode)

+#define skb_rb_first(root) rb_to_skb(rb_first(root))
+#define skb_rb_last(root) rb_to_skb(rb_last(root))
+#define skb_rb_next(skb) rb_to_skb(rb_next(&(skb)->rbnode))
+#define skb_rb_prev(skb) rb_to_skb(rb_prev(&(skb)->rbnode))
+
#define skb_queue_walk(queue, skb) \
for (skb = (queue)->next; \
skb != (struct sk_buff *)(queue); \
@@ -3002,6 +3012,18 @@
for (; skb != (struct sk_buff *)(queue); \
skb = skb->next)
+#define skb_rbtree_walk(skb, root) \
+ for (skb = skb_rb_first(root); skb != NULL; \
+ skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from(skb) \
+ for (; skb != NULL; \
+ skb = skb_rb_next(skb))
+
+#define skb_rbtree_walk_from_safe(skb, tmp) \
+ for (; tmp = skb ? skb_rb_next(skb) : NULL, (skb != NULL); \
+ skb = tmp)
+
#define skb_queue_walk_from_safe(queue, skb, tmp) \
for (tmp = skb->next; \
skb != (struct sk_buff *)(queue); \
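For context, the new rb-tree walkers parallel the list walkers above; a minimal sketch of draining a tree of skbs with the _safe variant (the ooo_queue root is an assumed example, not from this patch):

	struct rb_root ooo_queue = RB_ROOT;
	struct sk_buff *skb, *tmp;

	skb = skb_rb_first(&ooo_queue);
	skb_rbtree_walk_from_safe(skb, tmp) {
		/* unlinking is safe: tmp already points at the successor */
		rb_erase(&skb->rbnode, &ooo_queue);
		kfree_skb(skb);
	}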
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 75f56c2..b6a59e8 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -67,7 +67,8 @@
int size; /* The size of an object including meta data */
int object_size; /* The size of an object without meta data */
int offset; /* Free pointer offset. */
- int cpu_partial; /* Number of per cpu partial objects to keep around */
+ /* Number of per cpu partial objects to keep around */
+ unsigned int cpu_partial;
struct kmem_cache_order_objects oo;
/* Allocation and freeing of slabs */
diff --git a/include/linux/tty.h b/include/linux/tty.h
index fe1b862..bcfeb9e 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -313,6 +313,10 @@
wait_queue_head_t write_wait;
wait_queue_head_t read_wait;
struct work_struct hangup_work;
+#if defined(CONFIG_TTY_FLUSH_LOCAL_ECHO)
+ int delayed_work;
+ struct delayed_work echo_delayed_work;
+#endif
void *disc_data;
void *driver_data;
spinlock_t files_lock; /* protects tty_files list */
diff --git a/include/media/v4l2-fh.h b/include/media/v4l2-fh.h
index e19e624..d267160 100644
--- a/include/media/v4l2-fh.h
+++ b/include/media/v4l2-fh.h
@@ -42,10 +42,13 @@
* @prio: priority of the file handler, as defined by &enum v4l2_priority
*
* @wait: event' s wait queue
+ * @subscribe_lock: serialises changes to the subscribed list; guarantees that
+ * the add and del event callbacks are called in order
* @subscribed: list of subscribed events
* @available: list of events waiting to be dequeued
* @navailable: number of available events at @available list
* @sequence: event sequence number
+ *
* @m2m_ctx: pointer to &struct v4l2_m2m_ctx
*/
struct v4l2_fh {
@@ -56,6 +59,7 @@
/* Events */
wait_queue_head_t wait;
+ struct mutex subscribe_lock;
struct list_head subscribed;
struct list_head available;
unsigned int navailable;
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 714428c..8750c2c 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -139,12 +139,6 @@
int mode;
};
-struct netdev_notify_work {
- struct delayed_work work;
- struct net_device *dev;
- struct netdev_bonding_info bonding_info;
-};
-
struct slave {
struct net_device *dev; /* first - useful for panic debug */
struct bonding *bond; /* our master */
@@ -171,6 +165,7 @@
#ifdef CONFIG_NET_POLL_CONTROLLER
struct netpoll *np;
#endif
+ struct delayed_work notify_work;
struct kobject kobj;
struct rtnl_link_stats64 slave_stats;
};
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
index 4e86755..8a1772e 100644
--- a/include/net/cnss2.h
+++ b/include/net/cnss2.h
@@ -161,6 +161,7 @@
extern int cnss_self_recovery(struct device *dev,
enum cnss_recovery_reason reason);
extern int cnss_force_fw_assert(struct device *dev);
+extern int cnss_force_collect_rddm(struct device *dev);
extern void *cnss_get_virt_ramdump_mem(struct device *dev, unsigned long *size);
extern int cnss_get_fw_files_for_target(struct device *dev,
struct cnss_fw_files *pfw_files,
diff --git a/include/net/inet_frag.h b/include/net/inet_frag.h
index 634d192..a3812e9 100644
--- a/include/net/inet_frag.h
+++ b/include/net/inet_frag.h
@@ -1,14 +1,20 @@
#ifndef __NET_FRAG_H__
#define __NET_FRAG_H__
+#include <linux/rhashtable.h>
+
struct netns_frags {
- /* Keep atomic mem on separate cachelines in structs that include it */
- atomic_t mem ____cacheline_aligned_in_smp;
/* sysctls */
+ long high_thresh;
+ long low_thresh;
int timeout;
- int high_thresh;
- int low_thresh;
int max_dist;
+ struct inet_frags *f;
+
+ struct rhashtable rhashtable ____cacheline_aligned_in_smp;
+
+ /* Keep atomic mem on separate cachelines in structs that include it */
+ atomic_long_t mem ____cacheline_aligned_in_smp;
};
/**
@@ -24,130 +30,115 @@
INET_FRAG_COMPLETE = BIT(2),
};
+struct frag_v4_compare_key {
+ __be32 saddr;
+ __be32 daddr;
+ u32 user;
+ u32 vif;
+ __be16 id;
+ u16 protocol;
+};
+
+struct frag_v6_compare_key {
+ struct in6_addr saddr;
+ struct in6_addr daddr;
+ u32 user;
+ __be32 id;
+ u32 iif;
+};
+
/**
* struct inet_frag_queue - fragment queue
*
- * @lock: spinlock protecting the queue
+ * @node: rhash node
+ * @key: keys identifying this frag.
* @timer: queue expiration timer
- * @list: hash bucket list
+ * @lock: spinlock protecting this frag
* @refcnt: reference count of the queue
* @fragments: received fragments head
+ * @rb_fragments: received fragments rb-tree root
* @fragments_tail: received fragments tail
+ * @last_run_head: the head of the last "run". see ip_fragment.c
* @stamp: timestamp of the last received fragment
* @len: total length of the original datagram
* @meat: length of received fragments so far
* @flags: fragment queue flags
* @max_size: maximum received fragment size
* @net: namespace that this frag belongs to
- * @list_evictor: list of queues to forcefully evict (e.g. due to low memory)
+ * @rcu: rcu head for deferred freeing
*/
struct inet_frag_queue {
- spinlock_t lock;
+ struct rhash_head node;
+ union {
+ struct frag_v4_compare_key v4;
+ struct frag_v6_compare_key v6;
+ } key;
struct timer_list timer;
- struct hlist_node list;
+ spinlock_t lock;
atomic_t refcnt;
- struct sk_buff *fragments;
+ struct sk_buff *fragments; /* Used in IPv6. */
+ struct rb_root rb_fragments; /* Used in IPv4. */
struct sk_buff *fragments_tail;
+ struct sk_buff *last_run_head;
ktime_t stamp;
int len;
int meat;
__u8 flags;
u16 max_size;
- struct netns_frags *net;
- struct hlist_node list_evictor;
-};
-
-#define INETFRAGS_HASHSZ 1024
-
-/* averaged:
- * max_depth = default ipfrag_high_thresh / INETFRAGS_HASHSZ /
- * rounded up (SKB_TRUELEN(0) + sizeof(struct ipq or
- * struct frag_queue))
- */
-#define INETFRAGS_MAXDEPTH 128
-
-struct inet_frag_bucket {
- struct hlist_head chain;
- spinlock_t chain_lock;
+ struct netns_frags *net;
+ struct rcu_head rcu;
};
struct inet_frags {
- struct inet_frag_bucket hash[INETFRAGS_HASHSZ];
-
- struct work_struct frags_work;
- unsigned int next_bucket;
- unsigned long last_rebuild_jiffies;
- bool rebuild;
-
- /* The first call to hashfn is responsible to initialize
- * rnd. This is best done with net_get_random_once.
- *
- * rnd_seqlock is used to let hash insertion detect
- * when it needs to re-lookup the hash chain to use.
- */
- u32 rnd;
- seqlock_t rnd_seqlock;
int qsize;
- unsigned int (*hashfn)(const struct inet_frag_queue *);
- bool (*match)(const struct inet_frag_queue *q,
- const void *arg);
void (*constructor)(struct inet_frag_queue *q,
const void *arg);
void (*destructor)(struct inet_frag_queue *);
void (*frag_expire)(unsigned long data);
struct kmem_cache *frags_cachep;
const char *frags_cache_name;
+ struct rhashtable_params rhash_params;
};
int inet_frags_init(struct inet_frags *);
void inet_frags_fini(struct inet_frags *);
-static inline void inet_frags_init_net(struct netns_frags *nf)
+static inline int inet_frags_init_net(struct netns_frags *nf)
{
- atomic_set(&nf->mem, 0);
+ atomic_long_set(&nf->mem, 0);
+ return rhashtable_init(&nf->rhashtable, &nf->f->rhash_params);
}
-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f);
+void inet_frags_exit_net(struct netns_frags *nf);
-void inet_frag_kill(struct inet_frag_queue *q, struct inet_frags *f);
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f);
-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
- struct inet_frags *f, void *key, unsigned int hash);
+void inet_frag_kill(struct inet_frag_queue *q);
+void inet_frag_destroy(struct inet_frag_queue *q);
+struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key);
-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
- const char *prefix);
+/* Free all skbs in the queue; return the sum of their truesizes. */
+unsigned int inet_frag_rbtree_purge(struct rb_root *root);
-static inline void inet_frag_put(struct inet_frag_queue *q, struct inet_frags *f)
+static inline void inet_frag_put(struct inet_frag_queue *q)
{
if (atomic_dec_and_test(&q->refcnt))
- inet_frag_destroy(q, f);
-}
-
-static inline bool inet_frag_evicting(struct inet_frag_queue *q)
-{
- return !hlist_unhashed(&q->list_evictor);
+ inet_frag_destroy(q);
}
/* Memory Tracking Functions. */
-static inline int frag_mem_limit(struct netns_frags *nf)
+static inline long frag_mem_limit(const struct netns_frags *nf)
{
- return atomic_read(&nf->mem);
+ return atomic_long_read(&nf->mem);
}
-static inline void sub_frag_mem_limit(struct netns_frags *nf, int i)
+static inline void sub_frag_mem_limit(struct netns_frags *nf, long val)
{
- atomic_sub(i, &nf->mem);
+ atomic_long_sub(val, &nf->mem);
}
-static inline void add_frag_mem_limit(struct netns_frags *nf, int i)
+static inline void add_frag_mem_limit(struct netns_frags *nf, long val)
{
- atomic_add(i, &nf->mem);
-}
-
-static inline int sum_frag_mem_limit(struct netns_frags *nf)
-{
- return atomic_read(&nf->mem);
+ atomic_long_add(val, &nf->mem);
}
/* RFC 3168 support :
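For context, a minimal sketch of the reworked per-netns lookup API from an IPv4 reassembly path (the iph/user/vif variables and the calling context are assumed; the key layout and helpers are from this hunk):

	struct frag_v4_compare_key key = {
		.saddr    = iph->saddr,
		.daddr    = iph->daddr,
		.user     = user,
		.vif      = vif,
		.id       = iph->id,
		.protocol = iph->protocol,
	};
	struct inet_frag_queue *q;

	q = inet_frag_find(&net->ipv4.frags, &key);
	if (q) {
		/* ... queue the fragment under q->lock ... */
		inet_frag_put(q);	/* drop the reference from the lookup */
	}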
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index a2f3a49..c2865f5 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -132,12 +132,6 @@
return sk->sk_bound_dev_if;
}
-static inline struct ip_options_rcu *ireq_opt_deref(const struct inet_request_sock *ireq)
-{
- return rcu_dereference_check(ireq->ireq_opt,
- atomic_read(&ireq->req.rsk_refcnt) > 0);
-}
-
struct inet_cork {
unsigned int flags;
__be32 addr;
diff --git a/include/net/ip.h b/include/net/ip.h
index b1b5ee0..0623529 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -551,7 +551,6 @@
return skb;
}
#endif
-int ip_frag_mem(struct net *net);
/*
* Functions provided by ip_forward.c
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 978387d..a6446d7 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -363,6 +363,7 @@
int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu);
extern u32 fib_multipath_secret __read_mostly;
diff --git a/include/net/ipv6.h b/include/net/ipv6.h
index 64b0e9d..7cb100d 100644
--- a/include/net/ipv6.h
+++ b/include/net/ipv6.h
@@ -330,13 +330,6 @@
idev->cnf.accept_ra;
}
-#if IS_ENABLED(CONFIG_IPV6)
-static inline int ip6_frag_mem(struct net *net)
-{
- return sum_frag_mem_limit(&net->ipv6.frags);
-}
-#endif
-
#define IPV6_FRAG_HIGH_THRESH (4 * 1024*1024) /* 4194304 */
#define IPV6_FRAG_LOW_THRESH (3 * 1024*1024) /* 3145728 */
#define IPV6_FRAG_TIMEOUT (60 * HZ) /* 60 seconds */
@@ -530,17 +523,8 @@
__IP6_DEFRAG_CONNTRACK_BRIDGE_IN = IP6_DEFRAG_CONNTRACK_BRIDGE_IN + USHRT_MAX,
};
-struct ip6_create_arg {
- __be32 id;
- u32 user;
- const struct in6_addr *src;
- const struct in6_addr *dst;
- int iif;
- u8 ecn;
-};
-
void ip6_frag_init(struct inet_frag_queue *q, const void *a);
-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a);
+extern const struct rhashtable_params ip6_rhash_params;
/*
* Equivalent of ipv4 struct ip
@@ -548,19 +532,13 @@
struct frag_queue {
struct inet_frag_queue q;
- __be32 id; /* fragment id */
- u32 user;
- struct in6_addr saddr;
- struct in6_addr daddr;
-
int iif;
unsigned int csum;
__u16 nhoffset;
u8 ecn;
};
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
- struct inet_frags *frags);
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq);
static inline bool ipv6_addr_any(const struct in6_addr *a)
{
diff --git a/include/net/nfc/hci.h b/include/net/nfc/hci.h
index 316694d..008f466 100644
--- a/include/net/nfc/hci.h
+++ b/include/net/nfc/hci.h
@@ -87,7 +87,7 @@
* According to specification 102 622 chapter 4.4 Pipes,
* the pipe identifier is 7 bits long.
*/
-#define NFC_HCI_MAX_PIPES 127
+#define NFC_HCI_MAX_PIPES 128
struct nfc_hci_init_data {
u8 gate_count;
struct nfc_hci_gate gates[NFC_HCI_MAX_CUSTOM_GATES];
diff --git a/include/soc/qcom/lpm_levels.h b/include/soc/qcom/lpm_levels.h
index 3665caf..7104ed5 100644
--- a/include/soc/qcom/lpm_levels.h
+++ b/include/soc/qcom/lpm_levels.h
@@ -15,7 +15,7 @@
struct system_pm_ops {
int (*enter)(struct cpumask *mask);
- void (*exit)(void);
+ void (*exit)(bool success);
int (*update_wakeup)(bool);
bool (*sleep_allowed)(void);
};
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index 435cee5..c2882c2 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -515,3 +515,8 @@
header-y += msm_rotator.h
header-y += bgcom_interface.h
header-y += nfc/
+
+ifneq ($(VSERVICES_SUPPORT), "")
+include include/linux/Kbuild.vservices
+endif
+header-y += okl4-link-shbuf.h
diff --git a/include/uapi/linux/loop.h b/include/uapi/linux/loop.h
index c8125ec..23158db 100644
--- a/include/uapi/linux/loop.h
+++ b/include/uapi/linux/loop.h
@@ -88,6 +88,7 @@
#define LOOP_CHANGE_FD 0x4C06
#define LOOP_SET_CAPACITY 0x4C07
#define LOOP_SET_DIRECT_IO 0x4C08
+#define LOOP_SET_BLOCK_SIZE 0x4C09
/* /dev/loop-control interface */
#define LOOP_CTL_ADD 0x4C80
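The new LOOP_SET_BLOCK_SIZE ioctl takes the requested logical block size in
bytes as its argument. A minimal user-space sketch (error handling elided;
passing the size by value matches the other loop ioctls, but is an assumption
here):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/loop.h>

int set_loop_block_size(const char *dev, unsigned long bsize)
{
	int fd = open(dev, O_RDWR);
	int ret;

	if (fd < 0)
		return -1;
	/* e.g. bsize = 4096 for a 4K logical block size */
	ret = ioctl(fd, LOOP_SET_BLOCK_SIZE, bsize);
	close(fd);
	return ret;
}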
diff --git a/include/uapi/linux/okl4-link-shbuf.h b/include/uapi/linux/okl4-link-shbuf.h
new file mode 100644
index 0000000..69561bc
--- /dev/null
+++ b/include/uapi/linux/okl4-link-shbuf.h
@@ -0,0 +1,40 @@
+/*
+ * User-visible interface to driver for inter-cell links using the
+ * shared-buffer transport.
+ *
+ * Copyright (c) 2016 Cog Systems Pty Ltd.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ */
+#ifndef _LINUX_OKL4_LINK_SHBUF_H
+#define _LINUX_OKL4_LINK_SHBUF_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * Ioctl that indicates a request to raise the outgoing vIRQ. This value is
+ * chosen to avoid conflict with the numbers documented in Linux 4.1's
+ * ioctl-numbers.txt. The argument is a payload to transmit to the receiver.
+ * Note that consecutive transmissions without an interleaved clear of the
+ * interrupt result in the payloads being ORed together.
+ */
+#define OKL4_LINK_SHBUF_IOCTL_IRQ_TX _IOW(0x8d, 1, __u64)
+
+/*
+ * Ioctl that indicates a request to clear any pending incoming vIRQ. The value
+ * returned through the argument to the ioctl is the payload, which is also
+ * cleared.
+ *
+ * The caller cannot distinguish between the cases of no pending interrupt and
+ * a pending interrupt with payload 0. It is expected that the caller is
+ * communicating with a cooperative sender and has polled their file descriptor
+ * to determine there is a pending interrupt before using this ioctl.
+ */
+#define OKL4_LINK_SHBUF_IOCTL_IRQ_CLR _IOR(0x8d, 2, __u64)
+
+#endif /* _LINUX_OKL4_LINK_SHBUF_H */
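For illustration, a hedged user-space sketch of driving these two ioctls.
The device node path is an assumption, and the payload is passed by pointer
as the _IOW/_IOR encodings suggest; the exact calling convention is
driver-defined:

#include <stdio.h>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/okl4-link-shbuf.h>

int main(void)
{
	__u64 payload = 0x1;
	struct pollfd pfd;
	int fd = open("/dev/okl4-link-shbuf0", O_RDWR); /* path is assumed */

	if (fd < 0)
		return 1;

	/* Raise the outgoing vIRQ; back-to-back sends OR their payloads
	 * together until the receiver clears the interrupt. */
	ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_TX, &payload);

	/* Poll before clearing, since a returned payload of 0 is
	 * indistinguishable from "no pending interrupt". */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) == 1 &&
	    ioctl(fd, OKL4_LINK_SHBUF_IOCTL_IRQ_CLR, &payload) == 0)
		printf("received payload 0x%llx\n",
		       (unsigned long long)payload);

	close(fd);
	return 0;
}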
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index e7a31f8..3442a26 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -55,6 +55,7 @@
IPSTATS_MIB_ECT1PKTS, /* InECT1Pkts */
IPSTATS_MIB_ECT0PKTS, /* InECT0Pkts */
IPSTATS_MIB_CEPKTS, /* InCEPkts */
+ IPSTATS_MIB_REASM_OVERLAPS, /* ReasmOverlaps */
__IPSTATS_MIB_MAX
};
diff --git a/include/vservices/Kbuild b/include/vservices/Kbuild
new file mode 100644
index 0000000..8b955fc
--- /dev/null
+++ b/include/vservices/Kbuild
@@ -0,0 +1,2 @@
+header-y += protocol/
+header-y += ioctl.h
diff --git a/include/vservices/buffer.h b/include/vservices/buffer.h
new file mode 100644
index 0000000..910aa07
--- /dev/null
+++ b/include/vservices/buffer.h
@@ -0,0 +1,239 @@
+/*
+ * include/vservices/buffer.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines simple wrapper types for strings and variable-size buffers
+ * that are stored inside Virtual Services message buffers.
+ */
+
+#ifndef _VSERVICES_BUFFER_H_
+#define _VSERVICES_BUFFER_H_
+
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/slab.h>
+
+struct vs_mbuf;
+
+/**
+ * struct vs_string - Virtual Services fixed sized string type
+ * @ptr: String pointer
+ * @max_size: Maximum length of the string in bytes
+ *
+ * A handle to a possibly NUL-terminated string stored in a message buffer. If
+ * the size of the string equals max_size, the string is not NUL-terminated.
+ * If the protocol does not specify an encoding, the encoding is assumed to be
+ * UTF-8. Wide character encodings are not supported by this type; use struct
+ * vs_pbuf for wide character strings.
+ */
+struct vs_string {
+ char *ptr;
+ size_t max_size;
+};
+
+/**
+ * vs_string_copyout - Copy a Virtual Services string to a C string buffer.
+ * @dest: C string to copy to
+ * @src: Virtual Services string to copy from
+ * @max_size: Size of the destination buffer, including the NUL terminator.
+ *
+ * The behaviour is similar to strlcpy(): that is, the copied string
+ * is guaranteed not to exceed the specified size (including the NUL
+ * terminator byte), and is guaranteed to be NUL-terminated as long as
+ * the size is nonzero (unlike strncpy()).
+ *
+ * The return value is the size of the input string (even if the output was
+ * truncated); this is to make truncation easy to detect.
+ */
+static inline size_t
+vs_string_copyout(char *dest, const struct vs_string *src, size_t max_size)
+{
+ size_t src_len = strnlen(src->ptr, src->max_size);
+
+ if (max_size) {
+ size_t dest_len = min(src_len, max_size - 1);
+
+ memcpy(dest, src->ptr, dest_len);
+ dest[dest_len] = '\0';
+ }
+ return src_len;
+}
+
+/**
+ * vs_string_copyin_len - Copy a C string, up to a given length, into a Virtual
+ * Services string.
+ * @dest: Virtual Services string to copy to
+ * @src: C string to copy from
+ * @max_size: Maximum number of bytes to copy
+ *
+ * Returns the number of bytes copied, which may be less than the input
+ * string's length.
+ */
+static inline size_t
+vs_string_copyin_len(struct vs_string *dest, const char *src, size_t max_size)
+{
+ strncpy(dest->ptr, src, min(max_size, dest->max_size));
+
+ return strnlen(dest->ptr, dest->max_size);
+}
+
+/**
+ * vs_string_copyin - Copy a C string into a Virtual Services string.
+ * @dest: Virtual Services string to copy to
+ * @src: C string to copy from
+ *
+ * Returns the number of bytes copied, which may be less than the input
+ * string's length.
+ */
+static inline size_t
+vs_string_copyin(struct vs_string *dest, const char *src)
+{
+ return vs_string_copyin_len(dest, src, dest->max_size);
+}
+
+/**
+ * vs_string_length - Return the size of the string stored in a Virtual Services
+ * string.
+ * @str: Virtual Services string to get the length of
+ */
+static inline size_t
+vs_string_length(struct vs_string *str)
+{
+ return strnlen(str->ptr, str->max_size);
+}
+
+/**
+ * vs_string_dup - Allocate a C string buffer and copy a Virtual Services string
+ * into it.
+ * @str: Virtual Services string to duplicate
+ * @gfp: GFP flags for the allocation
+ */
+static inline char *
+vs_string_dup(struct vs_string *str, gfp_t gfp)
+{
+ size_t len;
+ char *ret;
+
+ len = strnlen(str->ptr, str->max_size) + 1;
+ ret = kmalloc(len, gfp);
+ if (ret)
+ vs_string_copyout(ret, str, len);
+ return ret;
+}
+
+/**
+ * vs_string_max_size - Return the maximum size of a Virtual Services string,
+ *                      not including the NUL terminator if the length of the
+ *                      string is equal to max_size.
+ *
+ * @str: Virtual Services string to return the maximum size of.
+ *
+ * @return The maximum size of the string.
+ */
+static inline size_t
+vs_string_max_size(struct vs_string *str)
+{
+ return str->max_size;
+}
+
+/**
+ * struct vs_pbuf - Handle to a variable-size buffered payload.
+ * @data: Data buffer
+ * @size: Current size of the buffer
+ * @max_size: Maximum size of the buffer
+ *
+ * This is similar to struct vs_string, except that it has an explicitly
+ * stored size rather than being NUL-terminated. The functions that
+ * return ssize_t all return the new size of the modified buffer, or a
+ * negative error code if the buffer would overflow.
+ */
+struct vs_pbuf {
+ void *data;
+ size_t size, max_size;
+};
+
+/**
+ * vs_pbuf_size - Get the size of a pbuf
+ * @pbuf: pbuf to get the size of
+ */
+static inline size_t vs_pbuf_size(const struct vs_pbuf *pbuf)
+{
+ return pbuf->size;
+}
+
+/**
+ * vs_pbuf_data - Get the data pointer for a pbuf
+ * @pbuf: pbuf to get the data pointer for
+ */
+static inline const void *vs_pbuf_data(const struct vs_pbuf *pbuf)
+{
+ return pbuf->data;
+}
+
+/**
+ * vs_pbuf_resize - Resize a pbuf
+ * @pbuf: pbuf to resize
+ * @size: New size in bytes; must not exceed the pbuf's max_size
+ */
+static inline ssize_t vs_pbuf_resize(struct vs_pbuf *pbuf, size_t size)
+{
+ if (size > pbuf->max_size)
+ return -EOVERFLOW;
+
+ pbuf->size = size;
+ return size;
+}
+
+/**
+ * vs_pbuf_copyin - Copy data into a pbuf
+ * @pbuf: pbuf to copy data into
+ * @offset: Offset to copy data to
+ * @data: Pointer to data to copy into the pbuf
+ * @nbytes: Number of bytes to copy into the pbuf
+ */
+static inline ssize_t vs_pbuf_copyin(struct vs_pbuf *pbuf, off_t offset,
+ const void *data, size_t nbytes)
+{
+ if (offset + nbytes > pbuf->size)
+ return -EOVERFLOW;
+
+ memcpy(pbuf->data + offset, data, nbytes);
+
+ return nbytes;
+}
+
+/**
+ * vs_pbuf_append - Append data to a pbuf
+ * @pbuf: pbuf to append to
+ * @data: Pointer to data to append to the pbuf
+ * @nbytes: Number of bytes to append
+ */
+static inline ssize_t vs_pbuf_append(struct vs_pbuf *pbuf,
+ const void *data, size_t nbytes)
+{
+ if (pbuf->size + nbytes > pbuf->max_size)
+ return -EOVERFLOW;
+
+ memcpy(pbuf->data + pbuf->size, data, nbytes);
+ pbuf->size += nbytes;
+
+ return pbuf->size;
+}
+
+/**
+ * vs_pbuf_dup_string - Duplicate the contents of a pbuf as a C string. The
+ * string is allocated and must be freed using kfree.
+ * @pbuf: pbuf to convert
+ * @gfp_flags: GFP flags for the string allocation
+ */
+static inline char *vs_pbuf_dup_string(struct vs_pbuf *pbuf, gfp_t gfp_flags)
+{
+ return kstrndup(pbuf->data, pbuf->size, gfp_flags);
+}
+
+#endif /* _VSERVICES_BUFFER_H_ */
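A minimal kernel-side sketch of the error conventions above, assuming a
vs_string and vs_pbuf obtained from a message buffer by generated protocol
code (includes elided; kernel context assumed):

static int example_consume(struct vs_string *name, struct vs_pbuf *payload)
{
	static const u8 tail[2] = { 0xde, 0xad };
	char buf[32];

	/* vs_string_copyout() returns the source length even on truncation,
	 * so any result >= sizeof(buf) means the copy was cut short. */
	if (vs_string_copyout(buf, name, sizeof(buf)) >= sizeof(buf))
		return -ENAMETOOLONG;

	/* vs_pbuf_append() returns the new size, or -EOVERFLOW if the
	 * buffer would grow past max_size. */
	if (vs_pbuf_append(payload, tail, sizeof(tail)) < 0)
		return -EOVERFLOW;

	return 0;
}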
diff --git a/include/vservices/ioctl.h b/include/vservices/ioctl.h
new file mode 100644
index 0000000..d96fcab
--- /dev/null
+++ b/include/vservices/ioctl.h
@@ -0,0 +1,48 @@
+/*
+ * vservices/ioctl.h - Interface to service character devices
+ *
+ * Copyright (c) 2016, Cog Systems Pty Ltd
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PUBLIC_VSERVICES_IOCTL_H__
+#define __LINUX_PUBLIC_VSERVICES_IOCTL_H__
+
+#include <linux/types.h>
+#include <linux/compiler.h>
+
+/* ioctls that work on any opened service device */
+#define IOCTL_VS_RESET_SERVICE _IO('4', 0)
+#define IOCTL_VS_GET_NAME _IOR('4', 1, char[16])
+#define IOCTL_VS_GET_PROTOCOL _IOR('4', 2, char[32])
+
+/*
+ * Claim a device for user I/O (if no kernel driver is attached). The claim
+ * persists until the char device is closed.
+ */
+struct vs_ioctl_bind {
+ __u32 send_quota;
+ __u32 recv_quota;
+ __u32 send_notify_bits;
+ __u32 recv_notify_bits;
+ size_t msg_size;
+};
+#define IOCTL_VS_BIND_CLIENT _IOR('4', 3, struct vs_ioctl_bind)
+#define IOCTL_VS_BIND_SERVER _IOWR('4', 4, struct vs_ioctl_bind)
+
+/* send and receive messages and notifications */
+#define IOCTL_VS_NOTIFY _IOW('4', 5, __u32)
+struct vs_ioctl_iovec {
+ union {
+ __u32 iovcnt; /* input */
+ __u32 notify_bits; /* output (recv only) */
+ };
+ struct iovec *iov;
+};
+#define IOCTL_VS_SEND _IOW('4', 6, struct vs_ioctl_iovec)
+#define IOCTL_VS_RECV _IOWR('4', 7, struct vs_ioctl_iovec)
+
+#endif /* __LINUX_PUBLIC_VSERVICES_IOCTL_H__ */
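A hedged user-space sketch of claiming a service and sending a single
message; the device path and the exported header location are assumptions:

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/uio.h>
#include <vservices/ioctl.h>

int send_hello(const char *dev) /* e.g. a vServices service char device */
{
	struct vs_ioctl_bind bind;
	char hello[] = "hello";
	struct iovec iov = { .iov_base = hello, .iov_len = sizeof(hello) - 1 };
	struct vs_ioctl_iovec msg = { .iovcnt = 1, .iov = &iov };
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;

	/* Claim the service for user I/O; this fails if a kernel driver
	 * is already attached. The claim lasts until close(). */
	if (ioctl(fd, IOCTL_VS_BIND_CLIENT, &bind) < 0 ||
	    ioctl(fd, IOCTL_VS_SEND, &msg) < 0) {
		close(fd);
		return -1;
	}
	close(fd);
	return 0;
}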
diff --git a/include/vservices/protocol/Kbuild b/include/vservices/protocol/Kbuild
new file mode 100644
index 0000000..374d9b6
--- /dev/null
+++ b/include/vservices/protocol/Kbuild
@@ -0,0 +1,12 @@
+#
+# Find all of the protocol directory names, and get the basename followed
+# by a trailing slash.
+#
+protocols=$(shell find include/vservices/protocol/ -mindepth 1 -type d -exec basename {} \;)
+protocol_dirs=$(foreach p, $(protocols), $(p)/)
+
+#
+# Export the headers for all protocols. The kbuild file in each protocol
+# directory specifies exactly which headers to export.
+#
+header-y += $(protocol_dirs)
diff --git a/include/vservices/protocol/block/Kbuild b/include/vservices/protocol/block/Kbuild
new file mode 100644
index 0000000..ec3cbe8
--- /dev/null
+++ b/include/vservices/protocol/block/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/block/client.h b/include/vservices/protocol/block/client.h
new file mode 100644
index 0000000..4cd2847
--- /dev/null
+++ b/include/vservices/protocol/block/client.h
@@ -0,0 +1,175 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_BLOCK__)
+#define __VSERVICES_CLIENT_BLOCK__
+
+struct vs_service_device;
+struct vs_client_block_state;
+
+struct vs_client_block {
+
+ /*
+ * If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true the
+ * message handlers are run from tasklet context and may not sleep.
+ */
+ bool rx_atomic;
+
+ /*
+ * If this is set to true along with rx_atomic, the driver is allowed
+ * to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or
+ * from task context after calling vs_service_state_lock. This must
+ * not be set to true if rx_atomic is set to false.
+ */
+ bool tx_atomic;
+ /** session setup **/
+ struct vs_client_block_state *(*alloc) (struct vs_service_device *
+ service);
+ void (*release) (struct vs_client_block_state * _state);
+
+ struct vs_service_driver *driver;
+
+/** Opened, reopened and closed functions **/
+
+ void (*opened) (struct vs_client_block_state * _state);
+
+ void (*reopened) (struct vs_client_block_state * _state);
+
+ void (*closed) (struct vs_client_block_state * _state);
+
+/** Send/receive state callbacks **/
+ int (*tx_ready) (struct vs_client_block_state * _state);
+
+ struct {
+ int (*ack_read) (struct vs_client_block_state * _state,
+ void *_opaque, struct vs_pbuf data,
+ struct vs_mbuf * _mbuf);
+ int (*nack_read) (struct vs_client_block_state * _state,
+ void *_opaque,
+ vservice_block_block_io_error_t err);
+
+ int (*ack_write) (struct vs_client_block_state * _state,
+ void *_opaque);
+ int (*nack_write) (struct vs_client_block_state * _state,
+ void *_opaque,
+ vservice_block_block_io_error_t err);
+
+ } io;
+};
+
+struct vs_client_block_state {
+ vservice_block_state_t state;
+ bool readonly;
+ uint32_t sector_size;
+ uint32_t segment_size;
+ uint64_t device_sectors;
+ bool flushable;
+ bool committable;
+ struct {
+ uint32_t sector_size;
+ uint32_t segment_size;
+ } io;
+ struct vs_service_device *service;
+ bool released;
+};
+
+extern int vs_client_block_reopen(struct vs_client_block_state *_state);
+
+extern int vs_client_block_close(struct vs_client_block_state *_state);
+
+ /** interface block_io **/
+/* command parallel read */
+extern int vs_client_block_io_getbufs_ack_read(struct vs_client_block_state
+ *_state, struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf);
+extern int vs_client_block_io_free_ack_read(struct vs_client_block_state
+ *_state, struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf);
+extern int vs_client_block_io_req_read(struct vs_client_block_state *_state,
+ void *_opaque, uint64_t sector_index,
+ uint32_t num_sects, bool nodelay,
+ bool flush, gfp_t flags);
+
+ /* command parallel write */
+extern struct vs_mbuf *vs_client_block_io_alloc_req_write(struct
+ vs_client_block_state
+ *_state,
+ struct vs_pbuf *data,
+ gfp_t flags);
+extern int vs_client_block_io_free_req_write(struct vs_client_block_state
+ *_state, struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf);
+extern int vs_client_block_io_req_write(struct vs_client_block_state *_state,
+ void *_opaque, uint64_t sector_index,
+ uint32_t num_sects, bool nodelay,
+ bool flush, bool commit,
+ struct vs_pbuf data,
+ struct vs_mbuf *_mbuf);
+
+/* Status APIs for async parallel commands */
+static inline bool vs_client_block_io_req_read_can_send(struct
+ vs_client_block_state
+ *_state)
+{
+ return !bitmap_full(_state->state.io.read_bitmask,
+ VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+}
+
+static inline bool vs_client_block_io_req_read_is_pending(struct
+ vs_client_block_state
+ *_state)
+{
+ return !bitmap_empty(_state->state.io.read_bitmask,
+ VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+}
+
+static inline bool vs_client_block_io_req_write_can_send(struct
+ vs_client_block_state
+ *_state)
+{
+ return !bitmap_full(_state->state.io.write_bitmask,
+ VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+}
+
+static inline bool vs_client_block_io_req_write_is_pending(struct
+ vs_client_block_state
+ *_state)
+{
+ return !bitmap_empty(_state->state.io.write_bitmask,
+ VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+}
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_block_client_register(struct vs_client_block *client,
+ const char *name,
+ struct module *owner);
+
+static inline int vservice_block_client_register(struct vs_client_block *client,
+ const char *name)
+{
+#ifdef MODULE
+ extern struct module __this_module;
+ struct module *this_module = &__this_module;
+#else
+ struct module *this_module = NULL;
+#endif
+
+ return __vservice_block_client_register(client, name, this_module);
+}
+
+extern int vservice_block_client_unregister(struct vs_client_block *client);
+
+#endif /* ! __VSERVICES_CLIENT_BLOCK__ */
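A minimal sketch of registering a block client through the inline wrapper
above; the callbacks and the "my_block" name are placeholders, and includes
(linux/module.h, linux/slab.h) are elided:

static struct vs_client_block_state *
my_alloc(struct vs_service_device *service)
{
	/* Allocate the per-session state the framework passes back to us. */
	return kzalloc(sizeof(struct vs_client_block_state), GFP_KERNEL);
}

static void my_release(struct vs_client_block_state *state)
{
	kfree(state);
}

static struct vs_client_block my_block_client = {
	.rx_atomic = false,	/* handlers run from workqueue and may sleep */
	.alloc = my_alloc,
	.release = my_release,
};

static int __init my_block_init(void)
{
	return vservice_block_client_register(&my_block_client, "my_block");
}
module_init(my_block_init);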
diff --git a/include/vservices/protocol/block/common.h b/include/vservices/protocol/block/common.h
new file mode 100644
index 0000000..2779b18
--- /dev/null
+++ b/include/vservices/protocol/block/common.h
@@ -0,0 +1,42 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_BLOCK_PROTOCOL_H__)
+#define __VSERVICES_BLOCK_PROTOCOL_H__
+
+#define VSERVICE_BLOCK_PROTOCOL_NAME "com.ok-labs.block"
+typedef enum {
+ VSERVICE_BLOCK_BASE_REQ_OPEN,
+ VSERVICE_BLOCK_BASE_ACK_OPEN,
+ VSERVICE_BLOCK_BASE_NACK_OPEN,
+ VSERVICE_BLOCK_BASE_REQ_CLOSE,
+ VSERVICE_BLOCK_BASE_ACK_CLOSE,
+ VSERVICE_BLOCK_BASE_NACK_CLOSE,
+ VSERVICE_BLOCK_BASE_REQ_REOPEN,
+ VSERVICE_BLOCK_BASE_ACK_REOPEN,
+ VSERVICE_BLOCK_BASE_NACK_REOPEN,
+ VSERVICE_BLOCK_BASE_MSG_RESET,
+ VSERVICE_BLOCK_IO_REQ_READ,
+ VSERVICE_BLOCK_IO_ACK_READ,
+ VSERVICE_BLOCK_IO_NACK_READ,
+ VSERVICE_BLOCK_IO_REQ_WRITE,
+ VSERVICE_BLOCK_IO_ACK_WRITE,
+ VSERVICE_BLOCK_IO_NACK_WRITE,
+} vservice_block_message_id_t;
+typedef enum {
+ VSERVICE_BLOCK_NBIT_IN__COUNT
+} vservice_block_nbit_in_t;
+
+typedef enum {
+ VSERVICE_BLOCK_NBIT_OUT__COUNT
+} vservice_block_nbit_out_t;
+
+/* Notification mask macros */
+#endif /* ! __VSERVICES_BLOCK_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/block/server.h b/include/vservices/protocol/block/server.h
new file mode 100644
index 0000000..65b0bfd
--- /dev/null
+++ b/include/vservices/protocol/block/server.h
@@ -0,0 +1,177 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_BLOCK)
+#define VSERVICES_SERVER_BLOCK
+
+struct vs_service_device;
+struct vs_server_block_state;
+
+struct vs_server_block {
+
+ /*
+ * If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true the
+ * message handlers are run from tasklet context and may not sleep.
+ */
+ bool rx_atomic;
+
+ /*
+ * If this is set to true along with rx_atomic, the driver is allowed
+ * to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or
+ * from task context after calling vs_service_state_lock. This must
+ * not be set to true if rx_atomic is set to false.
+ */
+ bool tx_atomic;
+
+ /*
+ * These are the driver's recommended message quotas. They are used
+ * by the core service to select message quotas for services with no
+ * explicitly configured quotas.
+ */
+ u32 in_quota_best;
+ u32 out_quota_best;
+ /** session setup **/
+ struct vs_server_block_state *(*alloc) (struct vs_service_device *
+ service);
+ void (*release) (struct vs_server_block_state * _state);
+
+ struct vs_service_driver *driver;
+
+/** Open, reopen, close and closed functions **/
+
+ vs_server_response_type_t(*open) (struct vs_server_block_state *
+ _state);
+
+ vs_server_response_type_t(*reopen) (struct vs_server_block_state *
+ _state);
+
+ vs_server_response_type_t(*close) (struct vs_server_block_state *
+ _state);
+
+ void (*closed) (struct vs_server_block_state * _state);
+
+/** Send/receive state callbacks **/
+ int (*tx_ready) (struct vs_server_block_state * _state);
+
+ struct {
+ int (*req_read) (struct vs_server_block_state * _state,
+ uint32_t _opaque, uint64_t sector_index,
+ uint32_t num_sects, bool nodelay, bool flush);
+
+ int (*req_write) (struct vs_server_block_state * _state,
+ uint32_t _opaque, uint64_t sector_index,
+ uint32_t num_sects, bool nodelay, bool flush,
+ bool commit, struct vs_pbuf data,
+ struct vs_mbuf * _mbuf);
+
+ } io;
+};
+
+struct vs_server_block_state {
+ vservice_block_state_t state;
+ bool readonly;
+ uint32_t sector_size;
+ uint32_t segment_size;
+ uint64_t device_sectors;
+ bool flushable;
+ bool committable;
+ struct {
+ uint32_t sector_size;
+ uint32_t segment_size;
+ } io;
+ struct vs_service_device *service;
+ bool released;
+};
+
+/** Complete calls for server core functions **/
+extern int vs_server_block_open_complete(struct vs_server_block_state *_state,
+ vs_server_response_type_t resp);
+
+extern int vs_server_block_close_complete(struct vs_server_block_state *_state,
+ vs_server_response_type_t resp);
+
+extern int vs_server_block_reopen_complete(struct vs_server_block_state *_state,
+ vs_server_response_type_t resp);
+
+ /** interface block_io **/
+/* command parallel read */
+extern struct vs_mbuf *vs_server_block_io_alloc_ack_read(struct
+ vs_server_block_state
+ *_state,
+ struct vs_pbuf *data,
+ gfp_t flags);
+extern int vs_server_block_io_free_ack_read(struct vs_server_block_state
+ *_state, struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_send_ack_read(struct vs_server_block_state
+ *_state, uint32_t _opaque,
+ struct vs_pbuf data,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_send_nack_read(struct vs_server_block_state
+ *_state, uint32_t _opaque,
+ vservice_block_block_io_error_t
+ err, gfp_t flags);
+ /* command parallel write */
+extern int vs_server_block_io_getbufs_req_write(struct vs_server_block_state
+ *_state, struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_free_req_write(struct vs_server_block_state
+ *_state, struct vs_pbuf *data,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_block_io_send_ack_write(struct vs_server_block_state
+ *_state, uint32_t _opaque,
+ gfp_t flags);
+extern int vs_server_block_io_send_nack_write(struct vs_server_block_state
+ *_state, uint32_t _opaque,
+ vservice_block_block_io_error_t
+ err, gfp_t flags);
+
+static inline bool vs_server_block_io_send_ack_read_is_pending(struct
+ vs_server_block_state
+ *_state)
+{
+ return !bitmap_empty(_state->state.io.read_bitmask,
+ VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+}
+
+static inline bool vs_server_block_io_send_ack_write_is_pending(struct
+ vs_server_block_state
+ *_state)
+{
+ return !bitmap_empty(_state->state.io.write_bitmask,
+ VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+}
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_block_server_register(struct vs_server_block *server,
+ const char *name,
+ struct module *owner);
+
+static inline int vservice_block_server_register(struct vs_server_block *server,
+ const char *name)
+{
+#ifdef MODULE
+ extern struct module __this_module;
+ struct module *this_module = &__this_module;
+#else
+ struct module *this_module = NULL;
+#endif
+
+ return __vservice_block_server_register(server, name, this_module);
+}
+
+extern int vservice_block_server_unregister(struct vs_server_block *server);
+#endif /* ! VSERVICES_SERVER_BLOCK */
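A hedged sketch of a server-side req_read handler that fails a request; the
handler name is a placeholder and the error choice is purely illustrative:

static int my_req_read(struct vs_server_block_state *state, uint32_t opaque,
		       uint64_t sector_index, uint32_t num_sects,
		       bool nodelay, bool flush)
{
	/* Reject the read with a nack carrying a protocol error code;
	 * the opaque tag ties the reply to the caller's request. */
	return vs_server_block_io_send_nack_read(state, opaque,
						 VSERVICE_BLOCK_MEDIA_FAILURE,
						 GFP_KERNEL);
}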
diff --git a/include/vservices/protocol/block/types.h b/include/vservices/protocol/block/types.h
new file mode 100644
index 0000000..52845a3
--- /dev/null
+++ b/include/vservices/protocol/block/types.h
@@ -0,0 +1,106 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_BLOCK_TYPES_H)
+#define VSERVICES_BLOCK_TYPES_H
+
+#define VSERVICE_BLOCK_IO_READ_MAX_PENDING 1024
+#define VSERVICE_BLOCK_IO_WRITE_MAX_PENDING 1024
+
+typedef enum vservice_block_block_io_error {
+ VSERVICE_BLOCK_INVALID_INDEX,
+ VSERVICE_BLOCK_MEDIA_FAILURE,
+ VSERVICE_BLOCK_MEDIA_TIMEOUT,
+ VSERVICE_BLOCK_UNSUPPORTED_COMMAND,
+ VSERVICE_BLOCK_SERVICE_RESET
+} vservice_block_block_io_error_t;
+
+typedef enum {
+/* state closed */
+ VSERVICE_BASE_STATE_CLOSED = 0,
+ VSERVICE_BASE_STATE_CLOSED__OPEN,
+ VSERVICE_BASE_STATE_CLOSED__CLOSE,
+ VSERVICE_BASE_STATE_CLOSED__REOPEN,
+
+/* state running */
+ VSERVICE_BASE_STATE_RUNNING,
+ VSERVICE_BASE_STATE_RUNNING__OPEN,
+ VSERVICE_BASE_STATE_RUNNING__CLOSE,
+ VSERVICE_BASE_STATE_RUNNING__REOPEN,
+
+ VSERVICE_BASE__RESET = VSERVICE_BASE_STATE_CLOSED
+} vservice_base_statenum_t;
+
+typedef struct {
+ vservice_base_statenum_t statenum;
+} vservice_base_state_t;
+
+#define VSERVICE_BASE_RESET_STATE (vservice_base_state_t) { \
+.statenum = VSERVICE_BASE__RESET}
+
+#define VSERVICE_BASE_STATE_IS_CLOSED(state) (\
+((state).statenum == VSERVICE_BASE_STATE_CLOSED) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__REOPEN))
+
+#define VSERVICE_BASE_STATE_IS_RUNNING(state) (\
+((state).statenum == VSERVICE_BASE_STATE_RUNNING) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__REOPEN))
+
+#define VSERVICE_BASE_STATE_VALID(state) ( \
+VSERVICE_BASE_STATE_IS_CLOSED(state) ? true : \
+VSERVICE_BASE_STATE_IS_RUNNING(state) ? true : \
+false)
+
+static inline const char *vservice_base_get_state_string(vservice_base_state_t
+ state)
+{
+ static const char *names[] =
+ { "closed", "closed__open", "closed__close", "closed__reopen",
+ "running", "running__open", "running__close", "running__reopen"
+ };
+ if (!VSERVICE_BASE_STATE_VALID(state)) {
+ return "INVALID";
+ }
+ return names[state.statenum];
+}
+
+typedef struct {
+ DECLARE_BITMAP(read_bitmask, VSERVICE_BLOCK_IO_READ_MAX_PENDING);
+ void *read_tags[VSERVICE_BLOCK_IO_READ_MAX_PENDING];
+ DECLARE_BITMAP(write_bitmask, VSERVICE_BLOCK_IO_WRITE_MAX_PENDING);
+ void *write_tags[VSERVICE_BLOCK_IO_WRITE_MAX_PENDING];
+} vservice_block_io_state_t;
+
+#define VSERVICE_BLOCK_IO_RESET_STATE (vservice_block_io_state_t) { \
+.read_bitmask = {0}, \
+.read_tags = {NULL}, \
+.write_bitmask = {0}, \
+.write_tags = {NULL}}
+
+#define VSERVICE_BLOCK_IO_STATE_VALID(state) true
+
+typedef struct {
+
+ vservice_base_state_t base;
+
+ vservice_block_io_state_t io;
+} vservice_block_state_t;
+
+#define VSERVICE_BLOCK_RESET_STATE (vservice_block_state_t) {\
+.base = VSERVICE_BASE_RESET_STATE,\
+.io = VSERVICE_BLOCK_IO_RESET_STATE }
+
+#define VSERVICE_BLOCK_IS_STATE_RESET(state) \
+ ((state).base.statenum == VSERVICE_BASE__RESET)
+#endif /* ! VSERVICES_BLOCK_TYPES_H */
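A short sketch of the reset and validity helpers defined above (kernel
context assumed, includes elided):

static void block_state_debug(void)
{
	vservice_block_state_t st = VSERVICE_BLOCK_RESET_STATE;

	/* A freshly reset state machine reports "closed". */
	if (VSERVICE_BLOCK_IS_STATE_RESET(st))
		pr_debug("block state: %s\n",
			 vservice_base_get_state_string(st.base));
}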
diff --git a/include/vservices/protocol/core.h b/include/vservices/protocol/core.h
new file mode 100644
index 0000000..3a86af5
--- /dev/null
+++ b/include/vservices/protocol/core.h
@@ -0,0 +1,145 @@
+/*
+ * include/vservices/protocol/core.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * These are the common generated definitions for the core protocol drivers;
+ * specifically the message IDs and the protocol state representation.
+ *
+ * This is currently hand-generated, but will eventually be autogenerated
+ * from the protocol specifications in core.vs. Please keep it consistent
+ * with that file.
+ */
+
+#define VSERVICE_CORE_PROTOCOL_NAME "com.ok-labs.core"
+#define VSERVICE_CORE_PARAM_SIZE_SERVICE_INFO__PROTOCOL_NAME 32
+#define VSERVICE_CORE_PARAM_SIZE_SERVICE_INFO__SERVICE_NAME 16
+
+/*
+ * Identifiers for in-band messages.
+ *
+ * This definition applies in both directions, because there is no practical
+ * limit on message IDs (services are unlikely to define 2^16 distinct message
+ * names).
+ */
+typedef enum {
+ /** simple_protocol core **/
+ /* message out startup */
+ VSERVICE_CORE_MSG_STARTUP,
+
+ /* message out shutdown */
+ VSERVICE_CORE_MSG_SHUTDOWN,
+
+ /* command in sync connect */
+ VSERVICE_CORE_REQ_CONNECT,
+ VSERVICE_CORE_ACK_CONNECT,
+ VSERVICE_CORE_NACK_CONNECT,
+
+ /* command in sync disconnect */
+ VSERVICE_CORE_REQ_DISCONNECT,
+ VSERVICE_CORE_ACK_DISCONNECT,
+ VSERVICE_CORE_NACK_DISCONNECT,
+
+ /* command in service_count */
+ VSERVICE_CORE_REQ_SERVICE_COUNT,
+ VSERVICE_CORE_ACK_SERVICE_COUNT,
+ VSERVICE_CORE_NACK_SERVICE_COUNT,
+
+ /* command in queued service_info */
+ VSERVICE_CORE_REQ_SERVICE_INFO,
+ VSERVICE_CORE_ACK_SERVICE_INFO,
+ VSERVICE_CORE_NACK_SERVICE_INFO,
+
+ /* message inout service_reset */
+ VSERVICE_CORE_MSG_SERVICE_RESET,
+
+ /* message inout service_ready */
+ VSERVICE_CORE_MSG_SERVICE_READY,
+
+ /* message out notification bits */
+ VSERVICE_CORE_MSG_NOTIFICATION_BITS_INFO,
+
+} vservice_core_message_id_t;
+
+/*
+ * Notification bits are defined separately for each direction because there
+ * is relatively limited space to allocate them from (specifically, the bits in
+ * a machine word). It is unlikely but possible for a protocol to reach this
+ * limit.
+ */
+
+/* Bits in the in (client -> server) notification bitmask. */
+typedef enum {
+ /** simple_protocol core **/
+ /* No in notifications */
+
+ VSERVICE_CORE_NBIT_IN__COUNT = 0,
+} vservice_core_nbit_in_t;
+
+/* Masks for the in notification bits */
+/* No in notifications */
+
+/* Bits in the out (server -> client) notification bitmask. */
+typedef enum {
+ /** simple_protocol core **/
+ /* notification out reenumerate */
+ VSERVICE_CORE_NBIT_OUT_REENUMERATE = 0,
+
+ VSERVICE_CORE_NBIT_OUT__COUNT,
+} vservice_core_nbit_out_t;
+
+/* Masks for the out notification bits */
+#define VSERVICE_CORE_NMASK_OUT_REENUMERATE \
+ (1 << VSERVICE_CORE_NBIT_OUT_REENUMERATE)
+
+/* Valid states of the interface's generated state machine. */
+typedef enum {
+ /* state offline */
+ VSERVICE_CORE_STATE_OFFLINE = 0,
+
+ /* state disconnected */
+ VSERVICE_CORE_STATE_DISCONNECTED,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+
+ /* state connected */
+ VSERVICE_CORE_STATE_CONNECTED,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+
+ /* reset offline */
+ VSERVICE_CORE_STATE__RESET = VSERVICE_CORE_STATE_OFFLINE,
+} vservice_core_statenum_t;
+
+typedef struct {
+ vservice_core_statenum_t statenum;
+ bool pending_service_count;
+ unsigned pending_service_info;
+} vservice_core_state_t;
+
+#define VSERVICE_CORE_RESET_STATE (vservice_core_state_t) { \
+ .statenum = VSERVICE_CORE_STATE__RESET, \
+ .pending_service_count = false, \
+ .pending_service_info = 0 }
+
+#define VSERVICE_CORE_STATE_IS_OFFLINE(state) ( \
+ ((state).statenum == VSERVICE_CORE_STATE_OFFLINE))
+#define VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ( \
+ ((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED) || \
+ ((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__CONNECT))
+#define VSERVICE_CORE_STATE_IS_CONNECTED(state) ( \
+ ((state).statenum == VSERVICE_CORE_STATE_CONNECTED) || \
+ ((state).statenum == VSERVICE_CORE_STATE_CONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_VALID(state) \
+ VSERVICE_CORE_STATE_IS_OFFLINE(state) ? ( \
+ ((state).pending_service_count == false) && \
+ ((state).pending_service_info == 0)) : \
+ VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ? ( \
+ ((state).pending_service_count == false) && \
+ ((state).pending_service_info == 0)) : \
+ VSERVICE_CORE_STATE_IS_CONNECTED(state) ? true : \
+ false)
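A hedged sketch of consuming the out-notification mask together with the
state macros defined above:

static bool core_should_reenumerate(u32 notify_bits,
				    vservice_core_state_t st)
{
	/* Act on the reenumerate notification only while connected. */
	return (notify_bits & VSERVICE_CORE_NMASK_OUT_REENUMERATE) &&
		VSERVICE_CORE_STATE_IS_CONNECTED(st);
}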
diff --git a/include/vservices/protocol/core/Kbuild b/include/vservices/protocol/core/Kbuild
new file mode 100644
index 0000000..ec3cbe8
--- /dev/null
+++ b/include/vservices/protocol/core/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/core/client.h b/include/vservices/protocol/core/client.h
new file mode 100644
index 0000000..3d52999
--- /dev/null
+++ b/include/vservices/protocol/core/client.h
@@ -0,0 +1,155 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_CORE__)
+#define __VSERVICES_CLIENT_CORE__
+
+struct vs_service_device;
+struct vs_client_core_state;
+
+struct vs_client_core {
+
+ /*
+ * If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true the
+ * message handlers are run from tasklet context and may not sleep.
+ */
+ bool rx_atomic;
+
+ /*
+ * If this is set to true along with rx_atomic, the driver is allowed
+ * to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or
+ * from task context after calling vs_service_state_lock. This must
+ * not be set to true if rx_atomic is set to false.
+ */
+ bool tx_atomic;
+ /** session setup **/
+ struct vs_client_core_state *(*alloc) (struct vs_service_device *
+ service);
+ void (*release) (struct vs_client_core_state * _state);
+
+ struct vs_service_driver *driver;
+
+ /** Core service base interface **/
+ void (*start) (struct vs_client_core_state * _state);
+ void (*reset) (struct vs_client_core_state * _state);
+ /** Send/receive state callbacks **/
+ int (*tx_ready) (struct vs_client_core_state * _state);
+
+ struct {
+ int (*state_change) (struct vs_client_core_state * _state,
+ vservice_core_statenum_t old,
+ vservice_core_statenum_t new);
+
+ int (*ack_connect) (struct vs_client_core_state * _state);
+ int (*nack_connect) (struct vs_client_core_state * _state);
+
+ int (*ack_disconnect) (struct vs_client_core_state * _state);
+ int (*nack_disconnect) (struct vs_client_core_state * _state);
+
+ int (*msg_startup) (struct vs_client_core_state * _state,
+ uint32_t core_in_quota,
+ uint32_t core_out_quota);
+
+ int (*msg_shutdown) (struct vs_client_core_state * _state);
+
+ int (*msg_service_created) (struct vs_client_core_state *
+ _state, uint32_t service_id,
+ struct vs_string service_name,
+ struct vs_string protocol_name,
+ struct vs_mbuf * _mbuf);
+
+ int (*msg_service_removed) (struct vs_client_core_state *
+ _state, uint32_t service_id);
+
+ int (*msg_server_ready) (struct vs_client_core_state * _state,
+ uint32_t service_id, uint32_t in_quota,
+ uint32_t out_quota,
+ uint32_t in_bit_offset,
+ uint32_t in_num_bits,
+ uint32_t out_bit_offset,
+ uint32_t out_num_bits);
+
+ int (*msg_service_reset) (struct vs_client_core_state * _state,
+ uint32_t service_id);
+
+ } core;
+};
+
+struct vs_client_core_state {
+ vservice_core_protocol_state_t state;
+ struct vs_service_device *service;
+ bool released;
+};
+
+extern int vs_client_core_reopen(struct vs_client_core_state *_state);
+
+extern int vs_client_core_close(struct vs_client_core_state *_state);
+
+ /** interface core **/
+/* command sync connect */
+extern int vs_client_core_core_req_connect(struct vs_client_core_state *_state,
+ gfp_t flags);
+
+ /* command sync disconnect */
+extern int vs_client_core_core_req_disconnect(struct vs_client_core_state
+ *_state, gfp_t flags);
+
+ /* message startup */
+/* message shutdown */
+/* message service_created */
+extern int vs_client_core_core_getbufs_service_created(struct
+ vs_client_core_state
+ *_state,
+ struct vs_string
+ *service_name,
+ struct vs_string
+ *protocol_name,
+ struct vs_mbuf *_mbuf);
+extern int vs_client_core_core_free_service_created(struct vs_client_core_state
+ *_state,
+ struct vs_string
+ *service_name,
+ struct vs_string
+ *protocol_name,
+ struct vs_mbuf *_mbuf);
+ /* message service_removed */
+/* message server_ready */
+/* message service_reset */
+extern int vs_client_core_core_send_service_reset(struct vs_client_core_state
+ *_state, uint32_t service_id,
+ gfp_t flags);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_core_client_register(struct vs_client_core *client,
+ const char *name,
+ struct module *owner);
+
+static inline int vservice_core_client_register(struct vs_client_core *client,
+ const char *name)
+{
+#ifdef MODULE
+ extern struct module __this_module;
+ struct module *this_module = &__this_module;
+#else
+ struct module *this_module = NULL;
+#endif
+
+ return __vservice_core_client_register(client, name, this_module);
+}
+
+extern int vservice_core_client_unregister(struct vs_client_core *client);
+
+#endif /* ! __VSERVICES_CLIENT_CORE__ */
diff --git a/include/vservices/protocol/core/common.h b/include/vservices/protocol/core/common.h
new file mode 100644
index 0000000..b496416
--- /dev/null
+++ b/include/vservices/protocol/core/common.h
@@ -0,0 +1,38 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CORE_PROTOCOL_H__)
+#define __VSERVICES_CORE_PROTOCOL_H__
+
+#define VSERVICE_CORE_PROTOCOL_NAME "com.ok-labs.core"
+typedef enum {
+ VSERVICE_CORE_CORE_REQ_CONNECT,
+ VSERVICE_CORE_CORE_ACK_CONNECT,
+ VSERVICE_CORE_CORE_NACK_CONNECT,
+ VSERVICE_CORE_CORE_REQ_DISCONNECT,
+ VSERVICE_CORE_CORE_ACK_DISCONNECT,
+ VSERVICE_CORE_CORE_NACK_DISCONNECT,
+ VSERVICE_CORE_CORE_MSG_STARTUP,
+ VSERVICE_CORE_CORE_MSG_SHUTDOWN,
+ VSERVICE_CORE_CORE_MSG_SERVICE_CREATED,
+ VSERVICE_CORE_CORE_MSG_SERVICE_REMOVED,
+ VSERVICE_CORE_CORE_MSG_SERVER_READY,
+ VSERVICE_CORE_CORE_MSG_SERVICE_RESET,
+} vservice_core_message_id_t;
+typedef enum {
+ VSERVICE_CORE_NBIT_IN__COUNT
+} vservice_core_nbit_in_t;
+
+typedef enum {
+ VSERVICE_CORE_NBIT_OUT__COUNT
+} vservice_core_nbit_out_t;
+
+/* Notification mask macros */
+#endif /* ! __VSERVICES_CORE_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/core/server.h b/include/vservices/protocol/core/server.h
new file mode 100644
index 0000000..959b8c3
--- /dev/null
+++ b/include/vservices/protocol/core/server.h
@@ -0,0 +1,171 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_CORE)
+#define VSERVICES_SERVER_CORE
+
+struct vs_service_device;
+struct vs_server_core_state;
+
+struct vs_server_core {
+
+ /*
+ * If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true the
+ * message handlers are run from tasklet context and may not sleep.
+ */
+ bool rx_atomic;
+
+ /*
+ * If this is set to true along with rx_atomic, the driver is allowed
+ * to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or
+ * from task context after calling vs_service_state_lock. This must
+ * not be set to true if rx_atomic is set to false.
+ */
+ bool tx_atomic;
+
+ /*
+ * These are the driver's recommended message quotas. They are used
+ * by the core service to select message quotas for services with no
+ * explicitly configured quotas.
+ */
+ u32 in_quota_best;
+ u32 out_quota_best;
+ /** session setup **/
+ struct vs_server_core_state *(*alloc) (struct vs_service_device *
+ service);
+ void (*release) (struct vs_server_core_state * _state);
+
+ struct vs_service_driver *driver;
+
+ /** Core service base interface **/
+ void (*start) (struct vs_server_core_state * _state);
+ void (*reset) (struct vs_server_core_state * _state);
+ /** Send/receive state callbacks **/
+ int (*tx_ready) (struct vs_server_core_state * _state);
+
+ struct {
+ int (*state_change) (struct vs_server_core_state * _state,
+ vservice_core_statenum_t old,
+ vservice_core_statenum_t new);
+
+ int (*req_connect) (struct vs_server_core_state * _state);
+
+ int (*req_disconnect) (struct vs_server_core_state * _state);
+
+ int (*msg_service_reset) (struct vs_server_core_state * _state,
+ uint32_t service_id);
+
+ } core;
+};
+
+struct vs_server_core_state {
+ vservice_core_protocol_state_t state;
+ struct vs_service_device *service;
+ bool released;
+};
+
+/** Complete calls for server core functions **/
+
+ /** interface core **/
+/* command sync connect */
+extern int vs_server_core_core_send_ack_connect(struct vs_server_core_state
+ *_state, gfp_t flags);
+extern int vs_server_core_core_send_nack_connect(struct vs_server_core_state
+ *_state, gfp_t flags);
+ /* command sync disconnect */
+extern int vs_server_core_core_send_ack_disconnect(struct vs_server_core_state
+ *_state, gfp_t flags);
+extern int vs_server_core_core_send_nack_disconnect(struct vs_server_core_state
+ *_state, gfp_t flags);
+ /* message startup */
+extern int vs_server_core_core_send_startup(struct vs_server_core_state *_state,
+ uint32_t core_in_quota,
+ uint32_t core_out_quota,
+ gfp_t flags);
+
+ /* message shutdown */
+extern int vs_server_core_core_send_shutdown(struct vs_server_core_state
+ *_state, gfp_t flags);
+
+ /* message service_created */
+extern struct vs_mbuf *vs_server_core_core_alloc_service_created(struct
+ vs_server_core_state
+ *_state,
+ struct
+ vs_string
+ *service_name,
+ struct
+ vs_string
+ *protocol_name,
+ gfp_t flags);
+extern int vs_server_core_core_free_service_created(struct vs_server_core_state
+ *_state,
+ struct vs_string
+ *service_name,
+ struct vs_string
+ *protocol_name,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_core_core_send_service_created(struct vs_server_core_state
+ *_state,
+ uint32_t service_id,
+ struct vs_string
+ service_name,
+ struct vs_string
+ protocol_name,
+ struct vs_mbuf *_mbuf);
+
+ /* message service_removed */
+extern int vs_server_core_core_send_service_removed(struct vs_server_core_state
+ *_state,
+ uint32_t service_id,
+ gfp_t flags);
+
+ /* message server_ready */
+extern int vs_server_core_core_send_server_ready(struct vs_server_core_state
+ *_state, uint32_t service_id,
+ uint32_t in_quota,
+ uint32_t out_quota,
+ uint32_t in_bit_offset,
+ uint32_t in_num_bits,
+ uint32_t out_bit_offset,
+ uint32_t out_num_bits,
+ gfp_t flags);
+
+ /* message service_reset */
+extern int vs_server_core_core_send_service_reset(struct vs_server_core_state
+ *_state, uint32_t service_id,
+ gfp_t flags);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_core_server_register(struct vs_server_core *server,
+ const char *name,
+ struct module *owner);
+
+static inline int vservice_core_server_register(struct vs_server_core *server,
+ const char *name)
+{
+#ifdef MODULE
+ extern struct module __this_module;
+ struct module *this_module = &__this_module;
+#else
+ struct module *this_module = NULL;
+#endif
+
+ return __vservice_core_server_register(server, name, this_module);
+}
+
+extern int vservice_core_server_unregister(struct vs_server_core *server);
+#endif /* ! VSERVICES_SERVER_CORE */
diff --git a/include/vservices/protocol/core/types.h b/include/vservices/protocol/core/types.h
new file mode 100644
index 0000000..2d6928d
--- /dev/null
+++ b/include/vservices/protocol/core/types.h
@@ -0,0 +1,87 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_CORE_TYPES_H)
+#define VSERVICES_CORE_TYPES_H
+
+#define VSERVICE_CORE_SERVICE_NAME_SIZE (uint32_t)16
+
+#define VSERVICE_CORE_PROTOCOL_NAME_SIZE (uint32_t)32
+
+typedef enum {
+/* state offline */
+ VSERVICE_CORE_STATE_OFFLINE = 0,
+ VSERVICE_CORE_STATE_OFFLINE__CONNECT,
+ VSERVICE_CORE_STATE_OFFLINE__DISCONNECT,
+
+/* state disconnected */
+ VSERVICE_CORE_STATE_DISCONNECTED,
+ VSERVICE_CORE_STATE_DISCONNECTED__CONNECT,
+ VSERVICE_CORE_STATE_DISCONNECTED__DISCONNECT,
+
+/* state connected */
+ VSERVICE_CORE_STATE_CONNECTED,
+ VSERVICE_CORE_STATE_CONNECTED__CONNECT,
+ VSERVICE_CORE_STATE_CONNECTED__DISCONNECT,
+
+ VSERVICE_CORE__RESET = VSERVICE_CORE_STATE_OFFLINE
+} vservice_core_statenum_t;
+
+typedef struct {
+ vservice_core_statenum_t statenum;
+} vservice_core_state_t;
+
+#define VSERVICE_CORE_RESET_STATE (vservice_core_state_t) { \
+.statenum = VSERVICE_CORE__RESET}
+
+#define VSERVICE_CORE_STATE_IS_OFFLINE(state) (\
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE) || \
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_OFFLINE__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_IS_DISCONNECTED(state) (\
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED) || \
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_DISCONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_IS_CONNECTED(state) (\
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED) || \
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED__CONNECT) || \
+((state).statenum == VSERVICE_CORE_STATE_CONNECTED__DISCONNECT))
+
+#define VSERVICE_CORE_STATE_VALID(state) ( \
+VSERVICE_CORE_STATE_IS_OFFLINE(state) ? true : \
+VSERVICE_CORE_STATE_IS_DISCONNECTED(state) ? true : \
+VSERVICE_CORE_STATE_IS_CONNECTED(state) ? true : \
+false)
+
+static inline const char *vservice_core_get_state_string(vservice_core_state_t
+ state)
+{
+ static const char *names[] =
+ { "offline", "offline__connect", "offline__disconnect",
+ "disconnected", "disconnected__connect",
+ "disconnected__disconnect",
+ "connected", "connected__connect", "connected__disconnect"
+ };
+ if (!VSERVICE_CORE_STATE_VALID(state)) {
+ return "INVALID";
+ }
+ return names[state.statenum];
+}
+
+typedef struct {
+
+ vservice_core_state_t core;
+} vservice_core_protocol_state_t;
+
+#define VSERVICE_CORE_PROTOCOL_RESET_STATE (vservice_core_protocol_state_t) {\
+.core = VSERVICE_CORE_RESET_STATE }
+#endif /* ! VSERVICES_CORE_TYPES_H */
diff --git a/include/vservices/protocol/serial/Kbuild b/include/vservices/protocol/serial/Kbuild
new file mode 100644
index 0000000..ec3cbe8
--- /dev/null
+++ b/include/vservices/protocol/serial/Kbuild
@@ -0,0 +1 @@
+header-y += types.h
diff --git a/include/vservices/protocol/serial/client.h b/include/vservices/protocol/serial/client.h
new file mode 100644
index 0000000..78efed2e
--- /dev/null
+++ b/include/vservices/protocol/serial/client.h
@@ -0,0 +1,114 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_CLIENT_SERIAL__)
+#define __VSERVICES_CLIENT_SERIAL__
+
+struct vs_service_device;
+struct vs_client_serial_state;
+
+struct vs_client_serial {
+
+ /*
+ * If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true the
+ * message handlers are run from tasklet context and may not sleep.
+ */
+ bool rx_atomic;
+
+ /*
+ * If this is set to true along with rx_atomic, the driver is allowed
+ * to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or
+ * from task context after calling vs_service_state_lock. This must
+ * not be set to true if rx_atomic is set to false.
+ */
+ bool tx_atomic;
+ /** session setup **/
+ struct vs_client_serial_state *(*alloc) (struct vs_service_device *
+ service);
+ void (*release) (struct vs_client_serial_state * _state);
+
+ struct vs_service_driver *driver;
+
+/** Opened, reopened and closed functions **/
+
+ void (*opened) (struct vs_client_serial_state * _state);
+
+ void (*reopened) (struct vs_client_serial_state * _state);
+
+ void (*closed) (struct vs_client_serial_state * _state);
+
+/** Send/receive state callbacks **/
+ int (*tx_ready) (struct vs_client_serial_state * _state);
+
+ struct {
+ int (*msg_msg) (struct vs_client_serial_state * _state,
+ struct vs_pbuf b, struct vs_mbuf * _mbuf);
+
+ } serial;
+};
+
+struct vs_client_serial_state {
+ vservice_serial_protocol_state_t state;
+ uint32_t packet_size;
+ struct {
+ uint32_t packet_size;
+ } serial;
+ struct vs_service_device *service;
+ bool released;
+};
+
+extern int vs_client_serial_reopen(struct vs_client_serial_state *_state);
+
+extern int vs_client_serial_close(struct vs_client_serial_state *_state);
+
+ /** interface serial **/
+/* message msg */
+extern struct vs_mbuf *vs_client_serial_serial_alloc_msg(struct
+ vs_client_serial_state
+ *_state,
+ struct vs_pbuf *b,
+ gfp_t flags);
+extern int vs_client_serial_serial_getbufs_msg(struct vs_client_serial_state
+ *_state, struct vs_pbuf *b,
+ struct vs_mbuf *_mbuf);
+extern int vs_client_serial_serial_free_msg(struct vs_client_serial_state
+ *_state, struct vs_pbuf *b,
+ struct vs_mbuf *_mbuf);
+extern int vs_client_serial_serial_send_msg(struct vs_client_serial_state
+ *_state, struct vs_pbuf b,
+ struct vs_mbuf *_mbuf);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_serial_client_register(struct vs_client_serial *client,
+ const char *name,
+ struct module *owner);
+
+static inline int vservice_serial_client_register(struct vs_client_serial
+ *client, const char *name)
+{
+#ifdef MODULE
+ extern struct module __this_module;
+ struct module *this_module = &__this_module;
+#else
+ struct module *this_module = NULL;
+#endif
+
+ return __vservice_serial_client_register(client, name, this_module);
+}
+
+extern int vservice_serial_client_unregister(struct vs_client_serial *client);
+
+#endif /* ! __VSERVICES_CLIENT_SERIAL__ */
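A hedged sketch of sending one packet on the serial interface, assuming task
context with the service state lock held, that the allocated payload buffer
is at least packet_size bytes, and that a successful send consumes the mbuf:

static int serial_send(struct vs_client_serial_state *state,
		       const void *data, size_t len)
{
	struct vs_pbuf pbuf;
	struct vs_mbuf *mbuf;
	ssize_t ret;

	if (len > state->serial.packet_size)
		return -EMSGSIZE;

	mbuf = vs_client_serial_serial_alloc_msg(state, &pbuf, GFP_KERNEL);
	if (!mbuf)
		return -ENOMEM;

	/* Shrink the payload to the packet length, then fill it. */
	ret = vs_pbuf_resize(&pbuf, len);
	if (ret >= 0)
		ret = vs_pbuf_copyin(&pbuf, 0, data, len);
	if (ret < 0) {
		vs_client_serial_serial_free_msg(state, &pbuf, mbuf);
		return ret;
	}

	return vs_client_serial_serial_send_msg(state, pbuf, mbuf);
}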
diff --git a/include/vservices/protocol/serial/common.h b/include/vservices/protocol/serial/common.h
new file mode 100644
index 0000000..a530645
--- /dev/null
+++ b/include/vservices/protocol/serial/common.h
@@ -0,0 +1,37 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(__VSERVICES_SERIAL_PROTOCOL_H__)
+#define __VSERVICES_SERIAL_PROTOCOL_H__
+
+#define VSERVICE_SERIAL_PROTOCOL_NAME "com.ok-labs.serial"
+typedef enum {
+ VSERVICE_SERIAL_BASE_REQ_OPEN,
+ VSERVICE_SERIAL_BASE_ACK_OPEN,
+ VSERVICE_SERIAL_BASE_NACK_OPEN,
+ VSERVICE_SERIAL_BASE_REQ_CLOSE,
+ VSERVICE_SERIAL_BASE_ACK_CLOSE,
+ VSERVICE_SERIAL_BASE_NACK_CLOSE,
+ VSERVICE_SERIAL_BASE_REQ_REOPEN,
+ VSERVICE_SERIAL_BASE_ACK_REOPEN,
+ VSERVICE_SERIAL_BASE_NACK_REOPEN,
+ VSERVICE_SERIAL_BASE_MSG_RESET,
+ VSERVICE_SERIAL_SERIAL_MSG_MSG,
+} vservice_serial_message_id_t;
+typedef enum {
+ VSERVICE_SERIAL_NBIT_IN__COUNT
+} vservice_serial_nbit_in_t;
+
+typedef enum {
+ VSERVICE_SERIAL_NBIT_OUT__COUNT
+} vservice_serial_nbit_out_t;
+
+/* Notification mask macros */
+#endif /* ! __VSERVICES_SERIAL_PROTOCOL_H__ */
diff --git a/include/vservices/protocol/serial/server.h b/include/vservices/protocol/serial/server.h
new file mode 100644
index 0000000..001fed5
--- /dev/null
+++ b/include/vservices/protocol/serial/server.h
@@ -0,0 +1,134 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERVER_SERIAL)
+#define VSERVICES_SERVER_SERIAL
+
+struct vs_service_device;
+struct vs_server_serial_state;
+
+struct vs_server_serial {
+
+ /*
+ * If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true the
+ * message handlers are run from tasklet context and may not sleep.
+ */
+ bool rx_atomic;
+
+ /*
+ * If this is set to true along with rx_atomic, the driver is allowed
+ * to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or
+ * from task context after calling vs_service_state_lock. This must
+ * not be set to true if rx_atomic is set to false.
+ */
+ bool tx_atomic;
+
+ /*
+ * These are the driver's recommended message quotas. They are used
+ * by the core service to select message quotas for services with no
+ * explicitly configured quotas.
+ */
+ u32 in_quota_best;
+ u32 out_quota_best;
+ /** session setup **/
+ struct vs_server_serial_state *(*alloc) (struct vs_service_device *
+ service);
+ void (*release) (struct vs_server_serial_state * _state);
+
+ struct vs_service_driver *driver;
+
+/** Open, reopen, close and closed functions **/
+
+ vs_server_response_type_t(*open) (struct vs_server_serial_state *
+ _state);
+
+ vs_server_response_type_t(*reopen) (struct vs_server_serial_state *
+ _state);
+
+ vs_server_response_type_t(*close) (struct vs_server_serial_state *
+ _state);
+
+ void (*closed) (struct vs_server_serial_state * _state);
+
+/** Send/receive state callbacks **/
+ int (*tx_ready) (struct vs_server_serial_state * _state);
+
+ struct {
+ int (*msg_msg) (struct vs_server_serial_state * _state,
+ struct vs_pbuf b, struct vs_mbuf * _mbuf);
+
+ } serial;
+};
+
+struct vs_server_serial_state {
+ vservice_serial_protocol_state_t state;
+ uint32_t packet_size;
+ struct {
+ uint32_t packet_size;
+ } serial;
+ struct vs_service_device *service;
+ bool released;
+};
+
+/** Complete calls for server core functions **/
+extern int vs_server_serial_open_complete(struct vs_server_serial_state *_state,
+ vs_server_response_type_t resp);
+
+extern int vs_server_serial_close_complete(struct vs_server_serial_state
+ *_state,
+ vs_server_response_type_t resp);
+
+extern int vs_server_serial_reopen_complete(struct vs_server_serial_state
+ *_state,
+ vs_server_response_type_t resp);
+
+ /** interface serial **/
+/* message msg */
+extern struct vs_mbuf *vs_server_serial_serial_alloc_msg(struct
+ vs_server_serial_state
+ *_state,
+ struct vs_pbuf *b,
+ gfp_t flags);
+extern int vs_server_serial_serial_getbufs_msg(struct vs_server_serial_state
+ *_state, struct vs_pbuf *b,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_serial_serial_free_msg(struct vs_server_serial_state
+ *_state, struct vs_pbuf *b,
+ struct vs_mbuf *_mbuf);
+extern int vs_server_serial_serial_send_msg(struct vs_server_serial_state
+ *_state, struct vs_pbuf b,
+ struct vs_mbuf *_mbuf);
+
+/** Module registration **/
+
+struct module;
+
+extern int __vservice_serial_server_register(struct vs_server_serial *server,
+ const char *name,
+ struct module *owner);
+
+static inline int vservice_serial_server_register(struct vs_server_serial
+ *server, const char *name)
+{
+#ifdef MODULE
+ extern struct module __this_module;
+ struct module *this_module = &__this_module;
+#else
+ struct module *this_module = NULL;
+#endif
+
+ return __vservice_serial_server_register(server, name, this_module);
+}
+
+extern int vservice_serial_server_unregister(struct vs_server_serial *server);
+#endif /* ! VSERVICES_SERVER_SERIAL */
diff --git a/include/vservices/protocol/serial/types.h b/include/vservices/protocol/serial/types.h
new file mode 100644
index 0000000..46edf95
--- /dev/null
+++ b/include/vservices/protocol/serial/types.h
@@ -0,0 +1,88 @@
+
+/*
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#if !defined(VSERVICES_SERIAL_TYPES_H)
+#define VSERVICES_SERIAL_TYPES_H
+
+typedef enum {
+/* state closed */
+ VSERVICE_BASE_STATE_CLOSED = 0,
+ VSERVICE_BASE_STATE_CLOSED__OPEN,
+ VSERVICE_BASE_STATE_CLOSED__CLOSE,
+ VSERVICE_BASE_STATE_CLOSED__REOPEN,
+
+/* state running */
+ VSERVICE_BASE_STATE_RUNNING,
+ VSERVICE_BASE_STATE_RUNNING__OPEN,
+ VSERVICE_BASE_STATE_RUNNING__CLOSE,
+ VSERVICE_BASE_STATE_RUNNING__REOPEN,
+
+ VSERVICE_BASE__RESET = VSERVICE_BASE_STATE_CLOSED
+} vservice_base_statenum_t;
+
+typedef struct {
+ vservice_base_statenum_t statenum;
+} vservice_base_state_t;
+
+#define VSERVICE_BASE_RESET_STATE (vservice_base_state_t) { \
+.statenum = VSERVICE_BASE__RESET}
+
+#define VSERVICE_BASE_STATE_IS_CLOSED(state) (\
+((state).statenum == VSERVICE_BASE_STATE_CLOSED) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_CLOSED__REOPEN))
+
+#define VSERVICE_BASE_STATE_IS_RUNNING(state) (\
+((state).statenum == VSERVICE_BASE_STATE_RUNNING) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__OPEN) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__CLOSE) || \
+((state).statenum == VSERVICE_BASE_STATE_RUNNING__REOPEN))
+
+#define VSERVICE_BASE_STATE_VALID(state) ( \
+VSERVICE_BASE_STATE_IS_CLOSED(state) ? true : \
+VSERVICE_BASE_STATE_IS_RUNNING(state) ? true : \
+false)
+
+static inline const char *vservice_base_get_state_string(vservice_base_state_t
+ state)
+{
+ static const char *names[] =
+ { "closed", "closed__open", "closed__close", "closed__reopen",
+ "running", "running__open", "running__close", "running__reopen"
+ };
+ if (!VSERVICE_BASE_STATE_VALID(state)) {
+ return "INVALID";
+ }
+ return names[state.statenum];
+}
+
+typedef struct {
+} vservice_serial_state_t;
+
+#define VSERVICE_SERIAL_RESET_STATE (vservice_serial_state_t) { \
+}
+
+#define VSERVICE_SERIAL_STATE_VALID(state) true
+
+typedef struct {
+
+ vservice_base_state_t base;
+
+ vservice_serial_state_t serial;
+} vservice_serial_protocol_state_t;
+
+#define VSERVICE_SERIAL_PROTOCOL_RESET_STATE (vservice_serial_protocol_state_t) {\
+.base = VSERVICE_BASE_RESET_STATE,\
+.serial = VSERVICE_SERIAL_RESET_STATE }
+
+#define VSERVICE_SERIAL_IS_STATE_RESET(state) \
+ ((state).base.statenum == VSERVICE_BASE__RESET)
+#endif /* ! VSERVICES_SERIAL_TYPES_H */
diff --git a/include/vservices/service.h b/include/vservices/service.h
new file mode 100644
index 0000000..af232b6
--- /dev/null
+++ b/include/vservices/service.h
@@ -0,0 +1,674 @@
+/*
+ * include/vservices/service.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the driver and device types for vServices client and
+ * server drivers. These are generally defined by generated protocol-layer
+ * code. However, they can also be defined directly by applications that
+ * don't require protocol generation.
+ */
+
+#ifndef _VSERVICE_SERVICE_H_
+#define _VSERVICE_SERVICE_H_
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/jiffies.h>
+#include <linux/wait.h>
+#include <linux/err.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 38)
+#include <asm/atomic.h>
+#else
+#include <linux/atomic.h>
+#endif
+
+#include <vservices/transport.h>
+#include <vservices/session.h>
+#include <vservices/types.h>
+
+struct vs_mbuf;
+
+/**
+ * struct vs_service_driver - Virtual service driver structure
+ * @protocol: Protocol name for this driver
+ * @is_server: True if this is a server driver, false if it is a client driver
+ * @rx_atomic: If set to false then the receive message handlers are run from
+ * workqueue context and are allowed to sleep. If set to true
+ * the message handlers are run from tasklet context and may not
+ * sleep. For this purpose, tx_ready is considered a receive
+ * message handler.
+ * @tx_atomic: If this is set to true along with rx_atomic, the driver is
+ * allowed to send messages from softirq contexts other than the receive
+ * message handlers, after calling vs_service_state_lock_bh. Otherwise,
+ * messages may only be sent from the receive message handlers, or from
+ * task context after calling vs_service_state_lock.
+ * @probe: Probe function for this service
+ * @remove: Remove function for this service
+ * --- Callbacks ---
+ * @receive: Message handler function for this service
+ * @notify: Incoming notification handler function for this service
+ * @start: Callback which is run when this service is started
+ * @reset: Callback which is run when this service is reset
+ * @tx_ready: Callback which is run when the service has dropped below its
+ * send quota
+ * --- Resource requirements (valid for server only) ---
+ * @in_quota_min: minimum number of input messages for protocol functionality
+ * @in_quota_best: suggested number of input messages
+ * @out_quota_min: minimum number of output messages for protocol functionality
+ * @out_quota_best: suggested number of output messages
+ * @in_notify_count: number of input notification bits used
+ * @out_notify_count: number of output notification bits used
+ * --- Internal ---
+ * @driver: Linux device model driver structure
+ *
+ * The callback functions for a virtual service driver are all called from
+ * the virtual service device's work queue.
+ */
+struct vs_service_driver {
+ const char *protocol;
+ bool is_server;
+ bool rx_atomic, tx_atomic;
+
+ int (*probe)(struct vs_service_device *service);
+ int (*remove)(struct vs_service_device *service);
+
+ int (*receive)(struct vs_service_device *service,
+ struct vs_mbuf *mbuf);
+ void (*notify)(struct vs_service_device *service, u32 flags);
+
+ void (*start)(struct vs_service_device *service);
+ void (*reset)(struct vs_service_device *service);
+
+ int (*tx_ready)(struct vs_service_device *service);
+
+ unsigned in_quota_min;
+ unsigned in_quota_best;
+ unsigned out_quota_min;
+ unsigned out_quota_best;
+ unsigned in_notify_count;
+ unsigned out_notify_count;
+
+ struct device_driver driver;
+};
+
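+/*
+ * Example (sketch only): the shape of a driver structure as generated
+ * protocol code, or an application that skips protocol generation, might
+ * fill it in. The "my_echo_*" handlers and protocol name are hypothetical;
+ * drivers are bound via the client/server bus types declared below.
+ *
+ *	static struct vs_service_driver my_echo_driver = {
+ *		.protocol	= "com.example.echo",
+ *		.is_server	= true,
+ *		.rx_atomic	= false,
+ *		.probe		= my_echo_probe,
+ *		.remove		= my_echo_remove,
+ *		.receive	= my_echo_receive,
+ *		.in_quota_min	= 1,
+ *		.in_quota_best	= 4,
+ *		.out_quota_min	= 1,
+ *		.out_quota_best	= 4,
+ *	};
+ */
+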
+#define to_vs_service_driver(d) \
+ container_of(d, struct vs_service_driver, driver)
+
+/* The vServices server/client bus types */
+extern struct bus_type vs_client_bus_type;
+extern struct bus_type vs_server_bus_type;
+
+/**
+ * struct vs_service_stats - Virtual service statistics
+ * @over_quota_time: Internal counter for tracking over quota time.
+ * @sent_mbufs: Total number of message buffers sent.
+ * @sent_bytes: Total bytes sent.
+ * @send_failures: Total number of send failures.
+ * @recv_mbufs: Total number of message buffers received.
+ * @recv_bytes: Total number of bytes received.
+ * @recv_failures: Total number of receive failures.
+ * @nr_over_quota: Number of times an mbuf allocation has failed because the
+ * service is over quota.
+ * @nr_tx_ready: Number of times the service has run its tx_ready handler
+ * @over_quota_time_total: The total amount of time in milliseconds that the
+ *                         service has spent over quota. Measured as the time
+ *                         between exceeding quota in mbuf allocation and
+ *                         running the tx_ready handler.
+ * @over_quota_time_avg: The average amount of time in milliseconds that the
+ *                       service is spending in the over quota state.
+ */
+struct vs_service_stats {
+ unsigned long over_quota_time;
+
+ atomic_t sent_mbufs;
+ atomic_t sent_bytes;
+ atomic_t send_failures;
+ atomic_t recv_mbufs;
+ atomic_t recv_bytes;
+ atomic_t recv_failures;
+ atomic_t nr_over_quota;
+ atomic_t nr_tx_ready;
+ atomic_t over_quota_time_total;
+ atomic_t over_quota_time_avg;
+};
+
+/**
+ * struct vs_service_device - Virtual service device
+ * @id: Unique ID (to the session) for this service
+ * @name: Service name
+ * @sysfs_name: The sysfs name for the service
+ * @protocol: Service protocol name
+ * @is_server: True if this device is a server, false if it is a client
+ * @owner: service responsible for managing this service. This must be
+ * on the same session, and is NULL iff this is the core service.
+ * It must not be a service whose driver has tx_atomic set.
+ * @lock_subclass: the number of generations of owners between this service
+ * and the core service; 0 for the core service, 1 for anything directly
+ * created by it, and so on. This is only used for verifying lock
+ * ordering (when lockdep is enabled), hence the name.
+ * @ready_lock: mutex protecting readiness, disable_count and driver_probed.
+ * This depends on the state_mutex of the service's owner, if any. Acquire
+ * it using mutex_lock_nested(ready_lock, lock_subclass).
+ * @readiness: Service's readiness state, owned by session layer.
+ * @disable_count: Number of times the service has been disabled without
+ * a matching enable.
+ * @driver_probed: True if a driver has been probed (and not removed)
+ * @work_queue: Work queue for this service's task-context work.
+ * @rx_tasklet: Tasklet for handling incoming messages. This is only used
+ * if the service driver has rx_atomic set to true. Otherwise
+ * incoming messages are handled on the workqueue by rx_work.
+ * @rx_work: Work structure for handling incoming messages. This is only
+ * used if the service driver has rx_atomic set to false.
+ * @rx_lock: Spinlock which protects access to rx_queue and tx_ready
+ * @rx_queue: Queue of incoming messages
+ * @tx_ready: Flag indicating that a tx_ready event is pending
+ * @tx_batching: Flag indicating that outgoing messages are being batched
+ * @state_spinlock: spinlock used to protect the service state if the
+ * service driver has tx_atomic (and rx_atomic) set to true. This
+ * depends on the service's ready_lock. Acquire it only by
+ * calling vs_service_state_lock_bh().
+ * @state_mutex: mutex used to protect the service state if the service
+ * driver has tx_atomic set to false. This depends on the service's
+ * ready_lock, and if rx_atomic is true, the rx_tasklet must be
+ * disabled while it is held. Acquire it only by calling
+ * vs_service_state_lock().
+ * @state_spinlock_used: Flag to check if the state spinlock has been acquired.
+ * @state_mutex_used: Flag to check if the state mutex has been acquired.
+ * @reset_work: Work to reset the service after a driver fails
+ * @pending_reset: Set if reset_work has been queued and not completed.
+ * @ready_work: Work to make service ready after a throttling delay
+ * @cooloff_work: Work for cooling off reset throttling after the reset
+ * throttling limit was hit
+ * @cleanup_work: Work for cleaning up and freeing the service structure
+ * @last_reset: Time in jiffies at which this service last reset
+ * @last_reset_request: Time in jiffies the last reset request for this
+ * service occurred at
+ * @last_ready: Time in jiffies at which this service last became ready
+ * @reset_delay: Time in jiffies that the next throttled reset will be
+ * delayed for. A value of zero means that reset throttling is not in
+ * effect.
+ * @is_over_quota: Internal flag for whether the service is over quota. This
+ * flag is only used for stats accounting.
+ * @quota_wq: waitqueue that is woken whenever the available send quota
+ * increases.
+ * @notify_send_bits: The number of bits allocated for outgoing notifications.
+ * @notify_send_offset: The first bit allocated for outgoing notifications.
+ * @notify_recv_bits: The number of bits allocated for incoming notifications.
+ * @notify_recv_offset: The first bit allocated for incoming notifications.
+ * @send_quota: The maximum number of outgoing messages.
+ * @recv_quota: The maximum number of incoming messages.
+ * @in_quota_set: For servers, the number of client->server messages
+ * requested during system configuration (sysfs or environment).
+ * @out_quota_set: For servers, the number of server->client messages
+ * requested during system configuration (sysfs or environment).
+ * @dev: Linux device model device structure
+ * @stats: Service statistics
+ */
+struct vs_service_device {
+ vs_service_id_t id;
+ char *name;
+ char *sysfs_name;
+ char *protocol;
+ bool is_server;
+
+ struct vs_service_device *owner;
+ unsigned lock_subclass;
+
+ struct mutex ready_lock;
+ unsigned readiness;
+ int disable_count;
+ bool driver_probed;
+
+ struct workqueue_struct *work_queue;
+
+ struct tasklet_struct rx_tasklet;
+ struct work_struct rx_work;
+
+ spinlock_t rx_lock;
+ struct list_head rx_queue;
+ bool tx_ready, tx_batching;
+
+ spinlock_t state_spinlock;
+ struct mutex state_mutex;
+
+ struct work_struct reset_work;
+ bool pending_reset;
+ struct delayed_work ready_work;
+ struct delayed_work cooloff_work;
+ struct work_struct cleanup_work;
+
+ unsigned long last_reset;
+ unsigned long last_reset_request;
+ unsigned long last_ready;
+ unsigned long reset_delay;
+
+ atomic_t is_over_quota;
+ wait_queue_head_t quota_wq;
+
+ unsigned notify_send_bits;
+ unsigned notify_send_offset;
+ unsigned notify_recv_bits;
+ unsigned notify_recv_offset;
+ unsigned send_quota;
+ unsigned recv_quota;
+
+ unsigned in_quota_set;
+ unsigned out_quota_set;
+
+ void *transport_priv;
+
+ struct device dev;
+ struct vs_service_stats stats;
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ bool state_spinlock_used;
+ bool state_mutex_used;
+#endif
+};
+
+#define to_vs_service_device(d) container_of(d, struct vs_service_device, dev)
+
+/**
+ * vs_service_get_session - Return the session for a service
+ * @service: Service to get the session for
+ */
+static inline struct vs_session_device *
+vs_service_get_session(struct vs_service_device *service)
+{
+ return to_vs_session_device(service->dev.parent);
+}
+
+/**
+ * vs_service_send - Send a message from a service
+ * @service: Service to send the message from
+ * @mbuf: Message buffer to send
+ */
+static inline int
+vs_service_send(struct vs_service_device *service, struct vs_mbuf *mbuf)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ const struct vs_transport_vtable *vt = session->transport->vt;
+ const unsigned long flags =
+ service->tx_batching ? VS_TRANSPORT_SEND_FLAGS_MORE : 0;
+ size_t msg_size = vt->mbuf_size(mbuf);
+ int err;
+
+ err = vt->send(session->transport, service, mbuf, flags);
+ if (!err) {
+ atomic_inc(&service->stats.sent_mbufs);
+ atomic_add(msg_size, &service->stats.sent_bytes);
+ } else {
+ atomic_inc(&service->stats.send_failures);
+ }
+
+ return err;
+}
+
+/**
+ * vs_service_alloc_mbuf - Allocate a message buffer for a service
+ * @service: Service to allocate the buffer for
+ * @size: Size of the data buffer to allocate
+ * @flags: Flags to pass to the buffer allocation
+ */
+static inline struct vs_mbuf *
+vs_service_alloc_mbuf(struct vs_service_device *service, size_t size,
+ gfp_t flags)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+ struct vs_mbuf *mbuf;
+
+ mbuf = session->transport->vt->alloc_mbuf(session->transport,
+ service, size, flags);
+ if (IS_ERR(mbuf) && PTR_ERR(mbuf) == -ENOBUFS) {
+ /* Over quota accounting */
+ if (atomic_cmpxchg(&service->is_over_quota, 0, 1) == 0) {
+ service->stats.over_quota_time = jiffies;
+ atomic_inc(&service->stats.nr_over_quota);
+ }
+ }
+
+ /*
+ * The transport drivers should return either a valid message buffer
+ * pointer or an ERR_PTR value. Warn here if a transport driver is
+ * returning NULL on message buffer allocation failure.
+ */
+ if (WARN_ON_ONCE(!mbuf))
+ return ERR_PTR(-ENOMEM);
+
+ return mbuf;
+}
+
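+/*
+ * Example (sketch only): allocating and sending a message with the usual
+ * quota handling. The payload parameters are hypothetical.
+ *
+ *	static int my_send_data(struct vs_service_device *service,
+ *			const void *data, size_t len)
+ *	{
+ *		struct vs_mbuf *mbuf;
+ *		int err;
+ *
+ *		mbuf = vs_service_alloc_mbuf(service, len, GFP_KERNEL);
+ *		if (IS_ERR(mbuf))
+ *			return PTR_ERR(mbuf); (-ENOBUFS means over quota)
+ *		memcpy(mbuf->data, data, len);
+ *		err = vs_service_send(service, mbuf);
+ *		if (err < 0)
+ *			vs_service_free_mbuf(service, mbuf);
+ *		return err;
+ *	}
+ */
+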
+/**
+ * vs_service_free_mbuf - Deallocate a message buffer for a service
+ * @service: Service the message buffer was allocated for
+ * @mbuf: Message buffer to deallocate
+ */
+static inline void
+vs_service_free_mbuf(struct vs_service_device *service, struct vs_mbuf *mbuf)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ session->transport->vt->free_mbuf(session->transport, service, mbuf);
+}
+
+/**
+ * vs_service_notify - Send a notification from a service
+ * @service: Service to send the notification from
+ * @flags: Notification bits to send
+ */
+static inline int
+vs_service_notify(struct vs_service_device *service, u32 flags)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ return session->transport->vt->notify(session->transport,
+ service, flags);
+}
+
+/**
+ * vs_service_has_atomic_rx - Return whether or not a service's receive
+ * message handler runs in atomic context. This function should only be
+ * called for services which are bound to a driver.
+ *
+ * @service: Service to check
+ */
+static inline bool
+vs_service_has_atomic_rx(struct vs_service_device *service)
+{
+ if (WARN_ON(!service->dev.driver))
+ return false;
+
+ return to_vs_service_driver(service->dev.driver)->rx_atomic;
+}
+
+/**
+ * vs_service_max_mbuf_size - Return the maximum allocation size of a message
+ * buffer.
+ * @service: The service to check
+ */
+static inline size_t
+vs_service_max_mbuf_size(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ return session->transport->vt->max_mbuf_size(session->transport);
+}
+
+/**
+ * vs_service_send_mbufs_available - Return the number of mbufs which can be
+ * allocated for sending before going over quota.
+ * @service: The service to check
+ */
+static inline ssize_t
+vs_service_send_mbufs_available(struct vs_service_device *service)
+{
+ struct vs_session_device *session = vs_service_get_session(service);
+
+ return session->transport->vt->service_send_avail(session->transport,
+ service);
+}
+
+/**
+ * vs_service_has_atomic_tx - Return whether or not a service is allowed to
+ * transmit from atomic context (other than its receive message handler).
+ * This function should only be called for services which are bound to a
+ * driver.
+ *
+ * @service: Service to check
+ */
+static inline bool
+vs_service_has_atomic_tx(struct vs_service_device *service)
+{
+ if (WARN_ON(!service->dev.driver))
+ return false;
+
+ return to_vs_service_driver(service->dev.driver)->tx_atomic;
+}
+
+/**
+ * vs_service_state_lock - Acquire a lock allowing service state operations
+ * from external task contexts.
+ *
+ * @service: Service to lock.
+ *
+ * This must be used to protect any service state accesses that occur in task
+ * contexts outside of a callback from the vservices protocol layer. It must
+ * not be called from a protocol layer callback, nor from atomic context.
+ *
+ * If this service's state is also accessed from softirq contexts other than
+ * vservices protocol layer callbacks, use vs_service_state_lock_bh instead,
+ * and set the driver's tx_atomic flag.
+ *
+ * If this is called from outside the service's workqueue, the calling driver
+ * must provide its own guarantee that it has not been detached from the
+ * service. If that is not possible, use vs_state_lock_safe().
+ */
+static inline void
+vs_service_state_lock(struct vs_service_device *service)
+__acquires(service)
+{
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ WARN_ON_ONCE(vs_service_has_atomic_tx(service));
+#endif
+
+ mutex_lock_nested(&service->state_mutex, service->lock_subclass);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ if (WARN_ON_ONCE(service->state_spinlock_used))
+ dev_err(&service->dev, "Service is using both the state spinlock and mutex - Fix your driver\n");
+ service->state_mutex_used = true;
+#endif
+
+ if (vs_service_has_atomic_rx(service))
+ tasklet_disable(&service->rx_tasklet);
+
+ __acquire(service);
+}
+
+/**
+ * vs_service_state_unlock - Release the lock acquired by vs_service_state_lock.
+ *
+ * @service: Service to unlock.
+ */
+static inline void
+vs_service_state_unlock(struct vs_service_device *service)
+__releases(service)
+{
+ __release(service);
+
+ mutex_unlock(&service->state_mutex);
+
+ if (vs_service_has_atomic_rx(service)) {
+ tasklet_enable(&service->rx_tasklet);
+
+ /* Kick the tasklet if there is RX work to do */
+ if (!list_empty(&service->rx_queue))
+ tasklet_schedule(&service->rx_tasklet);
+ }
+}
+
+/**
+ * vs_service_state_lock_bh - Acquire a lock allowing service state operations
+ * from external task or softirq contexts.
+ *
+ * @service: Service to lock.
+ *
+ * This is an alternative to vs_service_state_lock for drivers that receive
+ * messages in atomic context (i.e. have their rx_atomic flag set), *and* must
+ * transmit messages from softirq contexts other than their own message
+ * receive and tx_ready callbacks. Such drivers must set their tx_atomic
+ * flag, so generated protocol drivers perform correct locking.
+ *
+ * This should replace all calls to vs_service_state_lock for services that
+ * need it. Do not use both locking functions in one service driver.
+ *
+ * The calling driver must provide its own guarantee that it has not been
+ * detached from the service. If that is not possible, use
+ * vs_state_lock_safe_bh().
+ */
+static inline void
+vs_service_state_lock_bh(struct vs_service_device *service)
+__acquires(service)
+__acquires(&service->state_spinlock)
+{
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ WARN_ON_ONCE(!vs_service_has_atomic_rx(service));
+ WARN_ON_ONCE(!vs_service_has_atomic_tx(service));
+#endif
+
+#ifdef CONFIG_SMP
+ /* Not necessary on UP because it's implied by spin_lock_bh(). */
+ tasklet_disable(&service->rx_tasklet);
+#endif
+
+ spin_lock_bh(&service->state_spinlock);
+
+#ifdef CONFIG_VSERVICES_LOCK_DEBUG
+ if (WARN_ON_ONCE(service->state_mutex_used))
+ dev_err(&service->dev, "Service is using both the state spinlock and mutex - Fix your driver\n");
+ service->state_spinlock_used = true;
+#endif
+
+ __acquire(service);
+}
+
+/**
+ * vs_service_state_unlock_bh - Release the lock acquired by
+ * vs_service_state_lock_bh.
+ *
+ * @service: Service to unlock.
+ */
+static inline void
+vs_service_state_unlock_bh(struct vs_service_device *service)
+__releases(service)
+__releases(&service->state_spinlock)
+{
+ __release(service);
+
+ spin_unlock_bh(&service->state_spinlock);
+
+#ifdef CONFIG_SMP
+ tasklet_enable(&service->rx_tasklet);
+#endif
+}
+
+/* Convenience macros for locking a state structure rather than a service. */
+#define vs_state_lock(state) vs_service_state_lock((state)->service)
+#define vs_state_unlock(state) vs_service_state_unlock((state)->service)
+#define vs_state_lock_bh(state) vs_service_state_lock_bh((state)->service)
+#define vs_state_unlock_bh(state) vs_service_state_unlock_bh((state)->service)
+
+/**
+ * vs_state_lock_safe[_bh] - Acquire a lock for a state structure's service,
+ * when the service may have been detached from the state.
+ *
+ * This is useful for blocking operations that can't easily be terminated
+ * before returning from the service reset handler, such as file I/O. To use
+ * this, the state structure should be reference-counted rather than freed in
+ * the release callback, and the driver should retain its own reference to the
+ * service until the state structure is freed.
+ *
+ * This macro acquires the lock and returns true if the state has not been
+ * detached from the service. Otherwise, it returns false.
+ *
+ * Note that the _bh variant cannot be used from atomic context, because it
+ * acquires a mutex.
+ */
+#define __vs_state_lock_safe(_state, _lock, _unlock) ({ \
+ bool __ok = true; \
+ typeof(_state) __state = (_state); \
+ struct vs_service_device *__service = __state->service; \
+ mutex_lock_nested(&__service->ready_lock, \
+ __service->lock_subclass); \
+ __ok = !ACCESS_ONCE(__state->released); \
+ if (__ok) { \
+ _lock(__state); \
+ __ok = !ACCESS_ONCE(__state->released); \
+ if (!__ok) \
+ _unlock(__state); \
+ } \
+ mutex_unlock(&__service->ready_lock); \
+ __ok; \
+})
+#define vs_state_lock_safe(_state) \
+ __vs_state_lock_safe((_state), vs_state_lock, vs_state_unlock)
+#define vs_state_lock_safe_bh(_state) \
+ __vs_state_lock_safe((_state), vs_state_lock_bh, vs_state_unlock_bh)
+
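+/*
+ * Example (sketch only): taking the state lock from a blocking context that
+ * may outlive its binding to the service, such as file I/O completion. The
+ * state type "my_state" is hypothetical and follows the reference-counted
+ * pattern described above (it embeds the service and released fields).
+ *
+ *	static void my_io_done(struct my_state *state)
+ *	{
+ *		if (!vs_state_lock_safe(state))
+ *			return; (detached; discard the result)
+ *		... update protocol state, send replies ...
+ *		vs_state_unlock(state);
+ *	}
+ */
+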
+/**
+ * vs_get_service - Get a reference to a service.
+ * @service: Service to get a reference to.
+ */
+static inline struct vs_service_device *
+vs_get_service(struct vs_service_device *service)
+{
+ if (service)
+ get_device(&service->dev);
+ return service;
+}
+
+/**
+ * vs_put_service - Put a reference to a service.
+ * @service: The service to put the reference to.
+ */
+static inline void
+vs_put_service(struct vs_service_device *service)
+{
+ put_device(&service->dev);
+}
+
+extern int vs_service_reset(struct vs_service_device *service,
+ struct vs_service_device *caller);
+extern void vs_service_reset_nosync(struct vs_service_device *service);
+
+/**
+ * vs_service_send_batch_start - Start a batch of outgoing messages
+ * @service: The service that is starting a batch
+ * @flush: Finish any previously started batch (if false, then duplicate
+ * calls to this function have no effect)
+ */
+static inline void
+vs_service_send_batch_start(struct vs_service_device *service, bool flush)
+{
+ if (flush && service->tx_batching) {
+ struct vs_session_device *session =
+ vs_service_get_session(service);
+ const struct vs_transport_vtable *vt = session->transport->vt;
+ if (vt->flush)
+ vt->flush(session->transport, service);
+ } else {
+ service->tx_batching = true;
+ }
+}
+
+/**
+ * vs_service_send_batch_end - End a batch of outgoing messages
+ * @service: The service that is ending a batch
+ * @flush: Start sending the batch immediately (if false, the batch will
+ * be flushed when the next message is sent)
+ */
+static inline void
+vs_service_send_batch_end(struct vs_service_device *service, bool flush)
+{
+ service->tx_batching = false;
+ if (flush) {
+ struct vs_session_device *session =
+ vs_service_get_session(service);
+ const struct vs_transport_vtable *vt = session->transport->vt;
+ if (vt->flush)
+ vt->flush(session->transport, service);
+ }
+}
+
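+/*
+ * Example (sketch only): batching several sends so the transport may
+ * coalesce them into one transfer. my_send_one() is hypothetical.
+ *
+ *	vs_service_send_batch_start(service, false);
+ *	for (i = 0; i < n; i++)
+ *		my_send_one(service, &items[i]);
+ *	vs_service_send_batch_end(service, true);
+ */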
+
+#endif /* _VSERVICE_SERVICE_H_ */
diff --git a/include/vservices/session.h b/include/vservices/session.h
new file mode 100644
index 0000000..b9dc775
--- /dev/null
+++ b/include/vservices/session.h
@@ -0,0 +1,161 @@
+/*
+ * include/vservices/session.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file defines the device type for a vServices session attached to a
+ * transport. This should only be used by transport drivers, the vServices
+ * session code, and the inline transport-access functions defined in
+ * vservices/service.h.
+ *
+ * Drivers for these devices are defined internally by the vServices
+ * framework. Other drivers should not attach to these devices.
+ */
+
+#ifndef _VSERVICES_SESSION_H_
+#define _VSERVICES_SESSION_H_
+
+#include <linux/types.h>
+#include <linux/device.h>
+#include <linux/spinlock.h>
+#include <linux/completion.h>
+#include <linux/mutex.h>
+#include <linux/list.h>
+#include <linux/idr.h>
+
+#include <vservices/types.h>
+
+struct vs_service_device;
+struct vs_mbuf;
+
+struct notifier_block;
+
+/**
+ * enum vs_notify_event_t - vService notifier events
+ *
+ * @VS_SESSION_NOTIFY_ADD: vService session added. Argument is a pointer to
+ * the vs_session_device. This notification is sent after the session has been
+ * added.
+ *
+ * @VS_SESSION_NOTIFY_REMOVE: vService session about to be removed. Argument is
+ * a pointer to the vs_session_device. This notification is sent before the
+ * session is removed.
+ */
+enum vs_notify_event_t {
+ VS_SESSION_NOTIFY_ADD,
+ VS_SESSION_NOTIFY_REMOVE,
+};
+
+/**
+ * struct vs_session_device - Session device
+ * @name: The unique human-readable name of this session.
+ * @is_server: True if this session is a server, false if client
+ * @transport: The transport device for this session
+ * @session_num: Unique ID for this session. Used for sysfs
+ * @session_lock: Mutex which protects any change to service presence or
+ * readiness
+ * @core_service: The core service, if one has ever been registered. Once set,
+ *	this must remain valid and unchanged until the session driver is
+ *	removed. Writes are protected by the service_idr_lock.
+ * @service_idr: IDR of the services currently allocated on this session,
+ *	which allows service ids to be recycled. Protected by
+ *	service_idr_lock.
+ * @service_idr_lock: Mutex protecting the service IDR and core_service
+ * @activation_work: work structure for handling session activation & reset
+ * @activation_state: true if transport is currently active
+ * @fatal_error_work: work structure for handling fatal session failures
+ * @debug_mask: Debug level mask
+ * @list: Entry in the global session list
+ * @sysfs_entry: Kobject pointer pointing to session device in sysfs under
+ * sys/vservices
+ * @dev: Device structure for the Linux device model
+ */
+struct vs_session_device {
+ char *name;
+ bool is_server;
+ struct vs_transport *transport;
+ int session_num;
+
+ struct mutex session_lock;
+
+ /*
+ * The service_idr maintains the list of currently allocated services
+ * on a session, and allows for recycling of service ids. The lock also
+ * protects core_service.
+ */
+ struct idr service_idr;
+ struct mutex service_idr_lock;
+ struct vs_service_device *core_service;
+
+ struct work_struct activation_work;
+ atomic_t activation_state;
+
+ struct work_struct fatal_error_work;
+
+ unsigned long debug_mask;
+
+ struct list_head list;
+ struct kobject *sysfs_entry;
+
+ struct device dev;
+};
+
+#define to_vs_session_device(d) \
+ container_of(d, struct vs_session_device, dev)
+
+extern struct vs_session_device *
+vs_session_register(struct vs_transport *transport, struct device *parent,
+ bool server, const char *transport_name);
+extern void vs_session_start(struct vs_session_device *session);
+extern void vs_session_unregister(struct vs_session_device *session);
+
+extern int vs_session_handle_message(struct vs_session_device *session,
+ struct vs_mbuf *mbuf, vs_service_id_t service_id);
+
+extern void vs_session_quota_available(struct vs_session_device *session,
+ vs_service_id_t service_id, unsigned count,
+ bool send_tx_ready);
+
+extern void vs_session_handle_notify(struct vs_session_device *session,
+ unsigned long flags, vs_service_id_t service_id);
+
+extern void vs_session_handle_reset(struct vs_session_device *session);
+extern void vs_session_handle_activate(struct vs_session_device *session);
+
+extern struct vs_service_device *
+vs_server_create_service(struct vs_session_device *session,
+ struct vs_service_device *parent, const char *name,
+ const char *protocol, const void *plat_data);
+extern int vs_server_destroy_service(struct vs_service_device *service,
+ struct vs_service_device *parent);
+
+extern void vs_session_register_notify(struct notifier_block *nb);
+extern void vs_session_unregister_notify(struct notifier_block *nb);
+
+extern int vs_session_unbind_driver(struct vs_service_device *service);
+
+extern void vs_session_for_each_service(struct vs_session_device *session,
+ void (*func)(struct vs_service_device *, void *), void *data);
+
+extern struct mutex vs_session_lock;
+extern int vs_session_for_each_locked(
+ int (*fn)(struct vs_session_device *session, void *data),
+ void *data);
+
+static inline int vs_session_for_each(
+ int (*fn)(struct vs_session_device *session, void *data),
+ void *data)
+{
+ int r;
+ mutex_lock(&vs_session_lock);
+ r = vs_session_for_each_locked(fn, data);
+ mutex_unlock(&vs_session_lock);
+ return r;
+}
+
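+/*
+ * Example (sketch only): watching session arrival and removal with the
+ * notifier interface above. The callback name is hypothetical.
+ *
+ *	static int my_session_event(struct notifier_block *nb,
+ *			unsigned long event, void *data)
+ *	{
+ *		struct vs_session_device *session = data;
+ *
+ *		if (event == VS_SESSION_NOTIFY_ADD)
+ *			pr_info("session %s added\n", session->name);
+ *		else if (event == VS_SESSION_NOTIFY_REMOVE)
+ *			pr_info("session %s removing\n", session->name);
+ *		return NOTIFY_OK;
+ *	}
+ *
+ *	static struct notifier_block my_nb = {
+ *		.notifier_call = my_session_event,
+ *	};
+ *
+ * registered with vs_session_register_notify(&my_nb).
+ */
+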
+#endif /* _VSERVICES_SESSION_H_ */
diff --git a/include/vservices/transport.h b/include/vservices/transport.h
new file mode 100644
index 0000000..6251ce1
--- /dev/null
+++ b/include/vservices/transport.h
@@ -0,0 +1,150 @@
+/*
+ * include/vservices/transport.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This file contains the transport vtable structure. This is made public so
+ * that the application drivers can call the vtable functions directly (via
+ * the inlined wrappers in service.h) rather than indirectly via a function
+ * call.
+ *
+ */
+
+#ifndef _VSERVICES_TRANSPORT_H_
+#define _VSERVICES_TRANSPORT_H_
+
+#include <linux/types.h>
+
+#include <vservices/types.h>
+
+struct vs_transport;
+struct vs_mbuf;
+struct vs_service_device;
+
+/**
+ * struct vs_transport_vtable - Transport driver operations. Transport drivers
+ * must provide implementations for all operations in this table.
+ * --- Message buffer allocation ---
+ * @alloc_mbuf: Allocate an mbuf of the given size for the given service
+ * @free_mbuf: Deallocate an mbuf
+ * @mbuf_size: Return the size in bytes of a message buffer. The size returned
+ * should be the total number of bytes including any headers.
+ * @max_mbuf_size: Return the maximum allowable message buffer allocation size.
+ * --- Message sending ---
+ * @send: Queue an mbuf for sending
+ * @flush: Start the transfer for the current message batch, if any
+ * @notify: Send a notification
+ * --- Transport-level reset handling ---
+ * @reset: Reset the transport layer
+ * @ready: Ready the transport layer
+ * --- Service management ---
+ * @service_add: A new service has been added to this transport's session
+ * @service_remove: A service has been removed from this transport's session
+ * @service_start: A service on this transport's session has had its resource
+ * allocations set and is about to start. This is always interleaved with
+ * service_reset, with one specific exception: the core service client,
+ * which has its quotas initially hard-coded to 0 send / 1 recv and
+ * adjusted when the initial startup message arrives.
+ * @service_reset: A service on this transport's session has just been reset,
+ * and any resources allocated to it should be cleaned up to prepare
+ * for later reallocation.
+ * @service_send_avail: The number of message buffers that this service is
+ * able to send before going over quota.
+ * --- Query transport capabilities ---
+ * @get_notify_bits: Fetch the number of sent and received notification bits
+ * supported by this transport. Note that this can be any positive value
+ * up to UINT_MAX.
+ * @get_quota_limits: Fetch the total send and receive message buffer quotas
+ * supported by this transport. Note that this can be any positive value
+ * up to UINT_MAX.
+ */
+struct vs_transport_vtable {
+ /* Message buffer allocation */
+ struct vs_mbuf *(*alloc_mbuf)(struct vs_transport *transport,
+ struct vs_service_device *service, size_t size,
+ gfp_t gfp_flags);
+ void (*free_mbuf)(struct vs_transport *transport,
+ struct vs_service_device *service,
+ struct vs_mbuf *mbuf);
+ size_t (*mbuf_size)(struct vs_mbuf *mbuf);
+ size_t (*max_mbuf_size)(struct vs_transport *transport);
+
+ /* Sending messages */
+ int (*send)(struct vs_transport *transport,
+ struct vs_service_device *service,
+ struct vs_mbuf *mbuf, unsigned long flags);
+ int (*flush)(struct vs_transport *transport,
+ struct vs_service_device *service);
+ int (*notify)(struct vs_transport *transport,
+ struct vs_service_device *service,
+ unsigned long bits);
+
+ /* Raising and clearing transport-level reset */
+ void (*reset)(struct vs_transport *transport);
+ void (*ready)(struct vs_transport *transport);
+
+ /* Service management */
+ int (*service_add)(struct vs_transport *transport,
+ struct vs_service_device *service);
+ void (*service_remove)(struct vs_transport *transport,
+ struct vs_service_device *service);
+
+ int (*service_start)(struct vs_transport *transport,
+ struct vs_service_device *service);
+ int (*service_reset)(struct vs_transport *transport,
+ struct vs_service_device *service);
+
+ ssize_t (*service_send_avail)(struct vs_transport *transport,
+ struct vs_service_device *service);
+
+ /* Query transport capabilities */
+ void (*get_notify_bits)(struct vs_transport *transport,
+ unsigned *send_notify_bits, unsigned *recv_notify_bits);
+ void (*get_quota_limits)(struct vs_transport *transport,
+ unsigned *send_quota, unsigned *recv_quota);
+};
+
+/* Flags for .send */
+#define VS_TRANSPORT_SEND_FLAGS_MORE 0x1
+
+/**
+ * struct vs_transport - A structure representing a transport
+ * @type: Type of transport, e.g. microvisor or loopback
+ * @vt: Transport operations table
+ * @notify_info: Array of incoming notification settings
+ * @notify_info_size: Size of the incoming notification array
+ */
+struct vs_transport {
+ const char *type;
+ const struct vs_transport_vtable *vt;
+ struct vs_notify_info *notify_info;
+ int notify_info_size;
+};
+
+/**
+ * struct vs_mbuf - Message buffer. This is always allocated and released by the
+ * transport callbacks defined above, so it may be embedded in a
+ * transport-specific structure containing additional state.
+ * @data: Message data buffer
+ * @size: Size of the data buffer in bytes
+ * @is_recv: True if this mbuf was received from the other end of the
+ * transport. False if it was allocated by this end for sending.
+ * @priv: Private value that will not be touched by the framework
+ * @queue: list_head for entry in lists. The session layer uses this queue
+ * for receiving messages. The transport driver may use this queue for its
+ * own purposes when sending messages.
+ */
+struct vs_mbuf {
+ void *data;
+ size_t size;
+ bool is_recv;
+ void *priv;
+ struct list_head queue;
+};
+
+#endif /* _VSERVICES_TRANSPORT_H_ */
diff --git a/include/vservices/types.h b/include/vservices/types.h
new file mode 100644
index 0000000..306156e
--- /dev/null
+++ b/include/vservices/types.h
@@ -0,0 +1,41 @@
+/*
+ * include/vservices/types.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef _VSERVICE_TYPES_H
+#define _VSERVICE_TYPES_H
+
+#include <linux/types.h>
+
+typedef u16 vs_service_id_t;
+typedef u16 vs_message_id_t;
+
+/*
+ * An opaque handle to a queued asynchronous command. This is used internally
+ * by the generated interface code, to identify which of the pending commands
+ * is being replied to. It is provided as a parameter to non-blocking handler
+ * callbacks for queued asynchronous requests, and must be stored by the server
+ * and passed to the corresponding reply call.
+ */
+typedef struct vservice_queued_request vservice_queued_request_t;
+
+/*
+ * The following enum is used by a server to report the outcome of an open
+ * callback: VS_SERVER_RESP_SUCCESS on success or VS_SERVER_RESP_FAILURE on
+ * failure. A server may instead complete the request explicitly later, in
+ * which case it should return VS_SERVER_RESP_EXPLICIT_COMPLETE.
+ */
+typedef enum vs_server_response_type {
+ VS_SERVER_RESP_SUCCESS,
+ VS_SERVER_RESP_FAILURE,
+ VS_SERVER_RESP_EXPLICIT_COMPLETE
+} vs_server_response_type_t;
+
+#endif /* _VSERVICE_TYPES_H */
diff --git a/include/vservices/wait.h b/include/vservices/wait.h
new file mode 100644
index 0000000..544937d
--- /dev/null
+++ b/include/vservices/wait.h
@@ -0,0 +1,455 @@
+/*
+ * include/vservices/wait.h
+ *
+ * Copyright (c) 2012-2018 General Dynamics
+ * Copyright (c) 2014 Open Kernel Labs, Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Generic wait event helpers for Virtual Service drivers.
+ */
+
+#ifndef _VSERVICE_SERVICE_WAIT_H
+#define _VSERVICE_SERVICE_WAIT_H
+
+#include <linux/sched.h>
+#include <linux/wait.h>
+
+#include <vservices/service.h>
+
+/* Older kernels don't have lockdep_assert_held_once(). */
+#ifndef lockdep_assert_held_once
+#ifdef CONFIG_LOCKDEP
+#define lockdep_assert_held_once(l) do { \
+ WARN_ON_ONCE(debug_locks && !lockdep_is_held(l)); \
+ } while (0)
+#else
+#define lockdep_assert_held_once(l) do { } while (0)
+#endif
+#endif
+
+/* Legacy wait macro; needs rewriting to use vs_state_lock_safe(). */
+/* FIXME: Redmine ticket #229 - philip. */
+/**
+ * __vs_service_wait_event - Wait for a condition to become true for a
+ * Virtual Service.
+ *
+ * @_service: The service to wait for the condition to be true for.
+ * @_wq: Waitqueue to wait on.
+ * @_condition: Condition to wait for.
+ *
+ * Returns: This function returns 0 if the condition is true, or -ERESTARTSYS
+ * if the wait was interrupted by a signal. If _state is TASK_UNINTERRUPTIBLE
+ * then this function will always return 0.
+ *
+ * This function must be called with the service's state lock held. The wait
+ * is performed without the state lock held, but the condition is re-checked
+ * after reacquiring the state lock. This property allows this function to
+ * check the state of the service's protocol in a thread safe manner.
+ *
+ * The caller is responsible for ensuring that it has not been detached from
+ * the given service.
+ *
+ * It is nearly always wrong to call this on the service workqueue, since
+ * the workqueue is single-threaded and the state can only change when a
+ * handler function is called on it.
+ */
+#define __vs_service_wait_event(_service, _wq, _cond, _state) \
+ ({ \
+ DEFINE_WAIT(__wait); \
+ int __ret = 0; \
+ \
+ lockdep_assert_held_once(&(_service)->state_mutex); \
+ do { \
+ prepare_to_wait(&(_wq), &__wait, (_state)); \
+ \
+ if (_cond) \
+ break; \
+ \
+ if ((_state) == TASK_INTERRUPTIBLE && \
+ signal_pending(current)) { \
+ __ret = -ERESTARTSYS; \
+ break; \
+ } \
+ \
+ vs_service_state_unlock(_service); \
+ schedule(); \
+ vs_service_state_lock(_service); \
+ } while (!(_cond)); \
+ \
+ finish_wait(&(_wq), &__wait); \
+ __ret; \
+ })
+
+/* Legacy wait macros; need rewriting to use __vs_wait_state(). */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_wait_event(_service, _wq, _cond) \
+ __vs_service_wait_event(_service, _wq, _cond, TASK_INTERRUPTIBLE)
+#define vs_service_wait_event_nointr(_service, _wq, _cond) \
+ __vs_service_wait_event(_service, _wq, _cond, TASK_UNINTERRUPTIBLE)
+
+/**
+ * __vs_wait_state - block until a condition becomes true on a service state.
+ *
+ * @_state: The protocol state to wait on.
+ * @_cond: Condition to wait for.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ * with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ * timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ * non-framework tasklet); otherwise nothing.
+ *
+ * Return: Returns 0 once the condition becomes true, or a negative error
+ * code as described above.
+ *
+ * This macro blocks waiting until a particular condition becomes true on a
+ * service state. The service must be running; if not, or if it ceases to be
+ * running during the wait, -ECANCELED will be returned.
+ *
+ * This is not an exclusive wait. If an exclusive wait is desired it is
+ * usually better to use the waiting alloc or send functions.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The state lock will be dropped by waiting
+ * but reacquired before returning, unless -ENOLINK is returned, in which case
+ * the service driver has been unbound and the lock cannot be reacquired.
+ */
+#define __vs_wait_state(_state, _cond, _intr, _timeout, _bh) \
+ ({ \
+ DEFINE_WAIT(__wait); \
+ int __ret; \
+ int __jiffies __maybe_unused = (_timeout); \
+ struct vs_service_device *__service = (_state)->service;\
+ \
+ while (1) { \
+ prepare_to_wait(&__service->quota_wq, &__wait, \
+ _intr ? TASK_INTERRUPTIBLE : \
+ TASK_UNINTERRUPTIBLE); \
+ \
+ if (!VSERVICE_BASE_STATE_IS_RUNNING( \
+ (_state)->state.base)) { \
+ __ret = -ECANCELED; \
+ break; \
+ } \
+ \
+ if (_cond) { \
+ __ret = 0; \
+ break; \
+ } \
+ \
+ if (_intr && signal_pending(current)) { \
+ __ret = -ERESTARTSYS; \
+ break; \
+ } \
+ \
+ vs_state_unlock##_bh(_state); \
+ \
+ if (_timeout >= 0) { \
+ __jiffies = schedule_timeout(__jiffies);\
+ if (!__jiffies) { \
+ __ret = -ETIMEDOUT; \
+ break; \
+ } \
+ } else { \
+ schedule(); \
+ } \
+ \
+ if (!vs_state_lock_safe##_bh(_state)) { \
+ __ret = -ENOLINK; \
+ break; \
+ } \
+ } \
+ \
+ finish_wait(&__service->quota_wq, &__wait); \
+ __ret; \
+ })
+
+/* Specialisations of __vs_wait_state for common uses. */
+#define vs_wait_state(_state, _cond) \
+ __vs_wait_state(_state, _cond, true, -1,)
+#define vs_wait_state_timeout(_state, _cond, _timeout) \
+ __vs_wait_state(_state, _cond, true, _timeout,)
+#define vs_wait_state_nointr(_state, _cond) \
+ __vs_wait_state(_state, _cond, false, -1,)
+#define vs_wait_state_nointr_timeout(_state, _cond, _timeout) \
+ __vs_wait_state(_state, _cond, false, _timeout,)
+#define vs_wait_state_bh(_state, _cond) \
+ __vs_wait_state(_state, _cond, true, -1, _bh)
+#define vs_wait_state_timeout_bh(_state, _cond, _timeout) \
+ __vs_wait_state(_state, _cond, true, _timeout, _bh)
+#define vs_wait_state_nointr_bh(_state, _cond) \
+ __vs_wait_state(_state, _cond, false, -1, _bh)
+#define vs_wait_state_nointr_timeout_bh(_state, _cond, _timeout) \
+ __vs_wait_state(_state, _cond, false, _timeout, _bh)
+
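+/*
+ * Example (sketch only): blocking until the protocol reaches a state of
+ * interest, with a 5 second timeout. The "ready" flag is a hypothetical
+ * field of the driver's protocol state.
+ *
+ *	err = vs_wait_state_timeout(state, state->ready, 5 * HZ);
+ *	if (err < 0)
+ *		return err; (-ECANCELED, -ERESTARTSYS, -ETIMEDOUT or -ENOLINK)
+ */
+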
+/**
+ * __vs_wait_alloc - block until quota is available, then allocate a buffer.
+ *
+ * @_state: The protocol state to allocate a message for.
+ * @_alloc_func: The message buffer allocation function to run. This is the
+ * full function invocation, not a pointer to the function.
+ * @_cond: Additional condition which must remain true, or else the wait
+ * will fail with -ECANCELED. This is typically used to check the
+ * service's protocol state. Note that this condition will only
+ * be checked after sleeping; it is assumed to be true when the
+ * macro is first called.
+ * @_unlock: If true, drop the service state lock before sleeping. The wait
+ * may then fail with -ENOLINK if the driver is detached from the
+ * service, in which case the lock is dropped.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ * with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ * timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ * non-framework tasklet); otherwise nothing.
+ *
+ * Return: Return a pointer to a message buffer on successful allocation,
+ * or an error code in ERR_PTR form.
+ *
+ * This macro calls a specified message allocation function, and blocks
+ * if it returns -ENOBUFS, waiting until quota is available on the service
+ * before retrying. It aborts the wait if the service resets, or if the
+ * optionally specified condition becomes false. Note that a reset followed
+ * quickly by an activate might not trigger a failure; if that is significant
+ * for your driver, use the optional condition to detect it.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The reference and state lock will still be
+ * held on return, unless -ENOLINK is returned, in which case the lock has been
+ * dropped and cannot be reacquired.
+ *
+ * This is always an exclusive wait. It is safe to call without separately
+ * waking the waitqueue afterwards; if the allocator function fails for any
+ * reason other than quota exhaustion then another waiter will be woken.
+ *
+ * Be wary of potential deadlocks when using this macro on the service
+ * workqueue. If both ends block their service workqueues waiting for quota,
+ * then no progress can be made. It is usually only correct to block the
+ * service workqueue on the server side.
+ */
+#define __vs_wait_alloc(_state, _alloc_func, _cond, _unlock, _intr, \
+ _timeout, _bh) \
+ ({ \
+ DEFINE_WAIT(__wait); \
+ struct vs_mbuf *__mbuf = NULL; \
+ int __jiffies __maybe_unused = (_timeout); \
+ struct vs_service_device *__service = (_state)->service;\
+ \
+ while (!vs_service_send_mbufs_available(__service)) { \
+ if (_intr && signal_pending(current)) { \
+ __mbuf = ERR_PTR(-ERESTARTSYS); \
+ break; \
+ } \
+ \
+ prepare_to_wait_exclusive( \
+ &__service->quota_wq, &__wait, \
+ _intr ? TASK_INTERRUPTIBLE : \
+ TASK_UNINTERRUPTIBLE); \
+ \
+ if (_unlock) \
+ vs_state_unlock##_bh(_state); \
+ \
+ if (_timeout >= 0) { \
+ __jiffies = schedule_timeout(__jiffies);\
+ if (!__jiffies) { \
+ __mbuf = ERR_PTR(-ETIMEDOUT); \
+ break; \
+ } \
+ } else { \
+ schedule(); \
+ } \
+ \
+ if (_unlock && !vs_state_lock_safe##_bh( \
+ _state)) { \
+ __mbuf = ERR_PTR(-ENOLINK); \
+ break; \
+ } \
+ \
+ if (!VSERVICE_BASE_STATE_IS_RUNNING( \
+ (_state)->state.base) || \
+ !(_cond)) { \
+ __mbuf = ERR_PTR(-ECANCELED); \
+ break; \
+ } \
+ } \
+ finish_wait(&__service->quota_wq, &__wait); \
+ \
+ if (__mbuf == NULL) \
+ __mbuf = (_alloc_func); \
+ if (IS_ERR(__mbuf) && (PTR_ERR(__mbuf) != -ENOBUFS)) \
+ wake_up(&__service->quota_wq); \
+ __mbuf; \
+ })
+
+/* Specialisations of __vs_wait_alloc for common uses. */
+#define vs_wait_alloc(_state, _cond, _alloc_func) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1,)
+#define vs_wait_alloc_timeout(_state, _cond, _alloc_func, _timeout) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, true, _timeout,)
+#define vs_wait_alloc_nointr(_state, _cond, _alloc_func) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1,)
+#define vs_wait_alloc_nointr_timeout(_state, _cond, _alloc_func, _timeout) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, false, _timeout,)
+#define vs_wait_alloc_bh(_state, _cond, _alloc_func) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1, _bh)
+#define vs_wait_alloc_timeout_bh(_state, _cond, _alloc_func, _timeout) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, true, _timeout, _bh)
+#define vs_wait_alloc_nointr_bh(_state, _cond, _alloc_func) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1, _bh)
+#define vs_wait_alloc_nointr_timeout_bh(_state, _cond, _alloc_func, _timeout) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, false, _timeout, _bh)
+#define vs_wait_alloc_locked(_state, _alloc_func) \
+ __vs_wait_alloc(_state, _alloc_func, true, false, true, -1,)
+
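+/*
+ * Example (sketch only): allocating a message, sleeping for send quota if
+ * necessary. my_proto_alloc_msg() and the "closing" flag are hypothetical
+ * stand-ins for generated protocol calls and driver state.
+ *
+ *	mbuf = vs_wait_alloc(state, !state->closing,
+ *			my_proto_alloc_msg(state, GFP_KERNEL));
+ *	if (IS_ERR(mbuf))
+ *		return PTR_ERR(mbuf);
+ */
+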
+/* Legacy wait macros, to be removed and replaced with those above. */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_waiting_alloc(_state, _alloc_func) \
+ __vs_wait_alloc(_state, _alloc_func, true, false, true, -1,)
+#define vs_service_waiting_alloc_cond_locked(_state, _alloc_func, _cond) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, true, -1,)
+#define vs_service_waiting_alloc_cond_locked_nointr(_state, _alloc_func, _cond) \
+ __vs_wait_alloc(_state, _alloc_func, _cond, true, false, -1,)
+
+/**
+ * __vs_wait_send - block until quota is available, then send a message.
+ *
+ * @_state: The protocol state to send a message for.
+ * @_cond: Additional condition which must remain true, or else the wait
+ * will fail with -ECANCELED. This is typically used to check the
+ * service's protocol state. Note that this condition will only
+ * be checked after sleeping; it is assumed to be true when the
+ * macro is first called.
+ * @_send_func: The message send function to run. This is the full function
+ * invocation, not a pointer to the function.
+ * @_unlock: If true, drop the service state lock before sleeping. The wait
+ * may then fail with -ENOLINK if the driver is detached from the
+ * service, in which case the lock is dropped.
+ * @_check_running: If true, the wait will return -ECANCELED if the service's
+ * base state is not active, or ceases to be active.
+ * @_intr: If true, perform an interruptible wait; the wait may then fail
+ * with -ERESTARTSYS.
+ * @_timeout: A timeout in jiffies, or negative for no timeout. If the
+ * timeout expires, the wait will fail with -ETIMEDOUT.
+ * @_bh: The token _bh if this service uses tx_atomic (sends from a
+ * non-framework tasklet); otherwise nothing.
+ *
+ * Return: If the send succeeds, then 0 is returned; otherwise an error
+ * code may be returned as described above.
+ *
+ * This macro calls a specified message send function, and blocks if it
+ * returns -ENOBUFS, waiting until quota is available on the service before
+ * retrying. It aborts the wait if it finds the service in reset, or if the
+ * optionally specified condition becomes false. Note that a reset followed
+ * quickly by an activate might not trigger a failure; if that is significant
+ * for your driver, use the optional condition to detect it.
+ *
+ * This macro must be called with a reference to the service held, and with
+ * the service's state lock held. The reference and state lock will still be
+ * held on return, unless -ENOLINK is returned, in which case the lock has been
+ * dropped and cannot be reacquired.
+ *
+ * This is always an exclusive wait. It is safe to call without separately
+ * waking the waitqueue afterwards; if the send function fails for any
+ * reason other than quota exhaustion then another waiter will be woken.
+ *
+ * Be wary of potential deadlocks when calling this function on the service
+ * workqueue. If both ends block their service workqueues waiting for quota,
+ * then no progress can be made. It is usually only correct to block the
+ * service workqueue on the server side.
+ */
+#define __vs_wait_send(_state, _cond, _send_func, _unlock, \
+ _check_running, _intr, _timeout, _bh) \
+ ({ \
+ DEFINE_WAIT(__wait); \
+ int __ret = 0; \
+ int __jiffies __maybe_unused = (_timeout); \
+ struct vs_service_device *__service = (_state)->service;\
+ \
+ while (!vs_service_send_mbufs_available(__service)) { \
+ if (_intr && signal_pending(current)) { \
+ __ret = -ERESTARTSYS; \
+ break; \
+ } \
+ \
+ prepare_to_wait_exclusive( \
+ &__service->quota_wq, &__wait, \
+ _intr ? TASK_INTERRUPTIBLE : \
+ TASK_UNINTERRUPTIBLE); \
+ \
+ if (_unlock) \
+ vs_state_unlock##_bh(_state); \
+ \
+ if (_timeout >= 0) { \
+ __jiffies = schedule_timeout(__jiffies);\
+ if (!__jiffies) { \
+ __ret = -ETIMEDOUT; \
+ break; \
+ } \
+ } else { \
+ schedule(); \
+ } \
+ \
+ if (_unlock && !vs_state_lock_safe##_bh( \
+ _state)) { \
+ __ret = -ENOLINK; \
+ break; \
+ } \
+ \
+ if ((_check_running && \
+ !VSERVICE_BASE_STATE_IS_RUNNING(\
+ (_state)->state.base)) || \
+ !(_cond)) { \
+ __ret = -ECANCELED; \
+ break; \
+ } \
+ } \
+ finish_wait(&__service->quota_wq, &__wait); \
+ \
+ if (!__ret) \
+ __ret = (_send_func); \
+ if ((__ret < 0) && (__ret != -ENOBUFS)) \
+ wake_up(&__service->quota_wq); \
+ __ret; \
+ })
+
+/* Specialisations of __vs_wait_send for common uses. */
+#define vs_wait_send(_state, _cond, _send_func) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, true, -1,)
+#define vs_wait_send_timeout(_state, _cond, _send_func, _timeout) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, true, _timeout,)
+#define vs_wait_send_nointr(_state, _cond, _send_func) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, false, -1,)
+#define vs_wait_send_nointr_timeout(_state, _cond, _send_func, _timeout) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, false, _timeout,)
+#define vs_wait_send_bh(_state, _cond, _send_func) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, true, -1, _bh)
+#define vs_wait_send_timeout_bh(_state, _cond, _send_func, _timeout) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, true, \
+ _timeout, _bh)
+#define vs_wait_send_nointr_bh(_state, _cond, _send_func) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, false, -1, _bh)
+#define vs_wait_send_nointr_timeout_bh(_state, _cond, _send_func, _timeout) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, false, \
+ _timeout, _bh)
+#define vs_wait_send_locked(_state, _send_func) \
+ __vs_wait_send(_state, true, _send_func, false, true, true, -1,)
+#define vs_wait_send_locked_nocheck(_state, _send_func) \
+ __vs_wait_send(_state, true, _send_func, false, false, true, -1,)
+
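+/*
+ * Example (sketch only): sending a message, sleeping for quota if necessary.
+ * my_proto_send_msg() and the "closing" flag are hypothetical stand-ins for
+ * generated protocol calls and driver state.
+ *
+ *	err = vs_wait_send(state, !state->closing,
+ *			my_proto_send_msg(state, mbuf));
+ *	if (err < 0)
+ *		return err;
+ */
+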
+/* Legacy wait macros, to be removed and replaced with those above. */
+/* FIXME: Redmine ticket #229 - philip. */
+#define vs_service_waiting_send(_state, _send_func) \
+ __vs_wait_send(_state, true, _send_func, true, true, true, -1,)
+#define vs_service_waiting_send_nointr(_state, _send_func) \
+ __vs_wait_send(_state, true, _send_func, true, true, false, -1,)
+#define vs_service_waiting_send_cond(_state, _cond, _send_func) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, true, -1,)
+#define vs_service_waiting_send_cond_nointr(_state, _cond, _send_func) \
+ __vs_wait_send(_state, _cond, _send_func, true, true, false, -1,)
+#define vs_service_waiting_send_nocheck(_state, _send_func) \
+ __vs_wait_send(_state, true, _send_func, true, false, true, -1,)
+
+#endif /* _VSERVICE_SERVICE_WAIT_H */
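
For context, a hypothetical caller of the blocking-send macros above might look like this (my_state and my_send_ack() are illustrative names, not part of the generated API):

/*
 * Sketch only: send an acknowledgement, sleeping interruptibly until
 * the service has send quota. Called with the state lock held.
 */
static int my_send_ack_blocking(struct my_state *state)
{
	/* vs_wait_send() returns -ENOLINK (lock dropped) on reset,
	 * -ERESTARTSYS on a signal, or the send function's result.
	 */
	return vs_wait_send(state, true, my_send_ack(state));
}
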
diff --git a/kernel/audit.c b/kernel/audit.c
index 3461a3d..194fa1a 100644
--- a/kernel/audit.c
+++ b/kernel/audit.c
@@ -125,6 +125,7 @@
static atomic_t audit_lost = ATOMIC_INIT(0);
/* The netlink socket. */
+static DEFINE_MUTEX(audit_sock_mutex);
static struct sock *audit_sock;
static int audit_net_id;
@@ -411,7 +412,9 @@
restart:
/* take a reference in case we can't send it and we want to hold it */
skb_get(skb);
+ mutex_lock(&audit_sock_mutex);
err = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+ mutex_unlock(&audit_sock_mutex);
if (err < 0) {
pr_err("netlink_unicast sending to audit_pid=%d returned error: %d\n",
audit_pid, err);
@@ -423,7 +426,9 @@
snprintf(s, sizeof(s), "audit_pid=%d reset", audit_pid);
audit_log_lost(s);
audit_pid = 0;
+ mutex_lock(&audit_sock_mutex);
audit_sock = NULL;
+ mutex_unlock(&audit_sock_mutex);
} else {
pr_warn("re-scheduling(#%d) write to audit_pid=%d\n",
attempts, audit_pid);
@@ -811,12 +816,16 @@
static int audit_replace(pid_t pid)
{
+ int len;
struct sk_buff *skb = audit_make_reply(0, 0, AUDIT_REPLACE, 0, 0,
&pid, sizeof(pid));
if (!skb)
return -ENOMEM;
- return netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+ mutex_lock(&audit_sock_mutex);
+ len = netlink_unicast(audit_sock, skb, audit_nlk_portid, 0);
+ mutex_unlock(&audit_sock_mutex);
+ return len;
}
static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
@@ -901,7 +910,9 @@
audit_log_config_change("audit_pid", new_pid, audit_pid, 1);
audit_pid = new_pid;
audit_nlk_portid = NETLINK_CB(skb).portid;
+ mutex_lock(&audit_sock_mutex);
audit_sock = skb->sk;
+ mutex_unlock(&audit_sock_mutex);
}
if (s.mask & AUDIT_STATUS_RATE_LIMIT) {
err = audit_set_rate_limit(s.rate_limit);
@@ -1169,10 +1180,12 @@
{
struct audit_net *aunet = net_generic(net, audit_net_id);
struct sock *sock = aunet->nlsk;
+ mutex_lock(&audit_sock_mutex);
if (sock == audit_sock) {
audit_pid = 0;
audit_sock = NULL;
}
+ mutex_unlock(&audit_sock_mutex);
RCU_INIT_POINTER(aunet->nlsk, NULL);
synchronize_net();
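
The audit fix above is an instance of the standard idiom of serialising every read and write of a shared pointer behind one mutex; a minimal sketch of the same pattern (names hypothetical):

static DEFINE_MUTEX(obj_mutex);
static struct sock *shared_sock;	/* only touched under obj_mutex */

static int send_via_shared_sock(struct sk_buff *skb, u32 portid)
{
	int err;

	mutex_lock(&obj_mutex);		/* pointer cannot be cleared under us */
	if (shared_sock)
		err = netlink_unicast(shared_sock, skb, portid, 0);
	else
		err = -ECONNREFUSED;
	mutex_unlock(&obj_mutex);
	return err;
}
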
diff --git a/kernel/module.c b/kernel/module.c
index 8a84031..1277bdf 100644
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -4035,7 +4035,7 @@
for (i = 0; i < kallsyms->num_symtab; i++)
if (strcmp(name, symname(kallsyms, i)) == 0 &&
- kallsyms->symtab[i].st_info != 'U')
+ kallsyms->symtab[i].st_shndx != SHN_UNDEF)
return kallsyms->symtab[i].st_value;
return 0;
}
@@ -4081,6 +4081,10 @@
if (mod->state == MODULE_STATE_UNFORMED)
continue;
for (i = 0; i < kallsyms->num_symtab; i++) {
+
+ if (kallsyms->symtab[i].st_shndx == SHN_UNDEF)
+ continue;
+
ret = fn(data, symname(kallsyms, i),
mod, kallsyms->symtab[i].st_value);
if (ret != 0)
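
Both module.c hunks rely on the same ELF fact: an imported (undefined) symbol carries section index SHN_UNDEF, whereas st_info encodes binding/type and is not a reliable definedness test. A sketch of the check in isolation (assuming the usual linux/elf.h definitions):

#include <linux/elf.h>

/* True only for symbols the module itself defines; imports have
 * st_shndx == SHN_UNDEF and must be skipped by lookup/walk code.
 */
static bool symbol_is_defined(const Elf_Sym *sym)
{
	return sym->st_shndx != SHN_UNDEF;
}
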
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8f29103..424bb66 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8031,6 +8031,7 @@
raw_spin_unlock_irqrestore(&rq->lock, flags);
update_max_interval();
+ walt_update_min_max_capacity();
return 0;
}
@@ -8064,6 +8065,7 @@
return ret;
}
sched_domains_numa_masks_clear(cpu);
+ walt_update_min_max_capacity();
return 0;
}
diff --git a/kernel/sched/cpuacct.c b/kernel/sched/cpuacct.c
index bc0b309c..4c88279 100644
--- a/kernel/sched/cpuacct.c
+++ b/kernel/sched/cpuacct.c
@@ -297,7 +297,7 @@
for (stat = 0; stat < CPUACCT_STAT_NSTATS; stat++) {
seq_printf(sf, "%s %lld\n",
cpuacct_stat_desc[stat],
- cputime64_to_clock_t(val[stat]));
+ nsec_to_clock_t(val[stat]));
}
return 0;
diff --git a/kernel/sched/cputime.c b/kernel/sched/cputime.c
index 7cabd8c..a1f00f1 100644
--- a/kernel/sched/cputime.c
+++ b/kernel/sched/cputime.c
@@ -38,6 +38,18 @@
sched_clock_irqtime = 0;
}
+static void irqtime_account_delta(struct irqtime *irqtime, u64 delta,
+ enum cpu_usage_stat idx)
+{
+ u64 *cpustat = kcpustat_this_cpu->cpustat;
+
+ u64_stats_update_begin(&irqtime->sync);
+ cpustat[idx] += delta;
+ irqtime->total += delta;
+ irqtime->tick_delta += delta;
+ u64_stats_update_end(&irqtime->sync);
+}
+
/*
* Called before incrementing preempt_count on {soft,}irq_enter
* and before decrementing preempt_count on {soft,}irq_exit.
@@ -48,8 +60,9 @@
s64 delta;
int cpu;
u64 wallclock;
+#ifdef CONFIG_SCHED_WALT
bool account = true;
-
+#endif
if (!sched_clock_irqtime)
return;
@@ -58,7 +71,6 @@
delta = wallclock - irqtime->irq_start_time;
irqtime->irq_start_time += delta;
- u64_stats_update_begin(&irqtime->sync);
/*
* We do not account for softirq time from ksoftirqd here.
* We want to continue accounting softirq time to ksoftirqd thread
@@ -66,62 +78,38 @@
* that do not consume any time, but still wants to run.
*/
if (hardirq_count())
- irqtime->hardirq_time += delta;
+ irqtime_account_delta(irqtime, delta, CPUTIME_IRQ);
else if (in_serving_softirq() && curr != this_cpu_ksoftirqd())
- irqtime->softirq_time += delta;
+ irqtime_account_delta(irqtime, delta, CPUTIME_SOFTIRQ);
+#ifdef CONFIG_SCHED_WALT
else
account = false;
- u64_stats_update_end(&irqtime->sync);
-
if (account)
sched_account_irqtime(cpu, curr, delta, wallclock);
else if (curr != this_cpu_ksoftirqd())
sched_account_irqstart(cpu, curr, wallclock);
+#endif
}
EXPORT_SYMBOL_GPL(irqtime_account_irq);
-static cputime_t irqtime_account_update(u64 irqtime, int idx, cputime_t maxtime)
+static cputime_t irqtime_tick_accounted(cputime_t maxtime)
{
- u64 *cpustat = kcpustat_this_cpu->cpustat;
- u64 base = cpustat[idx];
- cputime_t irq_cputime;
+ struct irqtime *irqtime = this_cpu_ptr(&cpu_irqtime);
+ cputime_t delta;
- if (idx == CPUTIME_SOFTIRQ)
- base = kcpustat_this_cpu->softirq_no_ksoftirqd;
+ delta = nsecs_to_cputime(irqtime->tick_delta);
+ delta = min(delta, maxtime);
+ irqtime->tick_delta -= cputime_to_nsecs(delta);
- irq_cputime = nsecs_to_cputime64(irqtime) - base;
- irq_cputime = min(irq_cputime, maxtime);
- cpustat[idx] += irq_cputime;
-
- if (idx == CPUTIME_SOFTIRQ)
- kcpustat_this_cpu->softirq_no_ksoftirqd += irq_cputime;
-
- return irq_cputime;
-}
-
-static cputime_t irqtime_account_hi_update(cputime_t maxtime)
-{
- return irqtime_account_update(__this_cpu_read(cpu_irqtime.hardirq_time),
- CPUTIME_IRQ, maxtime);
-}
-
-static cputime_t irqtime_account_si_update(cputime_t maxtime)
-{
- return irqtime_account_update(__this_cpu_read(cpu_irqtime.softirq_time),
- CPUTIME_SOFTIRQ, maxtime);
+ return delta;
}
#else /* CONFIG_IRQ_TIME_ACCOUNTING */
#define sched_clock_irqtime (0)
-static cputime_t irqtime_account_hi_update(cputime_t dummy)
-{
- return 0;
-}
-
-static cputime_t irqtime_account_si_update(cputime_t dummy)
+static cputime_t irqtime_tick_accounted(cputime_t dummy)
{
return 0;
}
@@ -161,7 +149,7 @@
index = (task_nice(p) > 0) ? CPUTIME_NICE : CPUTIME_USER;
/* Add user time to cpustat. */
- task_group_account_field(p, index, (__force u64) cputime);
+ task_group_account_field(p, index, cputime_to_nsecs(cputime));
/* Account for user time used */
acct_account_cputime(p);
@@ -189,11 +177,11 @@
/* Add guest time to cpustat. */
if (task_nice(p) > 0) {
- cpustat[CPUTIME_NICE] += (__force u64) cputime;
- cpustat[CPUTIME_GUEST_NICE] += (__force u64) cputime;
+ cpustat[CPUTIME_NICE] += cputime_to_nsecs(cputime);
+ cpustat[CPUTIME_GUEST_NICE] += cputime_to_nsecs(cputime);
} else {
- cpustat[CPUTIME_USER] += (__force u64) cputime;
- cpustat[CPUTIME_GUEST] += (__force u64) cputime;
+ cpustat[CPUTIME_USER] += cputime_to_nsecs(cputime);
+ cpustat[CPUTIME_GUEST] += cputime_to_nsecs(cputime);
}
}
@@ -214,7 +202,7 @@
account_group_system_time(p, cputime);
/* Add system time to cpustat. */
- task_group_account_field(p, index, (__force u64) cputime);
+ task_group_account_field(p, index, cputime_to_nsecs(cputime));
/* Account for system time used */
acct_account_cputime(p);
@@ -258,7 +246,7 @@
{
u64 *cpustat = kcpustat_this_cpu->cpustat;
- cpustat[CPUTIME_STEAL] += (__force u64) cputime;
+ cpustat[CPUTIME_STEAL] += cputime_to_nsecs(cputime);
}
/*
@@ -271,9 +259,9 @@
struct rq *rq = this_rq();
if (atomic_read(&rq->nr_iowait) > 0)
- cpustat[CPUTIME_IOWAIT] += (__force u64) cputime;
+ cpustat[CPUTIME_IOWAIT] += cputime_to_nsecs(cputime);
else
- cpustat[CPUTIME_IDLE] += (__force u64) cputime;
+ cpustat[CPUTIME_IDLE] += cputime_to_nsecs(cputime);
}
/*
@@ -314,10 +302,7 @@
accounted = steal_account_process_time(max);
if (accounted < max)
- accounted += irqtime_account_hi_update(max - accounted);
-
- if (accounted < max)
- accounted += irqtime_account_si_update(max - accounted);
+ accounted += irqtime_tick_accounted(max - accounted);
return accounted;
}
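
The writer side added here (irqtime_account_delta()) pairs with the seqcount-protected reader in irq_time_read() further down; a minimal sketch of that u64_stats idiom, with a hypothetical stats struct:

#include <linux/u64_stats_sync.h>

struct my_stats {
	u64 total;
	struct u64_stats_sync sync;
};

static void my_stats_add(struct my_stats *s, u64 delta)
{
	u64_stats_update_begin(&s->sync);	/* begin writer section */
	s->total += delta;
	u64_stats_update_end(&s->sync);
}

static u64 my_stats_read(struct my_stats *s)
{
	unsigned int seq;
	u64 total;

	do {	/* retry if a writer raced with us (matters on 32-bit) */
		seq = __u64_stats_fetch_begin(&s->sync);
		total = s->total;
	} while (__u64_stats_fetch_retry(&s->sync, seq));

	return total;
}
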
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0bb5046..7944ae9 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -7350,7 +7350,7 @@
return cpumask_test_cpu(cpu, tsk_cpus_allowed(p)) &&
cpu_active(cpu) && !cpu_isolated(cpu) &&
capacity_orig_of(cpu) >= capacity_orig_of(rtg_target_cpu) &&
- task_fits_max(p, cpu);
+ task_fits_max(p, cpu) && !__cpu_overutilized(cpu, task_util(p));
}
#define SCHED_SELECT_PREV_CPU_NSEC 2000000
@@ -11166,7 +11166,8 @@
* - A task which has been woken up by try_to_wake_up() and
* waiting for actually being woken up by sched_ttwu_pending().
*/
- if (!se->sum_exec_runtime || p->state == TASK_WAKING)
+ if (!se->sum_exec_runtime ||
+ (p->state == TASK_WAKING && p->sched_remote_wakeup))
return true;
return false;
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e24df36..c189b2a 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -4,6 +4,7 @@
#include <linux/sched/rt.h>
#include <linux/u64_stats_sync.h>
#include <linux/sched/deadline.h>
+#include <linux/kernel_stat.h>
#include <linux/binfmts.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
@@ -2210,14 +2211,19 @@
#ifdef CONFIG_IRQ_TIME_ACCOUNTING
struct irqtime {
- u64 hardirq_time;
- u64 softirq_time;
+ u64 total;
+ u64 tick_delta;
u64 irq_start_time;
struct u64_stats_sync sync;
};
DECLARE_PER_CPU(struct irqtime, cpu_irqtime);
+/*
+ * Returns the irqtime minus the softirq time computed by ksoftirqd.
+ * Otherwise ksoftirqd's own runtime would be subtracted from its
+ * sum_exec_runtime, which would then never move forward.
+ */
static inline u64 irq_time_read(int cpu)
{
struct irqtime *irqtime = &per_cpu(cpu_irqtime, cpu);
@@ -2226,7 +2232,7 @@
do {
seq = __u64_stats_fetch_begin(&irqtime->sync);
- total = irqtime->softirq_time + irqtime->hardirq_time;
+ total = irqtime->total;
} while (__u64_stats_fetch_retry(&irqtime->sync, seq));
return total;
@@ -2422,7 +2428,11 @@
int i;
int max_cap = 0, min_cap = INT_MAX;
- for_each_online_cpu(i) {
+ for_each_possible_cpu(i) {
+
+ if (!cpu_active(i))
+ continue;
+
max_cap = max(max_cap, cpu_capacity(i));
min_cap = min(min_cap, cpu_capacity(i));
}
@@ -2687,6 +2697,7 @@
}
extern void walt_sched_energy_populated_callback(void);
+extern void walt_update_min_max_capacity(void);
#else /* CONFIG_SCHED_WALT */
@@ -2820,6 +2831,7 @@
}
static inline void walt_sched_energy_populated_callback(void) { }
+static inline void walt_update_min_max_capacity(void) { }
#endif /* CONFIG_SCHED_WALT */
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index a8fab0c..92fcb92 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -791,7 +791,7 @@
u64 prefer_idle)
{
struct schedtune *st = css_st(css);
- st->prefer_idle = prefer_idle;
+ st->prefer_idle = !!prefer_idle;
return 0;
}
diff --git a/kernel/sched/walt.c b/kernel/sched/walt.c
index 6144dee..5184252 100644
--- a/kernel/sched/walt.c
+++ b/kernel/sched/walt.c
@@ -2185,7 +2185,7 @@
return capacity;
}
-static void update_min_max_capacity(void)
+void walt_update_min_max_capacity(void)
{
unsigned long flags;
@@ -2411,7 +2411,7 @@
return 0;
if (val == CPUFREQ_REMOVE_POLICY || val == CPUFREQ_CREATE_POLICY) {
- update_min_max_capacity();
+ walt_update_min_max_capacity();
return 0;
}
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 78c0e04..aa25aac 100644
--- a/kernel/time/alarmtimer.c
+++ b/kernel/time/alarmtimer.c
@@ -815,7 +815,8 @@
/* Convert (if necessary) to absolute time */
if (flags != TIMER_ABSTIME) {
ktime_t now = alarm_bases[type].gettime();
- exp = ktime_add(now, exp);
+
+ exp = ktime_add_safe(now, exp);
}
if (alarmtimer_do_nsleep(&alarm, exp))
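
ktime_add_safe() saturates at KTIME_MAX rather than wrapping, so a huge user-supplied relative timeout can no longer overflow into the past and fire immediately; a sketch of the intent (helper name hypothetical):

#include <linux/hrtimer.h>	/* declares ktime_add_safe() */

static ktime_t absolute_expiry(ktime_t now, ktime_t rel_timeout)
{
	/* Plain ktime_add() could wrap negative for rel_timeout near
	 * KTIME_MAX; the _safe variant clamps the sum to KTIME_MAX.
	 */
	return ktime_add_safe(now, rel_timeout);
}
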
diff --git a/kernel/time/time.c b/kernel/time/time.c
index 15f3487..9e5ffd1 100644
--- a/kernel/time/time.c
+++ b/kernel/time/time.c
@@ -704,6 +704,16 @@
#endif
}
+u64 jiffies64_to_nsecs(u64 j)
+{
+#if !(NSEC_PER_SEC % HZ)
+ return (NSEC_PER_SEC / HZ) * j;
+# else
+ return div_u64(j * HZ_TO_NSEC_NUM, HZ_TO_NSEC_DEN);
+#endif
+}
+EXPORT_SYMBOL(jiffies64_to_nsecs);
+
/**
* nsecs_to_jiffies64 - Convert nsecs in u64 to jiffies64
*
diff --git a/kernel/time/timeconst.bc b/kernel/time/timeconst.bc
index c486889..f83bbb8 100644
--- a/kernel/time/timeconst.bc
+++ b/kernel/time/timeconst.bc
@@ -98,6 +98,12 @@
print "#define HZ_TO_USEC_DEN\t\t", hz/cd, "\n"
print "#define USEC_TO_HZ_NUM\t\t", hz/cd, "\n"
print "#define USEC_TO_HZ_DEN\t\t", 1000000/cd, "\n"
+
+ cd=gcd(hz,1000000000)
+ print "#define HZ_TO_NSEC_NUM\t\t", 1000000000/cd, "\n"
+ print "#define HZ_TO_NSEC_DEN\t\t", hz/cd, "\n"
+ print "#define NSEC_TO_HZ_NUM\t\t", hz/cd, "\n"
+ print "#define NSEC_TO_HZ_DEN\t\t", 1000000000/cd, "\n"
print "\n"
print "#endif /* KERNEL_TIMECONST_H */\n"
diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
index dc29b60..f316e90 100644
--- a/kernel/trace/ring_buffer.c
+++ b/kernel/trace/ring_buffer.c
@@ -1504,6 +1504,8 @@
tmp_iter_page = first_page;
do {
+ cond_resched();
+
to_remove_page = tmp_iter_page;
rb_inc_page(cpu_buffer, &tmp_iter_page);
diff --git a/lib/klist.c b/lib/klist.c
index 0507fa5..f6b5478 100644
--- a/lib/klist.c
+++ b/lib/klist.c
@@ -336,8 +336,9 @@
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *prev;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
prev = to_klist_node(last->n_node.prev);
@@ -356,7 +357,7 @@
prev = to_klist_node(prev->n_node.prev);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
@@ -377,8 +378,9 @@
void (*put)(struct klist_node *) = i->i_klist->put;
struct klist_node *last = i->i_cur;
struct klist_node *next;
+ unsigned long flags;
- spin_lock(&i->i_klist->k_lock);
+ spin_lock_irqsave(&i->i_klist->k_lock, flags);
if (last) {
next = to_klist_node(last->n_node.next);
@@ -397,7 +399,7 @@
next = to_klist_node(next->n_node.next);
}
- spin_unlock(&i->i_klist->k_lock);
+ spin_unlock_irqrestore(&i->i_klist->k_lock, flags);
if (put && last)
put(last);
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index 101dac0..fdffd62 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -251,8 +251,10 @@
if (!new_tbl)
return 0;
- for (old_hash = 0; old_hash < old_tbl->size; old_hash++)
+ for (old_hash = 0; old_hash < old_tbl->size; old_hash++) {
rhashtable_rehash_chain(ht, old_hash);
+ cond_resched();
+ }
/* Publish the new table pointer. */
rcu_assign_pointer(ht->tbl, new_tbl);
@@ -993,6 +995,7 @@
for (i = 0; i < tbl->size; i++) {
struct rhash_head *pos, *next;
+ cond_resched();
for (pos = rht_dereference(tbl->buckets[i], ht),
next = !rht_is_a_nulls(pos) ?
rht_dereference(pos->next, ht) : NULL;
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 748079a..9814cb5 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1445,7 +1445,7 @@
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
unsigned long new_addr, unsigned long old_end,
- pmd_t *old_pmd, pmd_t *new_pmd, bool *need_flush)
+ pmd_t *old_pmd, pmd_t *new_pmd)
{
spinlock_t *old_ptl, *new_ptl;
pmd_t pmd;
@@ -1476,7 +1476,7 @@
if (new_ptl != old_ptl)
spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
pmd = pmdp_huge_get_and_clear(mm, old_addr, old_pmd);
- if (pmd_present(pmd) && pmd_dirty(pmd))
+ if (pmd_present(pmd))
force_flush = true;
VM_BUG_ON(!pmd_none(*new_pmd));
@@ -1487,12 +1487,10 @@
pgtable_trans_huge_deposit(mm, new_pmd, pgtable);
}
set_pmd_at(mm, new_addr, new_pmd, pmd_mksoft_dirty(pmd));
- if (new_ptl != old_ptl)
- spin_unlock(new_ptl);
if (force_flush)
flush_tlb_range(vma, old_addr, old_addr + PMD_SIZE);
- else
- *need_flush = true;
+ if (new_ptl != old_ptl)
+ spin_unlock(new_ptl);
spin_unlock(old_ptl);
return true;
}
diff --git a/mm/madvise.c b/mm/madvise.c
index ee7ad9b..b753f02 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -83,7 +83,7 @@
new_flags |= VM_DONTDUMP;
break;
case MADV_DODUMP:
- if (new_flags & VM_SPECIAL) {
+ if (!is_vm_hugetlb_page(vma) && new_flags & VM_SPECIAL) {
error = -EINVAL;
goto out;
}
diff --git a/mm/mremap.c b/mm/mremap.c
index 2302762..0c2bac5 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -104,7 +104,7 @@
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
unsigned long old_addr, unsigned long old_end,
struct vm_area_struct *new_vma, pmd_t *new_pmd,
- unsigned long new_addr, bool need_rmap_locks, bool *need_flush)
+ unsigned long new_addr, bool need_rmap_locks)
{
struct mm_struct *mm = vma->vm_mm;
pte_t *old_pte, *new_pte, pte;
@@ -152,15 +152,17 @@
pte = ptep_get_and_clear(mm, old_addr, old_pte);
/*
- * If we are remapping a dirty PTE, make sure
+ * If we are remapping a valid PTE, make sure
* to flush TLB before we drop the PTL for the
- * old PTE or we may race with page_mkclean().
+ * PTE.
*
- * This check has to be done after we removed the
- * old PTE from page tables or another thread may
- * dirty it after the check and before the removal.
+ * NOTE! Both old and new PTL matter: the old one
+ * for racing with page_mkclean(), the new one to
+ * make sure the physical page stays valid until
+ * the TLB entry for the old mapping has been
+ * flushed.
*/
- if (pte_present(pte) && pte_dirty(pte))
+ if (pte_present(pte))
force_flush = true;
pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
pte = move_soft_dirty_pte(pte);
@@ -168,13 +170,11 @@
}
arch_leave_lazy_mmu_mode();
+ if (force_flush)
+ flush_tlb_range(vma, old_end - len, old_end);
if (new_ptl != old_ptl)
spin_unlock(new_ptl);
pte_unmap(new_pte - 1);
- if (force_flush)
- flush_tlb_range(vma, old_end - len, old_end);
- else
- *need_flush = true;
pte_unmap_unlock(old_pte - 1, old_ptl);
if (need_rmap_locks)
drop_rmap_locks(vma);
@@ -189,7 +189,6 @@
{
unsigned long extent, next, old_end;
pmd_t *old_pmd, *new_pmd;
- bool need_flush = false;
unsigned long mmun_start; /* For mmu_notifiers */
unsigned long mmun_end; /* For mmu_notifiers */
@@ -220,8 +219,7 @@
if (need_rmap_locks)
take_rmap_locks(vma);
moved = move_huge_pmd(vma, old_addr, new_addr,
- old_end, old_pmd, new_pmd,
- &need_flush);
+ old_end, old_pmd, new_pmd);
if (need_rmap_locks)
drop_rmap_locks(vma);
if (moved)
@@ -239,10 +237,8 @@
if (extent > LATENCY_LIMIT)
extent = LATENCY_LIMIT;
move_ptes(vma, old_pmd, old_addr, old_addr + extent, new_vma,
- new_pmd, new_addr, need_rmap_locks, &need_flush);
+ new_pmd, new_addr, need_rmap_locks);
}
- if (need_flush)
- flush_tlb_range(vma, old_end-len, old_addr);
mmu_notifier_invalidate_range_end(vma->vm_mm, mmun_start, mmun_end);
diff --git a/mm/shmem.c b/mm/shmem.c
index 61a39aa..290c5b8 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2160,6 +2160,8 @@
mpol_shared_policy_init(&info->policy, NULL);
break;
}
+
+ lockdep_annotate_inode_mutex_key(inode);
} else
shmem_free_inode(sb);
return inode;
diff --git a/mm/slub.c b/mm/slub.c
index 1a14283..b5c9fde 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1806,7 +1806,7 @@
{
struct page *page, *page2;
void *object = NULL;
- int available = 0;
+ unsigned int available = 0;
int objects;
/*
@@ -4900,10 +4900,10 @@
static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
size_t length)
{
- unsigned long objects;
+ unsigned int objects;
int err;
- err = kstrtoul(buf, 10, &objects);
+ err = kstrtouint(buf, 10, &objects);
if (err)
return err;
if (objects && !kmem_cache_has_cpu_partial(s))
diff --git a/mm/vmstat.c b/mm/vmstat.c
index fdd884a..727bc07 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -1079,6 +1079,9 @@
#ifdef CONFIG_SMP
"nr_tlb_remote_flush",
"nr_tlb_remote_flush_received",
+#else
+ "", /* nr_tlb_remote_flush */
+ "", /* nr_tlb_remote_flush_received */
#endif /* CONFIG_SMP */
"nr_tlb_local_flush_all",
"nr_tlb_local_flush_one",
@@ -1087,7 +1090,6 @@
#ifdef CONFIG_DEBUG_VM_VMACACHE
"vmacache_find_calls",
"vmacache_find_hits",
- "vmacache_full_flushes",
#endif
#ifdef CONFIG_SPECULATIVE_PAGE_FAULT
"speculative_pgfault"
diff --git a/net/6lowpan/iphc.c b/net/6lowpan/iphc.c
index 79f1fa2..23654f1 100644
--- a/net/6lowpan/iphc.c
+++ b/net/6lowpan/iphc.c
@@ -745,6 +745,7 @@
hdr.hop_limit, &hdr.daddr);
skb_push(skb, sizeof(hdr));
+ skb_reset_mac_header(skb);
skb_reset_network_header(skb);
skb_copy_to_linear_data(skb, &hdr, sizeof(hdr));
diff --git a/net/batman-adv/bat_v_elp.c b/net/batman-adv/bat_v_elp.c
index ee08540..5d79004 100644
--- a/net/batman-adv/bat_v_elp.c
+++ b/net/batman-adv/bat_v_elp.c
@@ -243,6 +243,7 @@
struct batadv_priv *bat_priv;
struct sk_buff *skb;
u32 elp_interval;
+ bool ret;
bat_v = container_of(work, struct batadv_hard_iface_bat_v, elp_wq.work);
hard_iface = container_of(bat_v, struct batadv_hard_iface, bat_v);
@@ -304,8 +305,11 @@
* may sleep and that is not allowed in an rcu protected
* context. Therefore schedule a task for that.
*/
- queue_work(batadv_event_workqueue,
- &hardif_neigh->bat_v.metric_work);
+ ret = queue_work(batadv_event_workqueue,
+ &hardif_neigh->bat_v.metric_work);
+
+ if (!ret)
+ batadv_hardif_neigh_put(hardif_neigh);
}
rcu_read_unlock();
diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c
index 582e276..8b6f654 100644
--- a/net/batman-adv/bridge_loop_avoidance.c
+++ b/net/batman-adv/bridge_loop_avoidance.c
@@ -1767,6 +1767,7 @@
{
struct batadv_bla_backbone_gw *backbone_gw;
struct ethhdr *ethhdr;
+ bool ret;
ethhdr = eth_hdr(skb);
@@ -1790,8 +1791,13 @@
if (unlikely(!backbone_gw))
return true;
- queue_work(batadv_event_workqueue, &backbone_gw->report_work);
- /* backbone_gw is unreferenced in the report work function function */
+ ret = queue_work(batadv_event_workqueue, &backbone_gw->report_work);
+
+	/* backbone_gw is unreferenced in the report work function
+	 * if the queue_work() call was successful
+	 */
+ if (!ret)
+ batadv_backbone_gw_put(backbone_gw);
return true;
}
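
Both batman-adv call sites above follow the same reference rule: the work handler owns one object reference, so when queue_work() returns false (the item was already pending) no extra handler run was added and the caller must drop the reference it took. A generic sketch (obj_get()/obj_put() are hypothetical kref wrappers):

static void obj_schedule_work(struct my_obj *obj)
{
	obj_get(obj);		/* reference consumed by the work handler */

	/* queue_work() returns false if the work was already queued;
	 * the pending handler run will drop only its own reference,
	 * so we must release the one we just took.
	 */
	if (!queue_work(system_wq, &obj->work))
		obj_put(obj);
}
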
diff --git a/net/batman-adv/network-coding.c b/net/batman-adv/network-coding.c
index e3baf69..a7b5cf0 100644
--- a/net/batman-adv/network-coding.c
+++ b/net/batman-adv/network-coding.c
@@ -845,24 +845,6 @@
spinlock_t *lock; /* Used to lock list selected by "int in_coding" */
struct list_head *list;
- /* Check if nc_node is already added */
- nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
-
- /* Node found */
- if (nc_node)
- return nc_node;
-
- nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
- if (!nc_node)
- return NULL;
-
- /* Initialize nc_node */
- INIT_LIST_HEAD(&nc_node->list);
- kref_init(&nc_node->refcount);
- ether_addr_copy(nc_node->addr, orig_node->orig);
- kref_get(&orig_neigh_node->refcount);
- nc_node->orig_node = orig_neigh_node;
-
/* Select ingoing or outgoing coding node */
if (in_coding) {
lock = &orig_neigh_node->in_coding_list_lock;
@@ -872,13 +854,34 @@
list = &orig_neigh_node->out_coding_list;
}
+ spin_lock_bh(lock);
+
+ /* Check if nc_node is already added */
+ nc_node = batadv_nc_find_nc_node(orig_node, orig_neigh_node, in_coding);
+
+ /* Node found */
+ if (nc_node)
+ goto unlock;
+
+ nc_node = kzalloc(sizeof(*nc_node), GFP_ATOMIC);
+ if (!nc_node)
+ goto unlock;
+
+ /* Initialize nc_node */
+ INIT_LIST_HEAD(&nc_node->list);
+ kref_init(&nc_node->refcount);
+ ether_addr_copy(nc_node->addr, orig_node->orig);
+ kref_get(&orig_neigh_node->refcount);
+ nc_node->orig_node = orig_neigh_node;
+
batadv_dbg(BATADV_DBG_NC, bat_priv, "Adding nc_node %pM -> %pM\n",
nc_node->addr, nc_node->orig_node->orig);
/* Add nc_node to orig_node */
- spin_lock_bh(lock);
kref_get(&nc_node->refcount);
list_add_tail_rcu(&nc_node->list, list);
+
+unlock:
spin_unlock_bh(lock);
return nc_node;
diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
index 84c1b38..05bc176 100644
--- a/net/batman-adv/soft-interface.c
+++ b/net/batman-adv/soft-interface.c
@@ -565,15 +565,20 @@
struct batadv_softif_vlan *vlan;
int err;
+ spin_lock_bh(&bat_priv->softif_vlan_list_lock);
+
vlan = batadv_softif_vlan_get(bat_priv, vid);
if (vlan) {
batadv_softif_vlan_put(vlan);
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return -EEXIST;
}
vlan = kzalloc(sizeof(*vlan), GFP_ATOMIC);
- if (!vlan)
+ if (!vlan) {
+ spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
return -ENOMEM;
+ }
vlan->bat_priv = bat_priv;
vlan->vid = vid;
@@ -581,17 +586,23 @@
atomic_set(&vlan->ap_isolation, 0);
- err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
- if (err) {
- kfree(vlan);
- return err;
- }
-
- spin_lock_bh(&bat_priv->softif_vlan_list_lock);
kref_get(&vlan->refcount);
hlist_add_head_rcu(&vlan->list, &bat_priv->softif_vlan_list);
spin_unlock_bh(&bat_priv->softif_vlan_list_lock);
+ /* batadv_sysfs_add_vlan cannot be in the spinlock section due to the
+ * sleeping behavior of the sysfs functions and the fs_reclaim lock
+ */
+ err = batadv_sysfs_add_vlan(bat_priv->soft_iface, vlan);
+ if (err) {
+ /* ref for the function */
+ batadv_softif_vlan_put(vlan);
+
+ /* ref for the list */
+ batadv_softif_vlan_put(vlan);
+ return err;
+ }
+
/* add a new TT local entry. This one will be marked with the NOPURGE
* flag
*/
diff --git a/net/batman-adv/sysfs.c b/net/batman-adv/sysfs.c
index 02d96f2..31d7e23 100644
--- a/net/batman-adv/sysfs.c
+++ b/net/batman-adv/sysfs.c
@@ -187,7 +187,8 @@
\
return __batadv_store_uint_attr(buff, count, _min, _max, \
_post_func, attr, \
- &bat_priv->_var, net_dev); \
+ &bat_priv->_var, net_dev, \
+ NULL); \
}
#define BATADV_ATTR_SIF_SHOW_UINT(_name, _var) \
@@ -261,7 +262,9 @@
\
length = __batadv_store_uint_attr(buff, count, _min, _max, \
_post_func, attr, \
- &hard_iface->_var, net_dev); \
+ &hard_iface->_var, \
+ hard_iface->soft_iface, \
+ net_dev); \
\
batadv_hardif_put(hard_iface); \
return length; \
@@ -355,10 +358,12 @@
static int batadv_store_uint_attr(const char *buff, size_t count,
struct net_device *net_dev,
+ struct net_device *slave_dev,
const char *attr_name,
unsigned int min, unsigned int max,
atomic_t *attr)
{
+ char ifname[IFNAMSIZ + 3] = "";
unsigned long uint_val;
int ret;
@@ -384,8 +389,11 @@
if (atomic_read(attr) == uint_val)
return count;
- batadv_info(net_dev, "%s: Changing from: %i to: %lu\n",
- attr_name, atomic_read(attr), uint_val);
+ if (slave_dev)
+ snprintf(ifname, sizeof(ifname), "%s: ", slave_dev->name);
+
+ batadv_info(net_dev, "%s: %sChanging from: %i to: %lu\n",
+ attr_name, ifname, atomic_read(attr), uint_val);
atomic_set(attr, uint_val);
return count;
@@ -396,12 +404,13 @@
void (*post_func)(struct net_device *),
const struct attribute *attr,
atomic_t *attr_store,
- struct net_device *net_dev)
+ struct net_device *net_dev,
+ struct net_device *slave_dev)
{
int ret;
- ret = batadv_store_uint_attr(buff, count, net_dev, attr->name, min, max,
- attr_store);
+ ret = batadv_store_uint_attr(buff, count, net_dev, slave_dev,
+ attr->name, min, max, attr_store);
if (post_func && ret)
post_func(net_dev);
@@ -570,7 +579,7 @@
return __batadv_store_uint_attr(buff, count, 1, BATADV_TQ_MAX_VALUE,
batadv_post_gw_reselect, attr,
&bat_priv->gw.sel_class,
- bat_priv->soft_iface);
+ bat_priv->soft_iface, NULL);
}
static ssize_t batadv_show_gw_bwidth(struct kobject *kobj,
@@ -1084,8 +1093,9 @@
if (old_tp_override == tp_override)
goto out;
- batadv_info(net_dev, "%s: Changing from: %u.%u MBit to: %u.%u MBit\n",
- "throughput_override",
+ batadv_info(hard_iface->soft_iface,
+ "%s: %s: Changing from: %u.%u MBit to: %u.%u MBit\n",
+ "throughput_override", net_dev->name,
old_tp_override / 10, old_tp_override % 10,
tp_override / 10, tp_override % 10);
diff --git a/net/batman-adv/translation-table.c b/net/batman-adv/translation-table.c
index 0dc85eb..b9f9a31 100644
--- a/net/batman-adv/translation-table.c
+++ b/net/batman-adv/translation-table.c
@@ -1550,6 +1550,8 @@
{
struct batadv_tt_orig_list_entry *orig_entry;
+ spin_lock_bh(&tt_global->list_lock);
+
orig_entry = batadv_tt_global_orig_entry_find(tt_global, orig_node);
if (orig_entry) {
/* refresh the ttvn: the current value could be a bogus one that
@@ -1570,16 +1572,16 @@
orig_entry->ttvn = ttvn;
kref_init(&orig_entry->refcount);
- spin_lock_bh(&tt_global->list_lock);
kref_get(&orig_entry->refcount);
hlist_add_head_rcu(&orig_entry->list,
&tt_global->orig_list);
- spin_unlock_bh(&tt_global->list_lock);
atomic_inc(&tt_global->orig_list_count);
out:
if (orig_entry)
batadv_tt_orig_list_entry_put(orig_entry);
+
+ spin_unlock_bh(&tt_global->list_lock);
}
/**
diff --git a/net/batman-adv/tvlv.c b/net/batman-adv/tvlv.c
index 77654f0..8e91a26 100644
--- a/net/batman-adv/tvlv.c
+++ b/net/batman-adv/tvlv.c
@@ -528,15 +528,20 @@
{
struct batadv_tvlv_handler *tvlv_handler;
+ spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
+
tvlv_handler = batadv_tvlv_handler_get(bat_priv, type, version);
if (tvlv_handler) {
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
batadv_tvlv_handler_put(tvlv_handler);
return;
}
tvlv_handler = kzalloc(sizeof(*tvlv_handler), GFP_ATOMIC);
- if (!tvlv_handler)
+ if (!tvlv_handler) {
+ spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
return;
+ }
tvlv_handler->ogm_handler = optr;
tvlv_handler->unicast_handler = uptr;
@@ -546,7 +551,6 @@
kref_init(&tvlv_handler->refcount);
INIT_HLIST_NODE(&tvlv_handler->list);
- spin_lock_bh(&bat_priv->tvlv.handler_list_lock);
kref_get(&tvlv_handler->refcount);
hlist_add_head_rcu(&tvlv_handler->list, &bat_priv->tvlv.handler_list);
spin_unlock_bh(&bat_priv->tvlv.handler_list_lock);
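
The network-coding, soft-interface, translation-table and tvlv fixes all apply one recipe: take the list spinlock before the lookup so that lookup, allocation and insertion form a single critical section, closing the window where two contexts could both miss the lookup and insert duplicate entries. In outline (all names hypothetical):

struct my_entry {
	struct hlist_node node;
	u32 key;
};

static struct my_entry *my_entry_get_or_create(struct my_table *tbl, u32 key)
{
	struct my_entry *e;

	spin_lock_bh(&tbl->lock);		/* covers lookup AND insert */

	e = __my_entry_find(tbl, key);		/* existing-entry lookup */
	if (e)
		goto unlock;			/* no duplicate is created */

	e = kzalloc(sizeof(*e), GFP_ATOMIC);	/* atomic: spinlock held */
	if (!e)
		goto unlock;

	e->key = key;
	hlist_add_head_rcu(&e->node, &tbl->head);
unlock:
	spin_unlock_bh(&tbl->lock);
	return e;
}
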
diff --git a/net/bridge/netfilter/ebt_arpreply.c b/net/bridge/netfilter/ebt_arpreply.c
index 070cf13..f2660c1 100644
--- a/net/bridge/netfilter/ebt_arpreply.c
+++ b/net/bridge/netfilter/ebt_arpreply.c
@@ -67,6 +67,9 @@
if (e->ethproto != htons(ETH_P_ARP) ||
e->invflags & EBT_IPROTO)
return -EINVAL;
+ if (ebt_invalid_target(info->target))
+ return -EINVAL;
+
return 0;
}
diff --git a/net/core/dev.c b/net/core/dev.c
index df92fb8..62dd763 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1666,6 +1666,28 @@
}
EXPORT_SYMBOL(call_netdevice_notifiers);
+/**
+ * call_netdevice_notifiers_mtu - call all network notifier blocks
+ * @val: value passed unmodified to notifier function
+ * @dev: net_device pointer passed unmodified to notifier function
+ * @arg: additional u32 argument passed to the notifier function
+ *
+ * Call all network notifier blocks. Parameters and return value
+ * are as for raw_notifier_call_chain().
+ */
+static int call_netdevice_notifiers_mtu(unsigned long val,
+ struct net_device *dev, u32 arg)
+{
+ struct netdev_notifier_info_ext info = {
+ .info.dev = dev,
+ .ext.mtu = arg,
+ };
+
+ BUILD_BUG_ON(offsetof(struct netdev_notifier_info_ext, info) != 0);
+
+ return call_netdevice_notifiers_info(val, dev, &info.info);
+}
+
#ifdef CONFIG_NET_INGRESS
static struct static_key ingress_needed __read_mostly;
@@ -6721,14 +6743,16 @@
err = __dev_set_mtu(dev, new_mtu);
if (!err) {
- err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ err = call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+ orig_mtu);
err = notifier_to_errno(err);
if (err) {
/* setting mtu back and notifying everyone again,
* so that they have a chance to revert changes.
*/
__dev_set_mtu(dev, orig_mtu);
- call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
+ call_netdevice_notifiers_mtu(NETDEV_CHANGEMTU, dev,
+ new_mtu);
}
}
return err;
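
A notifier on the receiving side can recover the extended argument precisely because the embedded info is the struct's first member (enforced by the BUILD_BUG_ON above); fib_netdev_event() below does this. A reduced sketch, assuming the netdev_notifier_info_ext definition added elsewhere in this series:

static int my_netdev_event(struct notifier_block *nb,
			   unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);

	if (event == NETDEV_CHANGEMTU) {
		struct netdev_notifier_info_ext *info_ext = ptr;

		/* ext.mtu carries the pre-change MTU on the initial
		 * notification and the rejected MTU on the revert.
		 */
		pr_debug("%s: MTU change, previous MTU %u, current %u\n",
			 dev->name, info_ext->ext.mtu, dev->mtu);
	}
	return NOTIFY_DONE;
}
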
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index 340a3db..2cfbe3f 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -1147,6 +1147,12 @@
lladdr = neigh->ha;
}
+	/* Update the confirmed timestamp for a neighbour entry after we
+	 * receive an ARP packet, even if it doesn't change the IP-to-MAC binding.
+	 */
+ if (new & NUD_CONNECTED)
+ neigh->confirmed = jiffies;
+
/* If entry was valid and address is not changed,
do not change entry state, if new one is STALE.
*/
@@ -1168,15 +1174,12 @@
}
}
- /* Update timestamps only once we know we will make a change to the
+ /* Update timestamp only once we know we will make a change to the
* neighbour entry. Otherwise we risk to move the locktime window with
* noop updates and ignore relevant ARP updates.
*/
- if (new != old || lladdr != neigh->ha) {
- if (new & NUD_CONNECTED)
- neigh->confirmed = jiffies;
+ if (new != old || lladdr != neigh->ha)
neigh->updated = jiffies;
- }
if (new != old) {
neigh_del_timer(neigh);
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index 194e844..189082d 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -2368,6 +2368,12 @@
else if (ops->get_num_rx_queues)
num_rx_queues = ops->get_num_rx_queues();
+ if (num_tx_queues < 1 || num_tx_queues > 4096)
+ return ERR_PTR(-EINVAL);
+
+ if (num_rx_queues < 1 || num_rx_queues > 4096)
+ return ERR_PTR(-EINVAL);
+
err = -ENOMEM;
dev = alloc_netdev_mqs(ops->priv_size, ifname, name_assign_type,
ops->setup, num_tx_queues, num_rx_queues);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index b6a319c..cfadc0a 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1584,6 +1584,20 @@
}
EXPORT_SYMBOL(___pskb_trim);
+/* Note: use pskb_trim_rcsum() instead of calling this directly.
+ */
+int pskb_trim_rcsum_slow(struct sk_buff *skb, unsigned int len)
+{
+ if (skb->ip_summed == CHECKSUM_COMPLETE) {
+ int delta = skb->len - len;
+
+ skb->csum = csum_sub(skb->csum,
+ skb_checksum(skb, len, delta, 0));
+ }
+ return __pskb_trim(skb, len);
+}
+EXPORT_SYMBOL(pskb_trim_rcsum_slow);
+
/**
* __pskb_pull_tail - advance tail of skb header
* @skb: buffer to reallocate
@@ -2431,20 +2445,27 @@
/**
* skb_rbtree_purge - empty a skb rbtree
* @root: root of the rbtree to empty
+ * Return value: the sum of truesizes of all purged skbs.
*
* Delete all buffers on an &sk_buff rbtree. Each buffer is removed from
* the list and one reference dropped. This function does not take
* any lock. Synchronization should be handled by the caller (e.g., TCP
* out-of-order queue is protected by the socket lock).
*/
-void skb_rbtree_purge(struct rb_root *root)
+unsigned int skb_rbtree_purge(struct rb_root *root)
{
- struct sk_buff *skb, *next;
+ struct rb_node *p = rb_first(root);
+ unsigned int sum = 0;
- rbtree_postorder_for_each_entry_safe(skb, next, root, rbnode)
+ while (p) {
+ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, root);
+ sum += skb->truesize;
kfree_skb(skb);
-
- *root = RB_ROOT;
+ }
+ return sum;
}
/**
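
The new return value lets callers fold queue teardown and memory accounting into one pass; an illustrative (not upstream) use:

static void my_purge_ofo_queue(struct sock *sk, struct rb_root *root)
{
	unsigned int freed = skb_rbtree_purge(root);

	/* Give the purged skbs' truesize back to the socket's
	 * receive-memory accounting in a single step.
	 */
	atomic_sub(freed, &sk->sk_rmem_alloc);
	sk_mem_reclaim(sk);
}
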
diff --git a/net/dccp/input.c b/net/dccp/input.c
index 4a05d78..84ff43a 100644
--- a/net/dccp/input.c
+++ b/net/dccp/input.c
@@ -605,11 +605,13 @@
if (sk->sk_state == DCCP_LISTEN) {
if (dh->dccph_type == DCCP_PKT_REQUEST) {
/* It is possible that we process SYN packets from backlog,
- * so we need to make sure to disable BH right there.
+ * so we need to make sure to disable BH and RCU right there.
*/
+ rcu_read_lock();
local_bh_disable();
acceptable = inet_csk(sk)->icsk_af_ops->conn_request(sk, skb) >= 0;
local_bh_enable();
+ rcu_read_unlock();
if (!acceptable)
return 1;
consume_skb(skb);
diff --git a/net/dccp/ipv4.c b/net/dccp/ipv4.c
index 6697b18..28ad6f1 100644
--- a/net/dccp/ipv4.c
+++ b/net/dccp/ipv4.c
@@ -493,9 +493,11 @@
dh->dccph_checksum = dccp_v4_csum_finish(skb, ireq->ir_loc_addr,
ireq->ir_rmt_addr);
+ rcu_read_lock();
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- ireq_opt_deref(ireq));
+ rcu_dereference(ireq->ireq_opt));
+ rcu_read_unlock();
err = net_xmit_eval(err);
}
diff --git a/net/ieee802154/6lowpan/6lowpan_i.h b/net/ieee802154/6lowpan/6lowpan_i.h
index 5ac7789..3bfec47 100644
--- a/net/ieee802154/6lowpan/6lowpan_i.h
+++ b/net/ieee802154/6lowpan/6lowpan_i.h
@@ -16,37 +16,19 @@
#define LOWPAN_DISPATCH_FRAG1 0xc0
#define LOWPAN_DISPATCH_FRAGN 0xe0
-struct lowpan_create_arg {
+struct frag_lowpan_compare_key {
u16 tag;
u16 d_size;
- const struct ieee802154_addr *src;
- const struct ieee802154_addr *dst;
+ const struct ieee802154_addr src;
+ const struct ieee802154_addr dst;
};
-/* Equivalent of ipv4 struct ip
+/* Equivalent of ipv4 struct ipq
*/
struct lowpan_frag_queue {
struct inet_frag_queue q;
-
- u16 tag;
- u16 d_size;
- struct ieee802154_addr saddr;
- struct ieee802154_addr daddr;
};
-static inline u32 ieee802154_addr_hash(const struct ieee802154_addr *a)
-{
- switch (a->mode) {
- case IEEE802154_ADDR_LONG:
- return (((__force u64)a->extended_addr) >> 32) ^
- (((__force u64)a->extended_addr) & 0xffffffff);
- case IEEE802154_ADDR_SHORT:
- return (__force u32)(a->short_addr + (a->pan_id << 16));
- default:
- return 0;
- }
-}
-
int lowpan_frag_rcv(struct sk_buff *skb, const u8 frag_type);
void lowpan_net_frag_exit(void);
int lowpan_net_frag_init(void);
diff --git a/net/ieee802154/6lowpan/reassembly.c b/net/ieee802154/6lowpan/reassembly.c
index f85b08b..6fca755 100644
--- a/net/ieee802154/6lowpan/reassembly.c
+++ b/net/ieee802154/6lowpan/reassembly.c
@@ -37,47 +37,15 @@
static int lowpan_frag_reasm(struct lowpan_frag_queue *fq,
struct sk_buff *prev, struct net_device *ldev);
-static unsigned int lowpan_hash_frag(u16 tag, u16 d_size,
- const struct ieee802154_addr *saddr,
- const struct ieee802154_addr *daddr)
-{
- net_get_random_once(&lowpan_frags.rnd, sizeof(lowpan_frags.rnd));
- return jhash_3words(ieee802154_addr_hash(saddr),
- ieee802154_addr_hash(daddr),
- (__force u32)(tag + (d_size << 16)),
- lowpan_frags.rnd);
-}
-
-static unsigned int lowpan_hashfn(const struct inet_frag_queue *q)
-{
- const struct lowpan_frag_queue *fq;
-
- fq = container_of(q, struct lowpan_frag_queue, q);
- return lowpan_hash_frag(fq->tag, fq->d_size, &fq->saddr, &fq->daddr);
-}
-
-static bool lowpan_frag_match(const struct inet_frag_queue *q, const void *a)
-{
- const struct lowpan_frag_queue *fq;
- const struct lowpan_create_arg *arg = a;
-
- fq = container_of(q, struct lowpan_frag_queue, q);
- return fq->tag == arg->tag && fq->d_size == arg->d_size &&
- ieee802154_addr_equal(&fq->saddr, arg->src) &&
- ieee802154_addr_equal(&fq->daddr, arg->dst);
-}
-
static void lowpan_frag_init(struct inet_frag_queue *q, const void *a)
{
- const struct lowpan_create_arg *arg = a;
+ const struct frag_lowpan_compare_key *key = a;
struct lowpan_frag_queue *fq;
fq = container_of(q, struct lowpan_frag_queue, q);
- fq->tag = arg->tag;
- fq->d_size = arg->d_size;
- fq->saddr = *arg->src;
- fq->daddr = *arg->dst;
+ BUILD_BUG_ON(sizeof(*key) > sizeof(q->key));
+ memcpy(&q->key, key, sizeof(*key));
}
static void lowpan_frag_expire(unsigned long data)
@@ -93,10 +61,10 @@
if (fq->q.flags & INET_FRAG_COMPLETE)
goto out;
- inet_frag_kill(&fq->q, &lowpan_frags);
+ inet_frag_kill(&fq->q);
out:
spin_unlock(&fq->q.lock);
- inet_frag_put(&fq->q, &lowpan_frags);
+ inet_frag_put(&fq->q);
}
static inline struct lowpan_frag_queue *
@@ -104,25 +72,20 @@
const struct ieee802154_addr *src,
const struct ieee802154_addr *dst)
{
- struct inet_frag_queue *q;
- struct lowpan_create_arg arg;
- unsigned int hash;
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
+ struct frag_lowpan_compare_key key = {
+ .tag = cb->d_tag,
+ .d_size = cb->d_size,
+ .src = *src,
+ .dst = *dst,
+ };
+ struct inet_frag_queue *q;
- arg.tag = cb->d_tag;
- arg.d_size = cb->d_size;
- arg.src = src;
- arg.dst = dst;
-
- hash = lowpan_hash_frag(cb->d_tag, cb->d_size, src, dst);
-
- q = inet_frag_find(&ieee802154_lowpan->frags,
- &lowpan_frags, &arg, hash);
- if (IS_ERR_OR_NULL(q)) {
- inet_frag_maybe_warn_overflow(q, pr_fmt());
+ q = inet_frag_find(&ieee802154_lowpan->frags, &key);
+ if (!q)
return NULL;
- }
+
return container_of(q, struct lowpan_frag_queue, q);
}
@@ -229,7 +192,7 @@
struct sk_buff *fp, *head = fq->q.fragments;
int sum_truesize;
- inet_frag_kill(&fq->q, &lowpan_frags);
+ inet_frag_kill(&fq->q);
/* Make the one we just received the head. */
if (prev) {
@@ -437,7 +400,7 @@
ret = lowpan_frag_queue(fq, skb, frag_type);
spin_unlock(&fq->q.lock);
- inet_frag_put(&fq->q, &lowpan_frags);
+ inet_frag_put(&fq->q);
return ret;
}
@@ -447,24 +410,22 @@
}
#ifdef CONFIG_SYSCTL
-static int zero;
static struct ctl_table lowpan_frags_ns_ctl_table[] = {
{
.procname = "6lowpanfrag_high_thresh",
.data = &init_net.ieee802154_lowpan.frags.high_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
.extra1 = &init_net.ieee802154_lowpan.frags.low_thresh
},
{
.procname = "6lowpanfrag_low_thresh",
.data = &init_net.ieee802154_lowpan.frags.low_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .proc_handler = proc_doulongvec_minmax,
.extra2 = &init_net.ieee802154_lowpan.frags.high_thresh
},
{
@@ -580,14 +541,20 @@
{
struct netns_ieee802154_lowpan *ieee802154_lowpan =
net_ieee802154_lowpan(net);
+ int res;
ieee802154_lowpan->frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
ieee802154_lowpan->frags.low_thresh = IPV6_FRAG_LOW_THRESH;
ieee802154_lowpan->frags.timeout = IPV6_FRAG_TIMEOUT;
+ ieee802154_lowpan->frags.f = &lowpan_frags;
- inet_frags_init_net(&ieee802154_lowpan->frags);
-
- return lowpan_frags_ns_sysctl_register(net);
+ res = inet_frags_init_net(&ieee802154_lowpan->frags);
+ if (res < 0)
+ return res;
+ res = lowpan_frags_ns_sysctl_register(net);
+ if (res < 0)
+ inet_frags_exit_net(&ieee802154_lowpan->frags);
+ return res;
}
static void __net_exit lowpan_frags_exit_net(struct net *net)
@@ -596,7 +563,7 @@
net_ieee802154_lowpan(net);
lowpan_frags_ns_sysctl_unregister(net);
- inet_frags_exit_net(&ieee802154_lowpan->frags, &lowpan_frags);
+ inet_frags_exit_net(&ieee802154_lowpan->frags);
}
static struct pernet_operations lowpan_frags_ops = {
@@ -604,32 +571,63 @@
.exit = lowpan_frags_exit_net,
};
+static u32 lowpan_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ return jhash2(data,
+ sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
+}
+
+static u32 lowpan_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct inet_frag_queue *fq = data;
+
+ return jhash2((const u32 *)&fq->key,
+ sizeof(struct frag_lowpan_compare_key) / sizeof(u32), seed);
+}
+
+static int lowpan_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct frag_lowpan_compare_key *key = arg->key;
+ const struct inet_frag_queue *fq = ptr;
+
+ return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+static const struct rhashtable_params lowpan_rhash_params = {
+ .head_offset = offsetof(struct inet_frag_queue, node),
+ .hashfn = lowpan_key_hashfn,
+ .obj_hashfn = lowpan_obj_hashfn,
+ .obj_cmpfn = lowpan_obj_cmpfn,
+ .automatic_shrinking = true,
+};
+
int __init lowpan_net_frag_init(void)
{
int ret;
+ lowpan_frags.constructor = lowpan_frag_init;
+ lowpan_frags.destructor = NULL;
+ lowpan_frags.qsize = sizeof(struct frag_queue);
+ lowpan_frags.frag_expire = lowpan_frag_expire;
+ lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
+ lowpan_frags.rhash_params = lowpan_rhash_params;
+ ret = inet_frags_init(&lowpan_frags);
+ if (ret)
+ goto out;
+
ret = lowpan_frags_sysctl_register();
if (ret)
- return ret;
+ goto err_sysctl;
ret = register_pernet_subsys(&lowpan_frags_ops);
if (ret)
goto err_pernet;
-
- lowpan_frags.hashfn = lowpan_hashfn;
- lowpan_frags.constructor = lowpan_frag_init;
- lowpan_frags.destructor = NULL;
- lowpan_frags.qsize = sizeof(struct frag_queue);
- lowpan_frags.match = lowpan_frag_match;
- lowpan_frags.frag_expire = lowpan_frag_expire;
- lowpan_frags.frags_cache_name = lowpan_frags_cache_name;
- ret = inet_frags_init(&lowpan_frags);
- if (ret)
- goto err_pernet;
-
+out:
return ret;
err_pernet:
lowpan_frags_sysctl_unregister();
+err_sysctl:
+ inet_frags_fini(&lowpan_frags);
return ret;
}
diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
index 90c91a7..275ef13 100644
--- a/net/ipv4/af_inet.c
+++ b/net/ipv4/af_inet.c
@@ -1315,6 +1315,7 @@
if (encap)
skb_reset_inner_headers(skb);
skb->network_header = (u8 *)iph - skb->head;
+ skb_reset_mac_len(skb);
} while ((skb = skb->next));
out:
diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
index 78ee2fc..8e556fa 100644
--- a/net/ipv4/fib_frontend.c
+++ b/net/ipv4/fib_frontend.c
@@ -1172,7 +1172,8 @@
static int fib_netdev_event(struct notifier_block *this, unsigned long event, void *ptr)
{
struct net_device *dev = netdev_notifier_info_to_dev(ptr);
- struct netdev_notifier_changeupper_info *info;
+ struct netdev_notifier_changeupper_info *upper_info = ptr;
+ struct netdev_notifier_info_ext *info_ext = ptr;
struct in_device *in_dev;
struct net *net = dev_net(dev);
unsigned int flags;
@@ -1207,16 +1208,19 @@
fib_sync_up(dev, RTNH_F_LINKDOWN);
else
fib_sync_down_dev(dev, event, false);
- /* fall through */
+ rt_cache_flush(net);
+ break;
case NETDEV_CHANGEMTU:
+ fib_sync_mtu(dev, info_ext->ext.mtu);
rt_cache_flush(net);
break;
case NETDEV_CHANGEUPPER:
- info = ptr;
+ upper_info = ptr;
/* flush all routes if dev is linked to or unlinked from
* an L3 master device (e.g., VRF)
*/
- if (info->upper_dev && netif_is_l3_master(info->upper_dev))
+ if (upper_info->upper_dev &&
+ netif_is_l3_master(upper_info->upper_dev))
fib_disable_ip(dev, NETDEV_DOWN, true);
break;
}
diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
index a88dab3..90c6540 100644
--- a/net/ipv4/fib_semantics.c
+++ b/net/ipv4/fib_semantics.c
@@ -1377,6 +1377,56 @@
return ret;
}
+/* Update the PMTU of exceptions when:
+ * - the new MTU of the first hop becomes smaller than the PMTU
+ * - the old MTU was the same as the PMTU, and it limited discovery of
+ * larger MTUs on the path. With that limit raised, we can now
+ * discover larger MTUs
+ * A special case is locked exceptions, for which the PMTU is smaller
+ * than the minimal accepted PMTU:
+ * - if the new MTU is greater than the PMTU, don't make any change
+ * - otherwise, unlock and set PMTU
+ */
+static void nh_update_mtu(struct fib_nh *nh, u32 new, u32 orig)
+{
+ struct fnhe_hash_bucket *bucket;
+ int i;
+
+ bucket = rcu_dereference_protected(nh->nh_exceptions, 1);
+ if (!bucket)
+ return;
+
+ for (i = 0; i < FNHE_HASH_SIZE; i++) {
+ struct fib_nh_exception *fnhe;
+
+ for (fnhe = rcu_dereference_protected(bucket[i].chain, 1);
+ fnhe;
+ fnhe = rcu_dereference_protected(fnhe->fnhe_next, 1)) {
+ if (fnhe->fnhe_mtu_locked) {
+ if (new <= fnhe->fnhe_pmtu) {
+ fnhe->fnhe_pmtu = new;
+ fnhe->fnhe_mtu_locked = false;
+ }
+ } else if (new < fnhe->fnhe_pmtu ||
+ orig == fnhe->fnhe_pmtu) {
+ fnhe->fnhe_pmtu = new;
+ }
+ }
+ }
+}
+
+void fib_sync_mtu(struct net_device *dev, u32 orig_mtu)
+{
+ unsigned int hash = fib_devindex_hashfn(dev->ifindex);
+ struct hlist_head *head = &fib_info_devhash[hash];
+ struct fib_nh *nh;
+
+ hlist_for_each_entry(nh, head, nh_hash) {
+ if (nh->nh_dev == dev)
+ nh_update_mtu(nh, dev->mtu, orig_mtu);
+ }
+}
+
/* Event force Flags Description
* NETDEV_CHANGE 0 LINKDOWN Carrier OFF, not for scope host
* NETDEV_DOWN 0 LINKDOWN|DEAD Link down, not for scope host
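
A worked example of the nh_update_mtu() rules above, for a single cached exception (values illustrative):

/* Unlocked exception, fnhe_pmtu = 1400:
 *   new = 1280, orig = 1500 -> new < pmtu:   pmtu lowered to 1280
 *   new = 9000, orig = 1400 -> orig == pmtu: pmtu raised to 9000, so
 *                              PMTU discovery can probe larger MTUs
 *   new = 9000, orig = 1500 -> neither rule: pmtu left at 1400
 *
 * Locked exception, fnhe_pmtu = 552:
 *   new = 1500 -> new > pmtu:  no change
 *   new = 500  -> new <= pmtu: pmtu set to 500 and unlocked
 */
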
diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
index 11558ca..9453180 100644
--- a/net/ipv4/inet_connection_sock.c
+++ b/net/ipv4/inet_connection_sock.c
@@ -417,7 +417,8 @@
struct ip_options_rcu *opt;
struct rtable *rt;
- opt = ireq_opt_deref(ireq);
+ rcu_read_lock();
+ opt = rcu_dereference(ireq->ireq_opt);
flowi4_init_output(fl4, ireq->ir_iif, ireq->ir_mark,
RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE,
@@ -431,11 +432,13 @@
goto no_route;
if (opt && opt->opt.is_strictroute && rt->rt_uses_gateway)
goto route_err;
+ rcu_read_unlock();
return &rt->dst;
route_err:
ip_rt_put(rt);
no_route:
+ rcu_read_unlock();
__IP_INC_STATS(net, IPSTATS_MIB_OUTNOROUTES);
return NULL;
}
diff --git a/net/ipv4/inet_fragment.c b/net/ipv4/inet_fragment.c
index f8b41aa..8323d33 100644
--- a/net/ipv4/inet_fragment.c
+++ b/net/ipv4/inet_fragment.c
@@ -25,12 +25,6 @@
#include <net/inet_frag.h>
#include <net/inet_ecn.h>
-#define INETFRAGS_EVICT_BUCKETS 128
-#define INETFRAGS_EVICT_MAX 512
-
-/* don't rebuild inetfrag table with new secret more often than this */
-#define INETFRAGS_MIN_REBUILD_INTERVAL (5 * HZ)
-
/* Given the OR values of all fragments, apply RFC 3168 5.3 requirements
* Value : 0xff if frame should be dropped.
* 0 or INET_ECN_CE value, to be ORed in to final iph->tos field
@@ -52,157 +46,8 @@
};
EXPORT_SYMBOL(ip_frag_ecn_table);
-static unsigned int
-inet_frag_hashfn(const struct inet_frags *f, const struct inet_frag_queue *q)
-{
- return f->hashfn(q) & (INETFRAGS_HASHSZ - 1);
-}
-
-static bool inet_frag_may_rebuild(struct inet_frags *f)
-{
- return time_after(jiffies,
- f->last_rebuild_jiffies + INETFRAGS_MIN_REBUILD_INTERVAL);
-}
-
-static void inet_frag_secret_rebuild(struct inet_frags *f)
-{
- int i;
-
- write_seqlock_bh(&f->rnd_seqlock);
-
- if (!inet_frag_may_rebuild(f))
- goto out;
-
- get_random_bytes(&f->rnd, sizeof(u32));
-
- for (i = 0; i < INETFRAGS_HASHSZ; i++) {
- struct inet_frag_bucket *hb;
- struct inet_frag_queue *q;
- struct hlist_node *n;
-
- hb = &f->hash[i];
- spin_lock(&hb->chain_lock);
-
- hlist_for_each_entry_safe(q, n, &hb->chain, list) {
- unsigned int hval = inet_frag_hashfn(f, q);
-
- if (hval != i) {
- struct inet_frag_bucket *hb_dest;
-
- hlist_del(&q->list);
-
- /* Relink to new hash chain. */
- hb_dest = &f->hash[hval];
-
- /* This is the only place where we take
- * another chain_lock while already holding
- * one. As this will not run concurrently,
- * we cannot deadlock on hb_dest lock below, if its
- * already locked it will be released soon since
- * other caller cannot be waiting for hb lock
- * that we've taken above.
- */
- spin_lock_nested(&hb_dest->chain_lock,
- SINGLE_DEPTH_NESTING);
- hlist_add_head(&q->list, &hb_dest->chain);
- spin_unlock(&hb_dest->chain_lock);
- }
- }
- spin_unlock(&hb->chain_lock);
- }
-
- f->rebuild = false;
- f->last_rebuild_jiffies = jiffies;
-out:
- write_sequnlock_bh(&f->rnd_seqlock);
-}
-
-static bool inet_fragq_should_evict(const struct inet_frag_queue *q)
-{
- if (!hlist_unhashed(&q->list_evictor))
- return false;
-
- return q->net->low_thresh == 0 ||
- frag_mem_limit(q->net) >= q->net->low_thresh;
-}
-
-static unsigned int
-inet_evict_bucket(struct inet_frags *f, struct inet_frag_bucket *hb)
-{
- struct inet_frag_queue *fq;
- struct hlist_node *n;
- unsigned int evicted = 0;
- HLIST_HEAD(expired);
-
- spin_lock(&hb->chain_lock);
-
- hlist_for_each_entry_safe(fq, n, &hb->chain, list) {
- if (!inet_fragq_should_evict(fq))
- continue;
-
- if (!del_timer(&fq->timer))
- continue;
-
- hlist_add_head(&fq->list_evictor, &expired);
- ++evicted;
- }
-
- spin_unlock(&hb->chain_lock);
-
- hlist_for_each_entry_safe(fq, n, &expired, list_evictor)
- f->frag_expire((unsigned long) fq);
-
- return evicted;
-}
-
-static void inet_frag_worker(struct work_struct *work)
-{
- unsigned int budget = INETFRAGS_EVICT_BUCKETS;
- unsigned int i, evicted = 0;
- struct inet_frags *f;
-
- f = container_of(work, struct inet_frags, frags_work);
-
- BUILD_BUG_ON(INETFRAGS_EVICT_BUCKETS >= INETFRAGS_HASHSZ);
-
- local_bh_disable();
-
- for (i = ACCESS_ONCE(f->next_bucket); budget; --budget) {
- evicted += inet_evict_bucket(f, &f->hash[i]);
- i = (i + 1) & (INETFRAGS_HASHSZ - 1);
- if (evicted > INETFRAGS_EVICT_MAX)
- break;
- }
-
- f->next_bucket = i;
-
- local_bh_enable();
-
- if (f->rebuild && inet_frag_may_rebuild(f))
- inet_frag_secret_rebuild(f);
-}
-
-static void inet_frag_schedule_worker(struct inet_frags *f)
-{
- if (unlikely(!work_pending(&f->frags_work)))
- schedule_work(&f->frags_work);
-}
-
int inet_frags_init(struct inet_frags *f)
{
- int i;
-
- INIT_WORK(&f->frags_work, inet_frag_worker);
-
- for (i = 0; i < INETFRAGS_HASHSZ; i++) {
- struct inet_frag_bucket *hb = &f->hash[i];
-
- spin_lock_init(&hb->chain_lock);
- INIT_HLIST_HEAD(&hb->chain);
- }
-
- seqlock_init(&f->rnd_seqlock);
- f->last_rebuild_jiffies = 0;
f->frags_cachep = kmem_cache_create(f->frags_cache_name, f->qsize, 0, 0,
NULL);
if (!f->frags_cachep)
@@ -214,83 +59,75 @@
void inet_frags_fini(struct inet_frags *f)
{
- cancel_work_sync(&f->frags_work);
+	/* We must wait until all pending inet_frag_destroy_rcu() calls have completed. */
+ rcu_barrier();
+
kmem_cache_destroy(f->frags_cachep);
+ f->frags_cachep = NULL;
}
EXPORT_SYMBOL(inet_frags_fini);
-void inet_frags_exit_net(struct netns_frags *nf, struct inet_frags *f)
+static void inet_frags_free_cb(void *ptr, void *arg)
{
- unsigned int seq;
- int i;
+ struct inet_frag_queue *fq = ptr;
- nf->low_thresh = 0;
+	/* If we cannot cancel the timer, this frag_queue is already
+	 * disappearing and there is nothing to do.
+	 * Otherwise, we own a refcount until the end of this function.
+	 */
+ if (!del_timer(&fq->timer))
+ return;
-evict_again:
- local_bh_disable();
- seq = read_seqbegin(&f->rnd_seqlock);
+ spin_lock_bh(&fq->lock);
+ if (!(fq->flags & INET_FRAG_COMPLETE)) {
+ fq->flags |= INET_FRAG_COMPLETE;
+ atomic_dec(&fq->refcnt);
+ }
+ spin_unlock_bh(&fq->lock);
- for (i = 0; i < INETFRAGS_HASHSZ ; i++)
- inet_evict_bucket(f, &f->hash[i]);
+ inet_frag_put(fq);
+}
- local_bh_enable();
- cond_resched();
+void inet_frags_exit_net(struct netns_frags *nf)
+{
+ nf->low_thresh = 0; /* prevent creation of new frags */
- if (read_seqretry(&f->rnd_seqlock, seq) ||
- sum_frag_mem_limit(nf))
- goto evict_again;
+ rhashtable_free_and_destroy(&nf->rhashtable, inet_frags_free_cb, NULL);
}
EXPORT_SYMBOL(inet_frags_exit_net);
-static struct inet_frag_bucket *
-get_frag_bucket_locked(struct inet_frag_queue *fq, struct inet_frags *f)
-__acquires(hb->chain_lock)
-{
- struct inet_frag_bucket *hb;
- unsigned int seq, hash;
-
- restart:
- seq = read_seqbegin(&f->rnd_seqlock);
-
- hash = inet_frag_hashfn(f, fq);
- hb = &f->hash[hash];
-
- spin_lock(&hb->chain_lock);
- if (read_seqretry(&f->rnd_seqlock, seq)) {
- spin_unlock(&hb->chain_lock);
- goto restart;
- }
-
- return hb;
-}
-
-static inline void fq_unlink(struct inet_frag_queue *fq, struct inet_frags *f)
-{
- struct inet_frag_bucket *hb;
-
- hb = get_frag_bucket_locked(fq, f);
- hlist_del(&fq->list);
- fq->flags |= INET_FRAG_COMPLETE;
- spin_unlock(&hb->chain_lock);
-}
-
-void inet_frag_kill(struct inet_frag_queue *fq, struct inet_frags *f)
+void inet_frag_kill(struct inet_frag_queue *fq)
{
if (del_timer(&fq->timer))
atomic_dec(&fq->refcnt);
if (!(fq->flags & INET_FRAG_COMPLETE)) {
- fq_unlink(fq, f);
+ struct netns_frags *nf = fq->net;
+
+ fq->flags |= INET_FRAG_COMPLETE;
+ rhashtable_remove_fast(&nf->rhashtable, &fq->node, nf->f->rhash_params);
atomic_dec(&fq->refcnt);
}
}
EXPORT_SYMBOL(inet_frag_kill);
-void inet_frag_destroy(struct inet_frag_queue *q, struct inet_frags *f)
+static void inet_frag_destroy_rcu(struct rcu_head *head)
+{
+ struct inet_frag_queue *q = container_of(head, struct inet_frag_queue,
+ rcu);
+ struct inet_frags *f = q->net->f;
+
+ if (f->destructor)
+ f->destructor(q);
+ kmem_cache_free(f->frags_cachep, q);
+}
+
+void inet_frag_destroy(struct inet_frag_queue *q)
{
struct sk_buff *fp;
struct netns_frags *nf;
unsigned int sum, sum_truesize = 0;
+ struct inet_frags *f;
WARN_ON(!(q->flags & INET_FRAG_COMPLETE));
WARN_ON(del_timer(&q->timer) != 0);
@@ -298,64 +135,35 @@
/* Release all fragment data. */
fp = q->fragments;
nf = q->net;
- while (fp) {
- struct sk_buff *xp = fp->next;
+ f = nf->f;
+ if (fp) {
+ do {
+ struct sk_buff *xp = fp->next;
- sum_truesize += fp->truesize;
- kfree_skb(fp);
- fp = xp;
+ sum_truesize += fp->truesize;
+ kfree_skb(fp);
+ fp = xp;
+ } while (fp);
+ } else {
+ sum_truesize = inet_frag_rbtree_purge(&q->rb_fragments);
}
sum = sum_truesize + f->qsize;
- if (f->destructor)
- f->destructor(q);
- kmem_cache_free(f->frags_cachep, q);
+ call_rcu(&q->rcu, inet_frag_destroy_rcu);
sub_frag_mem_limit(nf, sum);
}
EXPORT_SYMBOL(inet_frag_destroy);
-static struct inet_frag_queue *inet_frag_intern(struct netns_frags *nf,
- struct inet_frag_queue *qp_in,
- struct inet_frags *f,
- void *arg)
-{
- struct inet_frag_bucket *hb = get_frag_bucket_locked(qp_in, f);
- struct inet_frag_queue *qp;
-
-#ifdef CONFIG_SMP
- /* With SMP race we have to recheck hash table, because
- * such entry could have been created on other cpu before
- * we acquired hash bucket lock.
- */
- hlist_for_each_entry(qp, &hb->chain, list) {
- if (qp->net == nf && f->match(qp, arg)) {
- atomic_inc(&qp->refcnt);
- spin_unlock(&hb->chain_lock);
- qp_in->flags |= INET_FRAG_COMPLETE;
- inet_frag_put(qp_in, f);
- return qp;
- }
- }
-#endif
- qp = qp_in;
- if (!mod_timer(&qp->timer, jiffies + nf->timeout))
- atomic_inc(&qp->refcnt);
-
- atomic_inc(&qp->refcnt);
- hlist_add_head(&qp->list, &hb->chain);
-
- spin_unlock(&hb->chain_lock);
-
- return qp;
-}
-
static struct inet_frag_queue *inet_frag_alloc(struct netns_frags *nf,
struct inet_frags *f,
void *arg)
{
struct inet_frag_queue *q;
+ if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh)
+ return NULL;
+
q = kmem_cache_zalloc(f->frags_cachep, GFP_ATOMIC);
if (!q)
return NULL;
@@ -366,75 +174,50 @@
setup_timer(&q->timer, f->frag_expire, (unsigned long)q);
spin_lock_init(&q->lock);
- atomic_set(&q->refcnt, 1);
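+	/* Three references: one for the timer, one for the hash table,
+	 * one returned to the caller.
+	 */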
+ atomic_set(&q->refcnt, 3);
return q;
}
static struct inet_frag_queue *inet_frag_create(struct netns_frags *nf,
- struct inet_frags *f,
void *arg)
{
+ struct inet_frags *f = nf->f;
struct inet_frag_queue *q;
+ int err;
q = inet_frag_alloc(nf, f, arg);
if (!q)
return NULL;
- return inet_frag_intern(nf, q, f, arg);
-}
+ mod_timer(&q->timer, jiffies + nf->timeout);
-struct inet_frag_queue *inet_frag_find(struct netns_frags *nf,
- struct inet_frags *f, void *key,
- unsigned int hash)
-{
- struct inet_frag_bucket *hb;
- struct inet_frag_queue *q;
- int depth = 0;
-
- if (!nf->high_thresh || frag_mem_limit(nf) > nf->high_thresh) {
- inet_frag_schedule_worker(f);
+ err = rhashtable_insert_fast(&nf->rhashtable, &q->node,
+ f->rhash_params);
+ if (err < 0) {
+ q->flags |= INET_FRAG_COMPLETE;
+ inet_frag_kill(q);
+ inet_frag_destroy(q);
return NULL;
}
+ return q;
+}
+EXPORT_SYMBOL(inet_frag_create);
- if (frag_mem_limit(nf) > nf->low_thresh)
- inet_frag_schedule_worker(f);
+/* TODO: call under rcu_read_lock() and no longer use atomic_inc_not_zero() */
+struct inet_frag_queue *inet_frag_find(struct netns_frags *nf, void *key)
+{
+ struct inet_frag_queue *fq;
- hash &= (INETFRAGS_HASHSZ - 1);
- hb = &f->hash[hash];
-
- spin_lock(&hb->chain_lock);
- hlist_for_each_entry(q, &hb->chain, list) {
- if (q->net == nf && f->match(q, key)) {
- atomic_inc(&q->refcnt);
- spin_unlock(&hb->chain_lock);
- return q;
- }
- depth++;
+ rcu_read_lock();
+ fq = rhashtable_lookup(&nf->rhashtable, key, nf->f->rhash_params);
+ if (fq) {
+ if (!atomic_inc_not_zero(&fq->refcnt))
+ fq = NULL;
+ rcu_read_unlock();
+ return fq;
}
- spin_unlock(&hb->chain_lock);
-
- if (depth <= INETFRAGS_MAXDEPTH)
- return inet_frag_create(nf, f, key);
-
- if (inet_frag_may_rebuild(f)) {
- if (!f->rebuild)
- f->rebuild = true;
- inet_frag_schedule_worker(f);
- }
-
- return ERR_PTR(-ENOBUFS);
+ rcu_read_unlock();
+ return inet_frag_create(nf, key);
}
EXPORT_SYMBOL(inet_frag_find);
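
The lookup above relies on the RCU-plus-refcount idiom: an entry found under rcu_read_lock() may already be dying, so the reference is taken with atomic_inc_not_zero() and a failed increment is treated as a miss. A minimal userspace sketch of that primitive, using C11 atomics (names are illustrative, not from this patch):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Equivalent of the kernel's atomic_inc_not_zero(): take a reference
 * only if the object still has at least one holder. */
static bool ref_inc_not_zero(atomic_int *refcnt)
{
	int old = atomic_load(refcnt);

	while (old != 0) {
		/* On failure, 'old' is reloaded with the current value. */
		if (atomic_compare_exchange_weak(refcnt, &old, old + 1))
			return true;
	}
	return false;	/* object is being destroyed; treat as not found */
}

int main(void)
{
	atomic_int live = 1, dying = 0;

	printf("live:  %d\n", ref_inc_not_zero(&live));   /* 1: ref taken */
	printf("dying: %d\n", ref_inc_not_zero(&dying));  /* 0: lookup fails */
	return 0;
}
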
-
-void inet_frag_maybe_warn_overflow(struct inet_frag_queue *q,
- const char *prefix)
-{
- static const char msg[] = "inet_frag_find: Fragment hash bucket"
- " list length grew over limit " __stringify(INETFRAGS_MAXDEPTH)
- ". Dropping fragment.\n";
-
- if (PTR_ERR(q) == -ENOBUFS)
- net_dbg_ratelimited("%s%s", prefix, msg);
-}
-EXPORT_SYMBOL(inet_frag_maybe_warn_overflow);
diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
index 752711c..cc8c6ac 100644
--- a/net/ipv4/ip_fragment.c
+++ b/net/ipv4/ip_fragment.c
@@ -56,27 +56,64 @@
*/
static const char ip_frag_cache_name[] = "ip4-frags";
-struct ipfrag_skb_cb
-{
+/* Use skb->cb to track consecutive/adjacent fragments coming at
+ * the end of the queue. Nodes in the rb-tree queue will
+ * contain "runs" of one or more adjacent fragments.
+ *
+ * Invariants:
+ * - next_frag is NULL at the tail of a "run";
+ * - the head of a "run" has the sum of all fragment lengths in frag_run_len.
+ */
+struct ipfrag_skb_cb {
struct inet_skb_parm h;
- int offset;
+ struct sk_buff *next_frag;
+ int frag_run_len;
};
-#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+#define FRAG_CB(skb) ((struct ipfrag_skb_cb *)((skb)->cb))
+
+static void ip4_frag_init_run(struct sk_buff *skb)
+{
+ BUILD_BUG_ON(sizeof(struct ipfrag_skb_cb) > sizeof(skb->cb));
+
+ FRAG_CB(skb)->next_frag = NULL;
+ FRAG_CB(skb)->frag_run_len = skb->len;
+}
+
+/* Append skb to the last "run". */
+static void ip4_frag_append_to_last_run(struct inet_frag_queue *q,
+ struct sk_buff *skb)
+{
+ RB_CLEAR_NODE(&skb->rbnode);
+ FRAG_CB(skb)->next_frag = NULL;
+
+ FRAG_CB(q->last_run_head)->frag_run_len += skb->len;
+ FRAG_CB(q->fragments_tail)->next_frag = skb;
+ q->fragments_tail = skb;
+}
+
+/* Create a new "run" with the skb. */
+static void ip4_frag_create_run(struct inet_frag_queue *q, struct sk_buff *skb)
+{
+ if (q->last_run_head)
+ rb_link_node(&skb->rbnode, &q->last_run_head->rbnode,
+ &q->last_run_head->rbnode.rb_right);
+ else
+ rb_link_node(&skb->rbnode, NULL, &q->rb_fragments.rb_node);
+ rb_insert_color(&skb->rbnode, &q->rb_fragments);
+
+ ip4_frag_init_run(skb);
+ q->fragments_tail = skb;
+ q->last_run_head = skb;
+}
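
For reference, the run bookkeeping established by the two helpers above can be modelled in a few lines of standalone C. This is a toy under assumed types (struct frag and struct queue are made up); it only demonstrates the invariants from the comment: frag_run_len is summed at the head of a run, next_frag is NULL at the tail, and a gap opens a new run:

#include <stdio.h>

/* Toy model of one fragment; the real code keeps this in skb->cb. */
struct frag {
	int offset, len;
	int frag_run_len;	/* valid only at the head of a run */
	struct frag *next_frag;	/* NULL at the tail of a run */
};

struct queue {
	struct frag *last_run_head;
	struct frag *tail;
};

static void add_frag(struct queue *q, struct frag *f)
{
	f->next_frag = NULL;
	if (q->tail && f->offset == q->tail->offset + q->tail->len) {
		/* Adjacent: extend the current run. */
		q->last_run_head->frag_run_len += f->len;
		q->tail->next_frag = f;
	} else {
		/* Gap (or first fragment): start a new run. */
		f->frag_run_len = f->len;
		q->last_run_head = f;
	}
	q->tail = f;
}

int main(void)
{
	struct queue q = { 0 };
	struct frag a = { .offset = 0,  .len = 8 };
	struct frag b = { .offset = 8,  .len = 8 };	/* extends a's run */
	struct frag c = { .offset = 24, .len = 8 };	/* gap: new run */

	add_frag(&q, &a);
	add_frag(&q, &b);
	add_frag(&q, &c);
	printf("first run covers %d bytes, last run covers %d bytes\n",
	       a.frag_run_len, q.last_run_head->frag_run_len);
	return 0;
}
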
/* Describe an entry in the "incomplete datagrams" queue. */
struct ipq {
struct inet_frag_queue q;
- u32 user;
- __be32 saddr;
- __be32 daddr;
- __be16 id;
- u8 protocol;
u8 ecn; /* RFC3168 support */
u16 max_df_size; /* largest frag with DF set seen */
int iif;
- int vif; /* L3 master device index */
unsigned int rid;
struct inet_peer *peer;
};
@@ -88,49 +125,9 @@
static struct inet_frags ip4_frags;
-int ip_frag_mem(struct net *net)
-{
- return sum_frag_mem_limit(&net->ipv4.frags);
-}
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev);
-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
- struct net_device *dev);
-
-struct ip4_create_arg {
- struct iphdr *iph;
- u32 user;
- int vif;
-};
-
-static unsigned int ipqhashfn(__be16 id, __be32 saddr, __be32 daddr, u8 prot)
-{
- net_get_random_once(&ip4_frags.rnd, sizeof(ip4_frags.rnd));
- return jhash_3words((__force u32)id << 16 | prot,
- (__force u32)saddr, (__force u32)daddr,
- ip4_frags.rnd);
-}
-
-static unsigned int ip4_hashfn(const struct inet_frag_queue *q)
-{
- const struct ipq *ipq;
-
- ipq = container_of(q, struct ipq, q);
- return ipqhashfn(ipq->id, ipq->saddr, ipq->daddr, ipq->protocol);
-}
-
-static bool ip4_frag_match(const struct inet_frag_queue *q, const void *a)
-{
- const struct ipq *qp;
- const struct ip4_create_arg *arg = a;
-
- qp = container_of(q, struct ipq, q);
- return qp->id == arg->iph->id &&
- qp->saddr == arg->iph->saddr &&
- qp->daddr == arg->iph->daddr &&
- qp->protocol == arg->iph->protocol &&
- qp->user == arg->user &&
- qp->vif == arg->vif;
-}
static void ip4_frag_init(struct inet_frag_queue *q, const void *a)
{
@@ -139,17 +136,12 @@
frags);
struct net *net = container_of(ipv4, struct net, ipv4);
- const struct ip4_create_arg *arg = a;
+ const struct frag_v4_compare_key *key = a;
- qp->protocol = arg->iph->protocol;
- qp->id = arg->iph->id;
- qp->ecn = ip4_frag_ecn(arg->iph->tos);
- qp->saddr = arg->iph->saddr;
- qp->daddr = arg->iph->daddr;
- qp->vif = arg->vif;
- qp->user = arg->user;
+ q->key.v4 = *key;
+ qp->ecn = 0;
qp->peer = q->net->max_dist ?
- inet_getpeer_v4(net->ipv4.peers, arg->iph->saddr, arg->vif, 1) :
+ inet_getpeer_v4(net->ipv4.peers, key->saddr, key->vif, 1) :
NULL;
}
@@ -167,7 +159,7 @@
static void ipq_put(struct ipq *ipq)
{
- inet_frag_put(&ipq->q, &ip4_frags);
+ inet_frag_put(&ipq->q);
}
/* Kill ipq entry. It is not destroyed immediately,
@@ -175,7 +167,7 @@
*/
static void ipq_kill(struct ipq *ipq)
{
- inet_frag_kill(&ipq->q, &ip4_frags);
+ inet_frag_kill(&ipq->q);
}
static bool frag_expire_skip_icmp(u32 user)
@@ -192,8 +184,11 @@
*/
static void ip_expire(unsigned long arg)
{
- struct ipq *qp;
+ const struct iphdr *iph;
+ struct sk_buff *head = NULL;
struct net *net;
+ struct ipq *qp;
+ int err;
qp = container_of((struct inet_frag_queue *) arg, struct ipq, q);
net = container_of(qp->q.net, struct net, ipv4.frags);
@@ -206,51 +201,65 @@
ipq_kill(qp);
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
+ __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
- if (!inet_frag_evicting(&qp->q)) {
- struct sk_buff *clone, *head = qp->q.fragments;
- const struct iphdr *iph;
- int err;
+ if (!(qp->q.flags & INET_FRAG_FIRST_IN))
+ goto out;
- __IP_INC_STATS(net, IPSTATS_MIB_REASMTIMEOUT);
-
- if (!(qp->q.flags & INET_FRAG_FIRST_IN) || !qp->q.fragments)
+ /* sk_buff::dev and sk_buff::rbnode share a union, so we must
+ * pull the head out of the tree before we can safely touch
+ * head->dev.
+ */
+ if (qp->q.fragments) {
+ head = qp->q.fragments;
+ qp->q.fragments = head->next;
+ } else {
+ head = skb_rb_first(&qp->q.rb_fragments);
+ if (!head)
goto out;
-
- head->dev = dev_get_by_index_rcu(net, qp->iif);
- if (!head->dev)
- goto out;
-
-
- /* skb has no dst, perform route lookup again */
- iph = ip_hdr(head);
- err = ip_route_input_noref(head, iph->daddr, iph->saddr,
- iph->tos, head->dev);
- if (err)
- goto out;
-
- /* Only an end host needs to send an ICMP
- * "Fragment Reassembly Timeout" message, per RFC792.
- */
- if (frag_expire_skip_icmp(qp->user) &&
- (skb_rtable(head)->rt_type != RTN_LOCAL))
- goto out;
-
- clone = skb_clone(head, GFP_ATOMIC);
-
- /* Send an ICMP "Fragment Reassembly Timeout" message. */
- if (clone) {
- spin_unlock(&qp->q.lock);
- icmp_send(clone, ICMP_TIME_EXCEEDED,
- ICMP_EXC_FRAGTIME, 0);
- consume_skb(clone);
- goto out_rcu_unlock;
- }
+ if (FRAG_CB(head)->next_frag)
+ rb_replace_node(&head->rbnode,
+ &FRAG_CB(head)->next_frag->rbnode,
+ &qp->q.rb_fragments);
+ else
+ rb_erase(&head->rbnode, &qp->q.rb_fragments);
+ memset(&head->rbnode, 0, sizeof(head->rbnode));
+ barrier();
}
+ if (head == qp->q.fragments_tail)
+ qp->q.fragments_tail = NULL;
+
+ sub_frag_mem_limit(qp->q.net, head->truesize);
+
+ head->dev = dev_get_by_index_rcu(net, qp->iif);
+ if (!head->dev)
+ goto out;
+
+
+ /* skb has no dst, perform route lookup again */
+ iph = ip_hdr(head);
+ err = ip_route_input_noref(head, iph->daddr, iph->saddr,
+ iph->tos, head->dev);
+ if (err)
+ goto out;
+
+ /* Only an end host needs to send an ICMP
+ * "Fragment Reassembly Timeout" message, per RFC792.
+ */
+ if (frag_expire_skip_icmp(qp->q.key.v4.user) &&
+ (skb_rtable(head)->rt_type != RTN_LOCAL))
+ goto out;
+
+ spin_unlock(&qp->q.lock);
+ icmp_send(head, ICMP_TIME_EXCEEDED, ICMP_EXC_FRAGTIME, 0);
+ goto out_rcu_unlock;
+
out:
spin_unlock(&qp->q.lock);
out_rcu_unlock:
rcu_read_unlock();
+ if (head)
+ kfree_skb(head);
ipq_put(qp);
}
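
The dance above (rb_replace_node()/rb_erase(), then memset() of rbnode, before head->dev is assigned) is forced by the union noted in the comment. A compact standalone illustration of the constraint (hypothetical layout, not the real sk_buff):

#include <stdio.h>
#include <string.h>

struct rb_node_toy { void *left, *right, *parent; };

/* Like sk_buff, 'dev' and the rbtree linkage occupy the same bytes. */
struct skb_toy {
	union {
		struct rb_node_toy rbnode;
		const char *dev;
	};
};

int main(void)
{
	struct skb_toy skb;

	skb.rbnode.left = (void *)0x1;	/* still linked into a tree */

	/* Writing dev while the node is linked would corrupt the tree,
	 * so first unlink and clear the linkage, then reuse the bytes. */
	memset(&skb.rbnode, 0, sizeof(skb.rbnode));
	skb.dev = "eth0";
	printf("dev = %s\n", skb.dev);
	return 0;
}
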
@@ -260,21 +269,20 @@
static struct ipq *ip_find(struct net *net, struct iphdr *iph,
u32 user, int vif)
{
+ struct frag_v4_compare_key key = {
+ .saddr = iph->saddr,
+ .daddr = iph->daddr,
+ .user = user,
+ .vif = vif,
+ .id = iph->id,
+ .protocol = iph->protocol,
+ };
struct inet_frag_queue *q;
- struct ip4_create_arg arg;
- unsigned int hash;
- arg.iph = iph;
- arg.user = user;
- arg.vif = vif;
-
- hash = ipqhashfn(iph->id, iph->saddr, iph->daddr, iph->protocol);
-
- q = inet_frag_find(&net->ipv4.frags, &ip4_frags, &arg, hash);
- if (IS_ERR_OR_NULL(q)) {
- inet_frag_maybe_warn_overflow(q, pr_fmt());
+ q = inet_frag_find(&net->ipv4.frags, &key);
+ if (!q)
return NULL;
- }
+
return container_of(q, struct ipq, q);
}
@@ -294,7 +302,7 @@
end = atomic_inc_return(&peer->rid);
qp->rid = end;
- rc = qp->q.fragments && (end - start) > max;
+ rc = qp->q.fragments_tail && (end - start) > max;
if (rc) {
struct net *net;
@@ -308,7 +316,6 @@
static int ip_frag_reinit(struct ipq *qp)
{
- struct sk_buff *fp;
unsigned int sum_truesize = 0;
if (!mod_timer(&qp->q.timer, jiffies + qp->q.net->timeout)) {
@@ -316,21 +323,16 @@
return -ETIMEDOUT;
}
- fp = qp->q.fragments;
- do {
- struct sk_buff *xp = fp->next;
-
- sum_truesize += fp->truesize;
- kfree_skb(fp);
- fp = xp;
- } while (fp);
+ sum_truesize = inet_frag_rbtree_purge(&qp->q.rb_fragments);
sub_frag_mem_limit(qp->q.net, sum_truesize);
qp->q.flags = 0;
qp->q.len = 0;
qp->q.meat = 0;
qp->q.fragments = NULL;
+ qp->q.rb_fragments = RB_ROOT;
qp->q.fragments_tail = NULL;
+ qp->q.last_run_head = NULL;
qp->iif = 0;
qp->ecn = 0;
@@ -340,7 +342,9 @@
/* Add new segment to existing queue. */
static int ip_frag_queue(struct ipq *qp, struct sk_buff *skb)
{
- struct sk_buff *prev, *next;
+ struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
+ struct rb_node **rbn, *parent;
+ struct sk_buff *skb1, *prev_tail;
struct net_device *dev;
unsigned int fragsize;
int flags, offset;
@@ -403,99 +407,61 @@
if (err)
goto err;
- /* Find out which fragments are in front and at the back of us
- * in the chain of fragments so far. We must know where to put
- * this fragment, right?
- */
- prev = qp->q.fragments_tail;
- if (!prev || FRAG_CB(prev)->offset < offset) {
- next = NULL;
- goto found;
- }
- prev = NULL;
- for (next = qp->q.fragments; next != NULL; next = next->next) {
- if (FRAG_CB(next)->offset >= offset)
- break; /* bingo! */
- prev = next;
- }
-
-found:
- /* We found where to put this one. Check for overlap with
- * preceding fragment, and, if needed, align things so that
- * any overlaps are eliminated.
- */
- if (prev) {
- int i = (FRAG_CB(prev)->offset + prev->len) - offset;
-
- if (i > 0) {
- offset += i;
- err = -EINVAL;
- if (end <= offset)
- goto err;
- err = -ENOMEM;
- if (!pskb_pull(skb, i))
- goto err;
- if (skb->ip_summed != CHECKSUM_UNNECESSARY)
- skb->ip_summed = CHECKSUM_NONE;
- }
- }
-
- err = -ENOMEM;
-
- while (next && FRAG_CB(next)->offset < end) {
- int i = end - FRAG_CB(next)->offset; /* overlap is 'i' bytes */
-
- if (i < next->len) {
- int delta = -next->truesize;
-
- /* Eat head of the next overlapped fragment
- * and leave the loop. The next ones cannot overlap.
- */
- if (!pskb_pull(next, i))
- goto err;
- delta += next->truesize;
- if (delta)
- add_frag_mem_limit(qp->q.net, delta);
- FRAG_CB(next)->offset += i;
- qp->q.meat -= i;
- if (next->ip_summed != CHECKSUM_UNNECESSARY)
- next->ip_summed = CHECKSUM_NONE;
- break;
- } else {
- struct sk_buff *free_it = next;
-
- /* Old fragment is completely overridden with
- * new one drop it.
- */
- next = next->next;
-
- if (prev)
- prev->next = next;
- else
- qp->q.fragments = next;
-
- qp->q.meat -= free_it->len;
- sub_frag_mem_limit(qp->q.net, free_it->truesize);
- kfree_skb(free_it);
- }
- }
-
- FRAG_CB(skb)->offset = offset;
-
- /* Insert this fragment in the chain of fragments. */
- skb->next = next;
- if (!next)
- qp->q.fragments_tail = skb;
- if (prev)
- prev->next = skb;
- else
- qp->q.fragments = skb;
-
+ /* Note: skb->rbnode and skb->dev share the same location. */
dev = skb->dev;
- if (dev) {
- qp->iif = dev->ifindex;
- skb->dev = NULL;
+ /* Make sure the compiler won't play silly aliasing games */
+ barrier();
+
+ /* RFC5722, Section 4, amended by Errata ID 3089:
+ * When reassembling an IPv6 datagram, if
+ * one or more of its constituent fragments is determined to be an
+ * overlapping fragment, the entire datagram (and any constituent
+ * fragments) MUST be silently discarded.
+ *
+ * We do the same here for IPv4 (and increment an SNMP counter).
+ */
+
+ /* Find out where to put this fragment. */
+ prev_tail = qp->q.fragments_tail;
+ if (!prev_tail)
+ ip4_frag_create_run(&qp->q, skb); /* First fragment. */
+ else if (prev_tail->ip_defrag_offset + prev_tail->len < end) {
+ /* This is the common case: skb goes to the end. */
+ /* Detect and discard overlaps. */
+ if (offset < prev_tail->ip_defrag_offset + prev_tail->len)
+ goto discard_qp;
+ if (offset == prev_tail->ip_defrag_offset + prev_tail->len)
+ ip4_frag_append_to_last_run(&qp->q, skb);
+ else
+ ip4_frag_create_run(&qp->q, skb);
+ } else {
+ /* Binary search. Note that skb can become the first fragment,
+ * but not the last (covered above).
+ */
+ rbn = &qp->q.rb_fragments.rb_node;
+ do {
+ parent = *rbn;
+ skb1 = rb_to_skb(parent);
+ if (end <= skb1->ip_defrag_offset)
+ rbn = &parent->rb_left;
+ else if (offset >= skb1->ip_defrag_offset +
+ FRAG_CB(skb1)->frag_run_len)
+ rbn = &parent->rb_right;
+ else /* Found an overlap with skb1. */
+ goto discard_qp;
+ } while (*rbn);
+ /* Here we have parent properly set, and rbn pointing to
+ * one of its NULL left/right children. Insert skb.
+ */
+ ip4_frag_init_run(skb);
+ rb_link_node(&skb->rbnode, parent, rbn);
+ rb_insert_color(&skb->rbnode, &qp->q.rb_fragments);
}
+
+ if (dev)
+ qp->iif = dev->ifindex;
+ skb->ip_defrag_offset = offset;
+
qp->q.stamp = skb->tstamp;
qp->q.meat += skb->len;
qp->ecn |= ecn;
@@ -517,7 +483,7 @@
unsigned long orefdst = skb->_skb_refdst;
skb->_skb_refdst = 0UL;
- err = ip_frag_reasm(qp, prev, dev);
+ err = ip_frag_reasm(qp, skb, prev_tail, dev);
skb->_skb_refdst = orefdst;
return err;
}
@@ -525,20 +491,24 @@
skb_dst_drop(skb);
return -EINPROGRESS;
+discard_qp:
+ inet_frag_kill(&qp->q);
+ err = -EINVAL;
+ __IP_INC_STATS(net, IPSTATS_MIB_REASM_OVERLAPS);
err:
kfree_skb(skb);
return err;
}
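
Because the rbtree keeps runs sorted and non-overlapping, the insertion logic above is an interval binary search in which any intersection aborts the whole queue, per the RFC5722-style policy quoted earlier. A self-contained sketch of that search over a plain sorted array (illustrative names only):

#include <stdio.h>

struct run { int offset, len; };

/* Returns the insertion index, or -1 on overlap with an existing run. */
static int find_slot(const struct run *runs, int n, int offset, int end)
{
	int lo = 0, hi = n;

	while (lo < hi) {
		int mid = lo + (hi - lo) / 2;

		if (end <= runs[mid].offset)
			hi = mid;		/* entirely left of mid */
		else if (offset >= runs[mid].offset + runs[mid].len)
			lo = mid + 1;		/* entirely right of mid */
		else
			return -1;		/* overlap: discard queue */
	}
	return lo;
}

int main(void)
{
	struct run runs[] = { { 0, 16 }, { 32, 16 } };

	printf("[16,32): slot %d\n", find_slot(runs, 2, 16, 32)); /* fits: 1 */
	printf("[8,24):  slot %d\n", find_slot(runs, 2, 8, 24));  /* overlap: -1 */
	return 0;
}
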
-
/* Build a new IP datagram from all its fragments. */
-
-static int ip_frag_reasm(struct ipq *qp, struct sk_buff *prev,
- struct net_device *dev)
+static int ip_frag_reasm(struct ipq *qp, struct sk_buff *skb,
+ struct sk_buff *prev_tail, struct net_device *dev)
{
struct net *net = container_of(qp->q.net, struct net, ipv4.frags);
struct iphdr *iph;
- struct sk_buff *fp, *head = qp->q.fragments;
+ struct sk_buff *fp, *head = skb_rb_first(&qp->q.rb_fragments);
+ struct sk_buff **nextp; /* To build frag_list. */
+ struct rb_node *rbn;
int len;
int ihlen;
int err;
@@ -552,26 +522,27 @@
goto out_fail;
}
/* Make the one we just received the head. */
- if (prev) {
- head = prev->next;
- fp = skb_clone(head, GFP_ATOMIC);
+ if (head != skb) {
+ fp = skb_clone(skb, GFP_ATOMIC);
if (!fp)
goto out_nomem;
-
- fp->next = head->next;
- if (!fp->next)
+ FRAG_CB(fp)->next_frag = FRAG_CB(skb)->next_frag;
+ if (RB_EMPTY_NODE(&skb->rbnode))
+ FRAG_CB(prev_tail)->next_frag = fp;
+ else
+ rb_replace_node(&skb->rbnode, &fp->rbnode,
+ &qp->q.rb_fragments);
+ if (qp->q.fragments_tail == skb)
qp->q.fragments_tail = fp;
- prev->next = fp;
-
- skb_morph(head, qp->q.fragments);
- head->next = qp->q.fragments->next;
-
- consume_skb(qp->q.fragments);
- qp->q.fragments = head;
+ skb_morph(skb, head);
+ FRAG_CB(skb)->next_frag = FRAG_CB(head)->next_frag;
+ rb_replace_node(&head->rbnode, &skb->rbnode,
+ &qp->q.rb_fragments);
+ consume_skb(head);
+ head = skb;
}
- WARN_ON(!head);
- WARN_ON(FRAG_CB(head)->offset != 0);
+ WARN_ON(head->ip_defrag_offset != 0);
/* Allocate a new buffer for the datagram. */
ihlen = ip_hdrlen(head);
@@ -595,35 +566,61 @@
clone = alloc_skb(0, GFP_ATOMIC);
if (!clone)
goto out_nomem;
- clone->next = head->next;
- head->next = clone;
skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list;
skb_frag_list_init(head);
for (i = 0; i < skb_shinfo(head)->nr_frags; i++)
plen += skb_frag_size(&skb_shinfo(head)->frags[i]);
clone->len = clone->data_len = head->data_len - plen;
- head->data_len -= clone->len;
- head->len -= clone->len;
+ head->truesize += clone->truesize;
clone->csum = 0;
clone->ip_summed = head->ip_summed;
add_frag_mem_limit(qp->q.net, clone->truesize);
+ skb_shinfo(head)->frag_list = clone;
+ nextp = &clone->next;
+ } else {
+ nextp = &skb_shinfo(head)->frag_list;
}
- skb_shinfo(head)->frag_list = head->next;
skb_push(head, head->data - skb_network_header(head));
- for (fp=head->next; fp; fp = fp->next) {
- head->data_len += fp->len;
- head->len += fp->len;
- if (head->ip_summed != fp->ip_summed)
- head->ip_summed = CHECKSUM_NONE;
- else if (head->ip_summed == CHECKSUM_COMPLETE)
- head->csum = csum_add(head->csum, fp->csum);
- head->truesize += fp->truesize;
+ /* Traverse the tree in order, to build frag_list. */
+ fp = FRAG_CB(head)->next_frag;
+ rbn = rb_next(&head->rbnode);
+ rb_erase(&head->rbnode, &qp->q.rb_fragments);
+ while (rbn || fp) {
+ /* fp points to the next sk_buff in the current run;
+ * rbn points to the next run.
+ */
+ /* Go through the current run. */
+ while (fp) {
+ *nextp = fp;
+ nextp = &fp->next;
+ fp->prev = NULL;
+ memset(&fp->rbnode, 0, sizeof(fp->rbnode));
+ fp->sk = NULL;
+ head->data_len += fp->len;
+ head->len += fp->len;
+ if (head->ip_summed != fp->ip_summed)
+ head->ip_summed = CHECKSUM_NONE;
+ else if (head->ip_summed == CHECKSUM_COMPLETE)
+ head->csum = csum_add(head->csum, fp->csum);
+ head->truesize += fp->truesize;
+ fp = FRAG_CB(fp)->next_frag;
+ }
+ /* Move to the next run. */
+ if (rbn) {
+ struct rb_node *rbnext = rb_next(rbn);
+
+ fp = rb_to_skb(rbn);
+ rb_erase(rbn, &qp->q.rb_fragments);
+ rbn = rbnext;
+ }
}
sub_frag_mem_limit(qp->q.net, head->truesize);
+ *nextp = NULL;
head->next = NULL;
+ head->prev = NULL;
head->dev = dev;
head->tstamp = qp->q.stamp;
IPCB(head)->frag_max_size = max(qp->max_df_size, qp->q.max_size);
@@ -651,7 +648,9 @@
__IP_INC_STATS(net, IPSTATS_MIB_REASMOKS);
qp->q.fragments = NULL;
+ qp->q.rb_fragments = RB_ROOT;
qp->q.fragments_tail = NULL;
+ qp->q.last_run_head = NULL;
return 0;
out_nomem:
@@ -659,7 +658,7 @@
err = -ENOMEM;
goto out_fail;
out_oversize:
- net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->saddr);
+ net_info_ratelimited("Oversized IP packet from %pI4\n", &qp->q.key.v4.saddr);
out_fail:
__IP_INC_STATS(net, IPSTATS_MIB_REASMFAILS);
return err;
@@ -733,25 +732,46 @@
}
EXPORT_SYMBOL(ip_check_defrag);
+unsigned int inet_frag_rbtree_purge(struct rb_root *root)
+{
+ struct rb_node *p = rb_first(root);
+ unsigned int sum = 0;
+
+ while (p) {
+ struct sk_buff *skb = rb_entry(p, struct sk_buff, rbnode);
+
+ p = rb_next(p);
+ rb_erase(&skb->rbnode, root);
+ while (skb) {
+ struct sk_buff *next = FRAG_CB(skb)->next_frag;
+
+ sum += skb->truesize;
+ kfree_skb(skb);
+ skb = next;
+ }
+ }
+ return sum;
+}
+EXPORT_SYMBOL(inet_frag_rbtree_purge);
+
#ifdef CONFIG_SYSCTL
-static int zero;
+static int dist_min;
static struct ctl_table ip4_frags_ns_ctl_table[] = {
{
.procname = "ipfrag_high_thresh",
.data = &init_net.ipv4.frags.high_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
.extra1 = &init_net.ipv4.frags.low_thresh
},
{
.procname = "ipfrag_low_thresh",
.data = &init_net.ipv4.frags.low_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .proc_handler = proc_doulongvec_minmax,
.extra2 = &init_net.ipv4.frags.high_thresh
},
{
@@ -767,7 +787,7 @@
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_dointvec_minmax,
- .extra1 = &zero
+ .extra1 = &dist_min,
},
{ }
};
@@ -849,6 +869,8 @@
static int __net_init ipv4_frags_init_net(struct net *net)
{
+ int res;
+
/* Fragment cache limits.
*
* The fragment memory accounting code, (tries to) account for
@@ -873,16 +895,21 @@
net->ipv4.frags.timeout = IP_FRAG_TIME;
net->ipv4.frags.max_dist = 64;
+ net->ipv4.frags.f = &ip4_frags;
- inet_frags_init_net(&net->ipv4.frags);
-
- return ip4_frags_ns_ctl_register(net);
+ res = inet_frags_init_net(&net->ipv4.frags);
+ if (res < 0)
+ return res;
+ res = ip4_frags_ns_ctl_register(net);
+ if (res < 0)
+ inet_frags_exit_net(&net->ipv4.frags);
+ return res;
}
static void __net_exit ipv4_frags_exit_net(struct net *net)
{
ip4_frags_ns_ctl_unregister(net);
- inet_frags_exit_net(&net->ipv4.frags, &ip4_frags);
+ inet_frags_exit_net(&net->ipv4.frags);
}
static struct pernet_operations ip4_frags_ops = {
@@ -890,17 +917,49 @@
.exit = ipv4_frags_exit_net,
};
+
+static u32 ip4_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ return jhash2(data,
+ sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
+}
+
+static u32 ip4_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct inet_frag_queue *fq = data;
+
+ return jhash2((const u32 *)&fq->key.v4,
+ sizeof(struct frag_v4_compare_key) / sizeof(u32), seed);
+}
+
+static int ip4_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct frag_v4_compare_key *key = arg->key;
+ const struct inet_frag_queue *fq = ptr;
+
+ return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+static const struct rhashtable_params ip4_rhash_params = {
+ .head_offset = offsetof(struct inet_frag_queue, node),
+ .key_offset = offsetof(struct inet_frag_queue, key),
+ .key_len = sizeof(struct frag_v4_compare_key),
+ .hashfn = ip4_key_hashfn,
+ .obj_hashfn = ip4_obj_hashfn,
+ .obj_cmpfn = ip4_obj_cmpfn,
+ .automatic_shrinking = true,
+};
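
One subtlety in the parameters above: ip4_key_hashfn() feeds the key to jhash2(), which consumes u32 words, and ip4_obj_cmpfn() compares with memcmp(), so frag_v4_compare_key must occupy a whole number of u32 words and contain no uninitialized padding. A compile-time guard in the same spirit (toy struct, not the patch's definition):

#include <stdint.h>

struct frag_v4_compare_key_toy {
	uint32_t saddr, daddr;
	uint32_t user, vif;
	uint16_t id;
	uint8_t  protocol;
	uint8_t  pad;	/* keep the struct memcmp/jhash2-safe */
};

/* Hashing as u32 words requires a size that is a 4-byte multiple. */
_Static_assert(sizeof(struct frag_v4_compare_key_toy) % sizeof(uint32_t) == 0,
	       "key must be a whole number of u32 words");

int main(void) { return 0; }
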
+
void __init ipfrag_init(void)
{
- ip4_frags_ctl_register();
- register_pernet_subsys(&ip4_frags_ops);
- ip4_frags.hashfn = ip4_hashfn;
ip4_frags.constructor = ip4_frag_init;
ip4_frags.destructor = ip4_frag_free;
ip4_frags.qsize = sizeof(struct ipq);
- ip4_frags.match = ip4_frag_match;
ip4_frags.frag_expire = ip_expire;
ip4_frags.frags_cache_name = ip_frag_cache_name;
+ ip4_frags.rhash_params = ip4_rhash_params;
if (inet_frags_init(&ip4_frags))
panic("IP: failed to allocate ip4_frags cache\n");
+ ip4_frags_ctl_register();
+ register_pernet_subsys(&ip4_frags_ops);
}
diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
index b21e435..a5851c0 100644
--- a/net/ipv4/ip_sockglue.c
+++ b/net/ipv4/ip_sockglue.c
@@ -134,7 +134,6 @@
static void ip_cmsg_recv_dstaddr(struct msghdr *msg, struct sk_buff *skb)
{
struct sockaddr_in sin;
- const struct iphdr *iph = ip_hdr(skb);
__be16 *ports;
int end;
@@ -149,7 +148,7 @@
ports = (__be16 *)skb_transport_header(skb);
sin.sin_family = AF_INET;
- sin.sin_addr.s_addr = iph->daddr;
+ sin.sin_addr.s_addr = ip_hdr(skb)->daddr;
sin.sin_port = ports[1];
memset(sin.sin_zero, 0, sizeof(sin.sin_zero));
diff --git a/net/ipv4/ip_tunnel.c b/net/ipv4/ip_tunnel.c
index e1271e75..d8d99c2 100644
--- a/net/ipv4/ip_tunnel.c
+++ b/net/ipv4/ip_tunnel.c
@@ -627,6 +627,7 @@
const struct iphdr *tnl_params, u8 protocol)
{
struct ip_tunnel *tunnel = netdev_priv(dev);
+ unsigned int inner_nhdr_len = 0;
const struct iphdr *inner_iph;
struct flowi4 fl4;
u8 tos, ttl;
@@ -636,6 +637,14 @@
__be32 dst;
bool connected;
+ /* ensure we can access the inner net header, for several users below */
+ if (skb->protocol == htons(ETH_P_IP))
+ inner_nhdr_len = sizeof(struct iphdr);
+ else if (skb->protocol == htons(ETH_P_IPV6))
+ inner_nhdr_len = sizeof(struct ipv6hdr);
+ if (unlikely(!pskb_may_pull(skb, inner_nhdr_len)))
+ goto tx_error;
+
inner_iph = (const struct iphdr *)skb_inner_network_header(skb);
connected = (tunnel->parms.iph.daddr != 0);
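
The pskb_may_pull() check added above guarantees the inner header bytes are present in the linear area before skb_inner_network_header() is dereferenced. The same defensive shape in a plain userspace parser (illustrative only):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct iphdr_toy { uint8_t ver_ihl, tos; uint16_t tot_len; };

/* Refuse to cast the buffer to a header struct unless enough bytes
 * are actually present - the userspace analogue of pskb_may_pull(). */
static const struct iphdr_toy *pull_iphdr(const uint8_t *buf, size_t len)
{
	if (len < sizeof(struct iphdr_toy))
		return NULL;	/* would read past the packet: drop it */
	return (const struct iphdr_toy *)buf;
}

int main(void)
{
	uint8_t pkt[2] = { 0x45, 0x00 };	/* truncated header */

	printf("%s\n", pull_iphdr(pkt, sizeof(pkt)) ? "parsed" : "dropped");
	return 0;
}
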
diff --git a/net/ipv4/proc.c b/net/ipv4/proc.c
index 7143ca1..ec48d8ea 100644
--- a/net/ipv4/proc.c
+++ b/net/ipv4/proc.c
@@ -54,7 +54,6 @@
static int sockstat_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq->private;
- unsigned int frag_mem;
int orphans, sockets;
local_bh_disable();
@@ -74,8 +73,9 @@
sock_prot_inuse_get(net, &udplite_prot));
seq_printf(seq, "RAW: inuse %d\n",
sock_prot_inuse_get(net, &raw_prot));
- frag_mem = ip_frag_mem(net);
- seq_printf(seq, "FRAG: inuse %u memory %u\n", !!frag_mem, frag_mem);
+ seq_printf(seq, "FRAG: inuse %u memory %lu\n",
+ atomic_read(&net->ipv4.frags.rhashtable.nelems),
+ frag_mem_limit(&net->ipv4.frags));
return 0;
}
@@ -134,6 +134,7 @@
SNMP_MIB_ITEM("InECT1Pkts", IPSTATS_MIB_ECT1PKTS),
SNMP_MIB_ITEM("InECT0Pkts", IPSTATS_MIB_ECT0PKTS),
SNMP_MIB_ITEM("InCEPkts", IPSTATS_MIB_CEPKTS),
+ SNMP_MIB_ITEM("ReasmOverlaps", IPSTATS_MIB_REASM_OVERLAPS),
SNMP_MIB_SENTINEL
};
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 8fc8c8d..5f6dc5f 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -4407,7 +4407,7 @@
p = rb_first(&tp->out_of_order_queue);
while (p) {
- skb = rb_entry(p, struct sk_buff, rbnode);
+ skb = rb_to_skb(p);
if (after(TCP_SKB_CB(skb)->seq, tp->rcv_nxt))
break;
@@ -4471,7 +4471,7 @@
static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
{
struct tcp_sock *tp = tcp_sk(sk);
- struct rb_node **p, *q, *parent;
+ struct rb_node **p, *parent;
struct sk_buff *skb1;
u32 seq, end_seq;
bool fragstolen;
@@ -4530,7 +4530,7 @@
parent = NULL;
while (*p) {
parent = *p;
- skb1 = rb_entry(parent, struct sk_buff, rbnode);
+ skb1 = rb_to_skb(parent);
if (before(seq, TCP_SKB_CB(skb1)->seq)) {
p = &parent->rb_left;
continue;
@@ -4575,9 +4575,7 @@
merge_right:
/* Remove other segments covered by skb. */
- while ((q = rb_next(&skb->rbnode)) != NULL) {
- skb1 = rb_entry(q, struct sk_buff, rbnode);
-
+ while ((skb1 = skb_rb_next(skb)) != NULL) {
if (!after(end_seq, TCP_SKB_CB(skb1)->seq))
break;
if (before(end_seq, TCP_SKB_CB(skb1)->end_seq)) {
@@ -4592,7 +4590,7 @@
tcp_drop(sk, skb1);
}
/* If there is no skb after us, we are the last_skb ! */
- if (!q)
+ if (!skb1)
tp->ooo_last_skb = skb;
add_sack:
@@ -4793,7 +4791,7 @@
if (list)
return !skb_queue_is_last(list, skb) ? skb->next : NULL;
- return rb_entry_safe(rb_next(&skb->rbnode), struct sk_buff, rbnode);
+ return skb_rb_next(skb);
}
static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
@@ -4822,7 +4820,7 @@
while (*p) {
parent = *p;
- skb1 = rb_entry(parent, struct sk_buff, rbnode);
+ skb1 = rb_to_skb(parent);
if (before(TCP_SKB_CB(skb)->seq, TCP_SKB_CB(skb1)->seq))
p = &parent->rb_left;
else
@@ -4942,19 +4940,12 @@
struct tcp_sock *tp = tcp_sk(sk);
u32 range_truesize, sum_tiny = 0;
struct sk_buff *skb, *head;
- struct rb_node *p;
u32 start, end;
- p = rb_first(&tp->out_of_order_queue);
- skb = rb_entry_safe(p, struct sk_buff, rbnode);
+ skb = skb_rb_first(&tp->out_of_order_queue);
new_range:
if (!skb) {
- p = rb_last(&tp->out_of_order_queue);
- /* Note: This is possible p is NULL here. We do not
- * use rb_entry_safe(), as ooo_last_skb is valid only
- * if rbtree is not empty.
- */
- tp->ooo_last_skb = rb_entry(p, struct sk_buff, rbnode);
+ tp->ooo_last_skb = skb_rb_last(&tp->out_of_order_queue);
return;
}
start = TCP_SKB_CB(skb)->seq;
@@ -4962,7 +4953,7 @@
range_truesize = skb->truesize;
for (head = skb;;) {
- skb = tcp_skb_next(skb, NULL);
+ skb = skb_rb_next(skb);
/* Range is terminated when we see a gap or when
* we are at the queue end.
@@ -5018,7 +5009,7 @@
prev = rb_prev(node);
rb_erase(node, &tp->out_of_order_queue);
goal -= rb_to_skb(node)->truesize;
- tcp_drop(sk, rb_entry(node, struct sk_buff, rbnode));
+ tcp_drop(sk, rb_to_skb(node));
if (!prev || goal <= 0) {
sk_mem_reclaim(sk);
if (atomic_read(&sk->sk_rmem_alloc) <= sk->sk_rcvbuf &&
@@ -5028,7 +5019,7 @@
}
node = prev;
} while (node);
- tp->ooo_last_skb = rb_entry(prev, struct sk_buff, rbnode);
+ tp->ooo_last_skb = rb_to_skb(prev);
/* Reset SACK state. A conforming SACK implementation will
* do the same at a timeout based retransmit. When a connection
@@ -5980,11 +5971,13 @@
if (th->fin)
goto discard;
/* It is possible that we process SYN packets from backlog,
- * so we need to make sure to disable BH right there.
+ * so we need to make sure to disable BH and RCU right there.
*/
+ rcu_read_lock();
local_bh_disable();
acceptable = icsk->icsk_af_ops->conn_request(sk, skb) >= 0;
local_bh_enable();
+ rcu_read_unlock();
if (!acceptable)
return 1;
diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
index 07c8fd2..9bfa876 100644
--- a/net/ipv4/tcp_ipv4.c
+++ b/net/ipv4/tcp_ipv4.c
@@ -867,9 +867,11 @@
if (skb) {
__tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
+ rcu_read_lock();
err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
ireq->ir_rmt_addr,
- ireq_opt_deref(ireq));
+ rcu_dereference(ireq->ireq_opt));
+ rcu_read_unlock();
err = net_xmit_eval(err);
}
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index 885cc39..789bbcb 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1740,6 +1740,28 @@
inet_compute_pseudo);
}
+/* wrapper for udp_queue_rcv_skb taking care of csum conversion and
+ * return-code conversion for IP-layer consumption
+ */
+static int udp_unicast_rcv_skb(struct sock *sk, struct sk_buff *skb,
+ struct udphdr *uh)
+{
+ int ret;
+
+ if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
+ skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
+ inet_compute_pseudo);
+
+ ret = udp_queue_rcv_skb(sk, skb);
+
+ /* a return value > 0 means resubmit the input, but
+ * the caller expects either -protocol or 0 here
+ */
+ if (ret > 0)
+ return -ret;
+ return 0;
+}
+
/*
* All we need to do is get the socket, and then do a checksum.
*/
@@ -1786,14 +1808,9 @@
if (unlikely(sk->sk_rx_dst != dst))
udp_sk_rx_dst_set(sk, dst);
- ret = udp_queue_rcv_skb(sk, skb);
+ ret = udp_unicast_rcv_skb(sk, skb, uh);
sock_put(sk);
- /* a return value > 0 means to resubmit the input, but
- * it wants the return to be -protocol, or 0
- */
- if (ret > 0)
- return -ret;
- return 0;
+ return ret;
}
if (rt->rt_flags & (RTCF_BROADCAST|RTCF_MULTICAST))
@@ -1801,22 +1818,8 @@
saddr, daddr, udptable, proto);
sk = __udp4_lib_lookup_skb(skb, uh->source, uh->dest, udptable);
- if (sk) {
- int ret;
-
- if (inet_get_convert_csum(sk) && uh->check && !IS_UDPLITE(sk))
- skb_checksum_try_convert(skb, IPPROTO_UDP, uh->check,
- inet_compute_pseudo);
-
- ret = udp_queue_rcv_skb(sk, skb);
-
- /* a return value > 0 means to resubmit the input, but
- * it wants the return to be -protocol, or 0
- */
- if (ret > 0)
- return -ret;
- return 0;
- }
+ if (sk)
+ return udp_unicast_rcv_skb(sk, skb, uh);
if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
goto drop;
diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
index 9077060..e838cf9 100644
--- a/net/ipv6/addrconf.c
+++ b/net/ipv6/addrconf.c
@@ -4113,7 +4113,6 @@
p++;
continue;
}
- state->offset++;
return ifa;
}
@@ -4137,13 +4136,12 @@
return ifa;
}
+ state->offset = 0;
while (++state->bucket < IN6_ADDR_HSIZE) {
- state->offset = 0;
hlist_for_each_entry_rcu_bh(ifa,
&inet6_addr_lst[state->bucket], addr_lst) {
if (!net_eq(dev_net(ifa->idev->dev), net))
continue;
- state->offset++;
return ifa;
}
}
diff --git a/net/ipv6/ip6_offload.c b/net/ipv6/ip6_offload.c
index 3cdf4dc..7c539de 100644
--- a/net/ipv6/ip6_offload.c
+++ b/net/ipv6/ip6_offload.c
@@ -113,6 +113,7 @@
payload_len = skb->len - nhoff - sizeof(*ipv6h);
ipv6h->payload_len = htons(payload_len);
skb->network_header = (u8 *)ipv6h - skb->head;
+ skb_reset_mac_len(skb);
if (udpfrag) {
int err = ip6_find_1stfragopt(skb, &prevhdr);
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index def627f..46f8e7c 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -210,12 +210,10 @@
kfree_skb(skb);
return -ENOBUFS;
}
+ if (skb->sk)
+ skb_set_owner_w(skb2, skb->sk);
consume_skb(skb);
skb = skb2;
- /* skb_set_owner_w() changes sk->sk_wmem_alloc atomically,
- * it is safe to call in our context (socket lock not held)
- */
- skb_set_owner_w(skb, (struct sock *)sk);
}
if (opt->opt_flen)
ipv6_push_frag_opts(skb, opt, &proto);
diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
index 0149bfd..40d483e 100644
--- a/net/ipv6/ip6_tunnel.c
+++ b/net/ipv6/ip6_tunnel.c
@@ -1226,7 +1226,7 @@
ip4ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- const struct iphdr *iph = ip_hdr(skb);
+ const struct iphdr *iph;
int encap_limit = -1;
struct flowi6 fl6;
__u8 dsfield;
@@ -1234,6 +1234,11 @@
u8 tproto;
int err;
+ /* ensure we can access the full inner ip header */
+ if (!pskb_may_pull(skb, sizeof(struct iphdr)))
+ return -1;
+
+ iph = ip_hdr(skb);
memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
tproto = ACCESS_ONCE(t->parms.proto);
@@ -1295,7 +1300,7 @@
ip6ip6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
{
struct ip6_tnl *t = netdev_priv(dev);
- struct ipv6hdr *ipv6h = ipv6_hdr(skb);
+ struct ipv6hdr *ipv6h;
int encap_limit = -1;
__u16 offset;
struct flowi6 fl6;
@@ -1304,6 +1309,10 @@
u8 tproto;
int err;
+ if (unlikely(!pskb_may_pull(skb, sizeof(*ipv6h))))
+ return -1;
+
+ ipv6h = ipv6_hdr(skb);
tproto = ACCESS_ONCE(t->parms.proto);
if ((tproto != IPPROTO_IPV6 && tproto != 0) ||
ip6_tnl_addr_conflict(t, ipv6h))
diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
index ee33a67..b9147558 100644
--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
@@ -63,7 +63,6 @@
static struct inet_frags nf_frags;
#ifdef CONFIG_SYSCTL
-static int zero;
static struct ctl_table nf_ct_frag6_sysctl_table[] = {
{
@@ -76,18 +75,17 @@
{
.procname = "nf_conntrack_frag6_low_thresh",
.data = &init_net.nf_frag.frags.low_thresh,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .proc_handler = proc_doulongvec_minmax,
.extra2 = &init_net.nf_frag.frags.high_thresh
},
{
.procname = "nf_conntrack_frag6_high_thresh",
.data = &init_net.nf_frag.frags.high_thresh,
- .maxlen = sizeof(unsigned int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
.extra1 = &init_net.nf_frag.frags.low_thresh
},
{ }
@@ -152,23 +150,6 @@
return 1 << (ipv6_get_dsfield(ipv6h) & INET_ECN_MASK);
}
-static unsigned int nf_hash_frag(__be32 id, const struct in6_addr *saddr,
- const struct in6_addr *daddr)
-{
- net_get_random_once(&nf_frags.rnd, sizeof(nf_frags.rnd));
- return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
- (__force u32)id, nf_frags.rnd);
-}
-
-
-static unsigned int nf_hashfn(const struct inet_frag_queue *q)
-{
- const struct frag_queue *nq;
-
- nq = container_of(q, struct frag_queue, q);
- return nf_hash_frag(nq->id, &nq->saddr, &nq->daddr);
-}
-
static void nf_ct_frag6_expire(unsigned long data)
{
struct frag_queue *fq;
@@ -177,34 +158,26 @@
fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
net = container_of(fq->q.net, struct net, nf_frag.frags);
- ip6_expire_frag_queue(net, fq, &nf_frags);
+ ip6_expire_frag_queue(net, fq);
}
/* Creation primitives. */
-static inline struct frag_queue *fq_find(struct net *net, __be32 id,
- u32 user, struct in6_addr *src,
- struct in6_addr *dst, int iif, u8 ecn)
+static struct frag_queue *fq_find(struct net *net, __be32 id, u32 user,
+ const struct ipv6hdr *hdr, int iif)
{
+ struct frag_v6_compare_key key = {
+ .id = id,
+ .saddr = hdr->saddr,
+ .daddr = hdr->daddr,
+ .user = user,
+ .iif = iif,
+ };
struct inet_frag_queue *q;
- struct ip6_create_arg arg;
- unsigned int hash;
- arg.id = id;
- arg.user = user;
- arg.src = src;
- arg.dst = dst;
- arg.iif = iif;
- arg.ecn = ecn;
-
- local_bh_disable();
- hash = nf_hash_frag(id, src, dst);
-
- q = inet_frag_find(&net->nf_frag.frags, &nf_frags, &arg, hash);
- local_bh_enable();
- if (IS_ERR_OR_NULL(q)) {
- inet_frag_maybe_warn_overflow(q, pr_fmt());
+ q = inet_frag_find(&net->nf_frag.frags, &key);
+ if (!q)
return NULL;
- }
+
return container_of(q, struct frag_queue, q);
}
@@ -263,7 +236,7 @@
* this case. -DaveM
*/
pr_debug("end of fragment not rounded to 8 bytes.\n");
- inet_frag_kill(&fq->q, &nf_frags);
+ inet_frag_kill(&fq->q);
return -EPROTO;
}
if (end > fq->q.len) {
@@ -356,7 +329,7 @@
return 0;
discard_fq:
- inet_frag_kill(&fq->q, &nf_frags);
+ inet_frag_kill(&fq->q);
err:
return -EINVAL;
}
@@ -378,7 +351,7 @@
int payload_len;
u8 ecn;
- inet_frag_kill(&fq->q, &nf_frags);
+ inet_frag_kill(&fq->q);
WARN_ON(head == NULL);
WARN_ON(NFCT_FRAG6_CB(head)->offset != 0);
@@ -479,6 +452,7 @@
else if (head->ip_summed == CHECKSUM_COMPLETE)
head->csum = csum_add(head->csum, fp->csum);
head->truesize += fp->truesize;
+ fp->sk = NULL;
}
sub_frag_mem_limit(fq->q.net, head->truesize);
@@ -497,6 +471,7 @@
head->csum);
fq->q.fragments = NULL;
+ fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
return true;
@@ -591,9 +566,13 @@
hdr = ipv6_hdr(skb);
fhdr = (struct frag_hdr *)skb_transport_header(skb);
+ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
+ fhdr->frag_off & htons(IP6_MF))
+ return -EINVAL;
+
skb_orphan(skb);
- fq = fq_find(net, fhdr->identification, user, &hdr->saddr, &hdr->daddr,
- skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ fq = fq_find(net, fhdr->identification, user, hdr,
+ skb->dev ? skb->dev->ifindex : 0);
if (fq == NULL) {
pr_debug("Can't find and can't create new queue\n");
return -ENOMEM;
@@ -623,25 +602,33 @@
out_unlock:
spin_unlock_bh(&fq->q.lock);
- inet_frag_put(&fq->q, &nf_frags);
+ inet_frag_put(&fq->q);
return ret;
}
EXPORT_SYMBOL_GPL(nf_ct_frag6_gather);
static int nf_ct_net_init(struct net *net)
{
+ int res;
+
net->nf_frag.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->nf_frag.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->nf_frag.frags.timeout = IPV6_FRAG_TIMEOUT;
- inet_frags_init_net(&net->nf_frag.frags);
+ net->nf_frag.frags.f = &nf_frags;
- return nf_ct_frag6_sysctl_register(net);
+ res = inet_frags_init_net(&net->nf_frag.frags);
+ if (res < 0)
+ return res;
+ res = nf_ct_frag6_sysctl_register(net);
+ if (res < 0)
+ inet_frags_exit_net(&net->nf_frag.frags);
+ return res;
}
static void nf_ct_net_exit(struct net *net)
{
nf_ct_frags6_sysctl_unregister(net);
- inet_frags_exit_net(&net->nf_frag.frags, &nf_frags);
+ inet_frags_exit_net(&net->nf_frag.frags);
}
static struct pernet_operations nf_ct_net_ops = {
@@ -653,13 +640,12 @@
{
int ret = 0;
- nf_frags.hashfn = nf_hashfn;
nf_frags.constructor = ip6_frag_init;
nf_frags.destructor = NULL;
nf_frags.qsize = sizeof(struct frag_queue);
- nf_frags.match = ip6_frag_match;
nf_frags.frag_expire = nf_ct_frag6_expire;
nf_frags.frags_cache_name = nf_frags_cache_name;
+ nf_frags.rhash_params = ip6_rhash_params;
ret = inet_frags_init(&nf_frags);
if (ret)
goto out;
diff --git a/net/ipv6/proc.c b/net/ipv6/proc.c
index e88bcb8..dc04c02 100644
--- a/net/ipv6/proc.c
+++ b/net/ipv6/proc.c
@@ -38,7 +38,6 @@
static int sockstat6_seq_show(struct seq_file *seq, void *v)
{
struct net *net = seq->private;
- unsigned int frag_mem = ip6_frag_mem(net);
seq_printf(seq, "TCP6: inuse %d\n",
sock_prot_inuse_get(net, &tcpv6_prot));
@@ -48,7 +47,9 @@
sock_prot_inuse_get(net, &udplitev6_prot));
seq_printf(seq, "RAW6: inuse %d\n",
sock_prot_inuse_get(net, &rawv6_prot));
- seq_printf(seq, "FRAG6: inuse %u memory %u\n", !!frag_mem, frag_mem);
+ seq_printf(seq, "FRAG6: inuse %u memory %lu\n",
+ atomic_read(&net->ipv6.frags.rhashtable.nelems),
+ frag_mem_limit(&net->ipv6.frags));
return 0;
}
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index ff701ce..55d284a 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -645,8 +645,6 @@
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
- skb_dst_set(skb, &rt->dst);
- *dstp = NULL;
skb_put(skb, length);
skb_reset_network_header(skb);
@@ -656,8 +654,14 @@
skb->transport_header = skb->network_header;
err = memcpy_from_msg(iph, msg, length);
- if (err)
- goto error_fault;
+ if (err) {
+ err = -EFAULT;
+ kfree_skb(skb);
+ goto error;
+ }
+
+ skb_dst_set(skb, &rt->dst);
+ *dstp = NULL;
/* if egress device is enslaved to an L3 master device pass the
* skb to its handler for processing
@@ -666,21 +670,28 @@
if (unlikely(!skb))
return 0;
+ /* Acquire rcu_read_lock() in case we need to use rt->rt6i_idev
+ * in the error path. Since skb has been freed, the dst could
+ * have been queued for deletion.
+ */
+ rcu_read_lock();
IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
err = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, net, sk, skb,
NULL, rt->dst.dev, dst_output);
if (err > 0)
err = net_xmit_errno(err);
- if (err)
- goto error;
+ if (err) {
+ IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+ rcu_read_unlock();
+ goto error_check;
+ }
+ rcu_read_unlock();
out:
return 0;
-error_fault:
- err = -EFAULT;
- kfree_skb(skb);
error:
IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
+error_check:
if (err == -ENOBUFS && !np->recverr)
err = 0;
return err;
diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
index e585c0a..74ffbcb 100644
--- a/net/ipv6/reassembly.c
+++ b/net/ipv6/reassembly.c
@@ -79,94 +79,58 @@
static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev,
struct net_device *dev);
-/*
- * callers should be careful not to use the hash value outside the ipfrag_lock
- * as doing so could race with ipfrag_hash_rnd being recalculated.
- */
-static unsigned int inet6_hash_frag(__be32 id, const struct in6_addr *saddr,
- const struct in6_addr *daddr)
-{
- net_get_random_once(&ip6_frags.rnd, sizeof(ip6_frags.rnd));
- return jhash_3words(ipv6_addr_hash(saddr), ipv6_addr_hash(daddr),
- (__force u32)id, ip6_frags.rnd);
-}
-
-static unsigned int ip6_hashfn(const struct inet_frag_queue *q)
-{
- const struct frag_queue *fq;
-
- fq = container_of(q, struct frag_queue, q);
- return inet6_hash_frag(fq->id, &fq->saddr, &fq->daddr);
-}
-
-bool ip6_frag_match(const struct inet_frag_queue *q, const void *a)
-{
- const struct frag_queue *fq;
- const struct ip6_create_arg *arg = a;
-
- fq = container_of(q, struct frag_queue, q);
- return fq->id == arg->id &&
- fq->user == arg->user &&
- ipv6_addr_equal(&fq->saddr, arg->src) &&
- ipv6_addr_equal(&fq->daddr, arg->dst) &&
- (arg->iif == fq->iif ||
- !(ipv6_addr_type(arg->dst) & (IPV6_ADDR_MULTICAST |
- IPV6_ADDR_LINKLOCAL)));
-}
-EXPORT_SYMBOL(ip6_frag_match);
-
void ip6_frag_init(struct inet_frag_queue *q, const void *a)
{
struct frag_queue *fq = container_of(q, struct frag_queue, q);
- const struct ip6_create_arg *arg = a;
+ const struct frag_v6_compare_key *key = a;
- fq->id = arg->id;
- fq->user = arg->user;
- fq->saddr = *arg->src;
- fq->daddr = *arg->dst;
- fq->ecn = arg->ecn;
+ q->key.v6 = *key;
+ fq->ecn = 0;
}
EXPORT_SYMBOL(ip6_frag_init);
-void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq,
- struct inet_frags *frags)
+void ip6_expire_frag_queue(struct net *net, struct frag_queue *fq)
{
struct net_device *dev = NULL;
+ struct sk_buff *head;
+ rcu_read_lock();
spin_lock(&fq->q.lock);
if (fq->q.flags & INET_FRAG_COMPLETE)
goto out;
- inet_frag_kill(&fq->q, frags);
+ inet_frag_kill(&fq->q);
- rcu_read_lock();
dev = dev_get_by_index_rcu(net, fq->iif);
if (!dev)
- goto out_rcu_unlock;
+ goto out;
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMFAILS);
-
- if (inet_frag_evicting(&fq->q))
- goto out_rcu_unlock;
-
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMTIMEOUT);
/* Don't send error if the first segment did not arrive. */
- if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !fq->q.fragments)
- goto out_rcu_unlock;
+ head = fq->q.fragments;
+ if (!(fq->q.flags & INET_FRAG_FIRST_IN) || !head)
+ goto out;
/* But use as source device on which LAST ARRIVED
* segment was received. And do not use fq->dev
* pointer directly, device might already disappeared.
*/
- fq->q.fragments->dev = dev;
- icmpv6_send(fq->q.fragments, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
-out_rcu_unlock:
- rcu_read_unlock();
+ head->dev = dev;
+ skb_get(head);
+ spin_unlock(&fq->q.lock);
+
+ icmpv6_send(head, ICMPV6_TIME_EXCEED, ICMPV6_EXC_FRAGTIME, 0);
+ kfree_skb(head);
+ goto out_rcu_unlock;
+
out:
spin_unlock(&fq->q.lock);
- inet_frag_put(&fq->q, frags);
+out_rcu_unlock:
+ rcu_read_unlock();
+ inet_frag_put(&fq->q);
}
EXPORT_SYMBOL(ip6_expire_frag_queue);
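
Note the shape of the reworked expiry path above: a reference is taken with skb_get(head) while the queue lock is held, the lock is dropped, and only then is the potentially slow icmpv6_send() called, followed by kfree_skb(). Sketched generically with a hypothetical refcounted object (not kernel code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct obj {
	pthread_mutex_t lock;
	atomic_int refcnt;
	int payload;
};

static void obj_put(struct obj *o)
{
	if (atomic_fetch_sub(&o->refcnt, 1) == 1)
		free(o);	/* last reference dropped */
}

static void use_slowly(struct obj *o)
{
	/* Safe without the lock: we hold our own reference. */
	printf("payload = %d\n", o->payload);
}

static void expire(struct obj *o)
{
	pthread_mutex_lock(&o->lock);
	atomic_fetch_add(&o->refcnt, 1);	/* like skb_get(head) */
	pthread_mutex_unlock(&o->lock);		/* don't sleep under the lock */

	use_slowly(o);				/* like icmpv6_send() */
	obj_put(o);				/* like kfree_skb(head) */
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	pthread_mutex_init(&o->lock, NULL);
	atomic_init(&o->refcnt, 1);
	o->payload = 42;
	expire(o);
	obj_put(o);	/* drop the original reference */
	return 0;
}
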
@@ -178,31 +142,29 @@
fq = container_of((struct inet_frag_queue *)data, struct frag_queue, q);
net = container_of(fq->q.net, struct net, ipv6.frags);
- ip6_expire_frag_queue(net, fq, &ip6_frags);
+ ip6_expire_frag_queue(net, fq);
}
static struct frag_queue *
-fq_find(struct net *net, __be32 id, const struct in6_addr *src,
- const struct in6_addr *dst, int iif, u8 ecn)
+fq_find(struct net *net, __be32 id, const struct ipv6hdr *hdr, int iif)
{
+ struct frag_v6_compare_key key = {
+ .id = id,
+ .saddr = hdr->saddr,
+ .daddr = hdr->daddr,
+ .user = IP6_DEFRAG_LOCAL_DELIVER,
+ .iif = iif,
+ };
struct inet_frag_queue *q;
- struct ip6_create_arg arg;
- unsigned int hash;
- arg.id = id;
- arg.user = IP6_DEFRAG_LOCAL_DELIVER;
- arg.src = src;
- arg.dst = dst;
- arg.iif = iif;
- arg.ecn = ecn;
+ if (!(ipv6_addr_type(&hdr->daddr) & (IPV6_ADDR_MULTICAST |
+ IPV6_ADDR_LINKLOCAL)))
+ key.iif = 0;
- hash = inet6_hash_frag(id, src, dst);
-
- q = inet_frag_find(&net->ipv6.frags, &ip6_frags, &arg, hash);
- if (IS_ERR_OR_NULL(q)) {
- inet_frag_maybe_warn_overflow(q, pr_fmt());
+ q = inet_frag_find(&net->ipv6.frags, &key);
+ if (!q)
return NULL;
- }
+
return container_of(q, struct frag_queue, q);
}
@@ -359,7 +321,7 @@
return -1;
discard_fq:
- inet_frag_kill(&fq->q, &ip6_frags);
+ inet_frag_kill(&fq->q);
err:
__IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
IPSTATS_MIB_REASMFAILS);
@@ -386,7 +348,7 @@
int sum_truesize;
u8 ecn;
- inet_frag_kill(&fq->q, &ip6_frags);
+ inet_frag_kill(&fq->q);
ecn = ip_frag_ecn_table[fq->ecn];
if (unlikely(ecn == 0xff))
@@ -504,6 +466,7 @@
__IP6_INC_STATS(net, __in6_dev_get(dev), IPSTATS_MIB_REASMOKS);
rcu_read_unlock();
fq->q.fragments = NULL;
+ fq->q.rb_fragments = RB_ROOT;
fq->q.fragments_tail = NULL;
return 1;
@@ -525,6 +488,7 @@
struct frag_queue *fq;
const struct ipv6hdr *hdr = ipv6_hdr(skb);
struct net *net = dev_net(skb_dst(skb)->dev);
+ int iif;
if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED)
goto fail_hdr;
@@ -553,17 +517,22 @@
return 1;
}
- fq = fq_find(net, fhdr->identification, &hdr->saddr, &hdr->daddr,
- skb->dev ? skb->dev->ifindex : 0, ip6_frag_ecn(hdr));
+ if (skb->len - skb_network_offset(skb) < IPV6_MIN_MTU &&
+ fhdr->frag_off & htons(IP6_MF))
+ goto fail_hdr;
+
+ iif = skb->dev ? skb->dev->ifindex : 0;
+ fq = fq_find(net, fhdr->identification, hdr, iif);
if (fq) {
int ret;
spin_lock(&fq->q.lock);
+ fq->iif = iif;
ret = ip6_frag_queue(fq, skb, fhdr, IP6CB(skb)->nhoff);
spin_unlock(&fq->q.lock);
- inet_frag_put(&fq->q, &ip6_frags);
+ inet_frag_put(&fq->q);
return ret;
}
@@ -584,24 +553,22 @@
};
#ifdef CONFIG_SYSCTL
-static int zero;
static struct ctl_table ip6_frags_ns_ctl_table[] = {
{
.procname = "ip6frag_high_thresh",
.data = &init_net.ipv6.frags.high_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
+ .proc_handler = proc_doulongvec_minmax,
.extra1 = &init_net.ipv6.frags.low_thresh
},
{
.procname = "ip6frag_low_thresh",
.data = &init_net.ipv6.frags.low_thresh,
- .maxlen = sizeof(int),
+ .maxlen = sizeof(unsigned long),
.mode = 0644,
- .proc_handler = proc_dointvec_minmax,
- .extra1 = &zero,
+ .proc_handler = proc_doulongvec_minmax,
.extra2 = &init_net.ipv6.frags.high_thresh
},
{
@@ -644,10 +611,6 @@
table[1].data = &net->ipv6.frags.low_thresh;
table[1].extra2 = &net->ipv6.frags.high_thresh;
table[2].data = &net->ipv6.frags.timeout;
-
- /* Don't export sysctls to unprivileged users */
- if (net->user_ns != &init_user_ns)
- table[0].procname = NULL;
}
hdr = register_net_sysctl(net, "net/ipv6", table);
@@ -709,19 +672,27 @@
static int __net_init ipv6_frags_init_net(struct net *net)
{
+ int res;
+
net->ipv6.frags.high_thresh = IPV6_FRAG_HIGH_THRESH;
net->ipv6.frags.low_thresh = IPV6_FRAG_LOW_THRESH;
net->ipv6.frags.timeout = IPV6_FRAG_TIMEOUT;
+ net->ipv6.frags.f = &ip6_frags;
- inet_frags_init_net(&net->ipv6.frags);
+ res = inet_frags_init_net(&net->ipv6.frags);
+ if (res < 0)
+ return res;
- return ip6_frags_ns_sysctl_register(net);
+ res = ip6_frags_ns_sysctl_register(net);
+ if (res < 0)
+ inet_frags_exit_net(&net->ipv6.frags);
+ return res;
}
static void __net_exit ipv6_frags_exit_net(struct net *net)
{
ip6_frags_ns_sysctl_unregister(net);
- inet_frags_exit_net(&net->ipv6.frags, &ip6_frags);
+ inet_frags_exit_net(&net->ipv6.frags);
}
static struct pernet_operations ip6_frags_ops = {
@@ -729,14 +700,55 @@
.exit = ipv6_frags_exit_net,
};
+static u32 ip6_key_hashfn(const void *data, u32 len, u32 seed)
+{
+ return jhash2(data,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static u32 ip6_obj_hashfn(const void *data, u32 len, u32 seed)
+{
+ const struct inet_frag_queue *fq = data;
+
+ return jhash2((const u32 *)&fq->key.v6,
+ sizeof(struct frag_v6_compare_key) / sizeof(u32), seed);
+}
+
+static int ip6_obj_cmpfn(struct rhashtable_compare_arg *arg, const void *ptr)
+{
+ const struct frag_v6_compare_key *key = arg->key;
+ const struct inet_frag_queue *fq = ptr;
+
+ return !!memcmp(&fq->key, key, sizeof(*key));
+}
+
+const struct rhashtable_params ip6_rhash_params = {
+ .head_offset = offsetof(struct inet_frag_queue, node),
+ .hashfn = ip6_key_hashfn,
+ .obj_hashfn = ip6_obj_hashfn,
+ .obj_cmpfn = ip6_obj_cmpfn,
+ .automatic_shrinking = true,
+};
+EXPORT_SYMBOL(ip6_rhash_params);
+
int __init ipv6_frag_init(void)
{
int ret;
- ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+ ip6_frags.constructor = ip6_frag_init;
+ ip6_frags.destructor = NULL;
+ ip6_frags.qsize = sizeof(struct frag_queue);
+ ip6_frags.frag_expire = ip6_frag_expire;
+ ip6_frags.frags_cache_name = ip6_frag_cache_name;
+ ip6_frags.rhash_params = ip6_rhash_params;
+ ret = inet_frags_init(&ip6_frags);
if (ret)
goto out;
+ ret = inet6_add_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+ if (ret)
+ goto err_protocol;
+
ret = ip6_frags_sysctl_register();
if (ret)
goto err_sysctl;
@@ -745,16 +757,6 @@
if (ret)
goto err_pernet;
- ip6_frags.hashfn = ip6_hashfn;
- ip6_frags.constructor = ip6_frag_init;
- ip6_frags.destructor = NULL;
- ip6_frags.qsize = sizeof(struct frag_queue);
- ip6_frags.match = ip6_frag_match;
- ip6_frags.frag_expire = ip6_frag_expire;
- ip6_frags.frags_cache_name = ip6_frag_cache_name;
- ret = inet_frags_init(&ip6_frags);
- if (ret)
- goto err_pernet;
out:
return ret;
@@ -762,6 +764,8 @@
ip6_frags_sysctl_unregister();
err_sysctl:
inet6_del_protocol(&frag_protocol, IPPROTO_FRAGMENT);
+err_protocol:
+ inet_frags_fini(&ip6_frags);
goto out;
}
diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
index e63fd12..6ef9d32 100644
--- a/net/mac80211/cfg.c
+++ b/net/mac80211/cfg.c
@@ -386,7 +386,7 @@
case NL80211_IFTYPE_AP:
case NL80211_IFTYPE_AP_VLAN:
/* Keys without a station are used for TX only */
- if (key->sta && test_sta_flag(key->sta, WLAN_STA_MFP))
+ if (sta && test_sta_flag(sta, WLAN_STA_MFP))
key->conf.flags |= IEEE80211_KEY_FLAG_RX_MGMT;
break;
case NL80211_IFTYPE_ADHOC:
diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c
index a5acaf1..0c0695e 100644
--- a/net/mac80211/ibss.c
+++ b/net/mac80211/ibss.c
@@ -948,8 +948,8 @@
if (len < IEEE80211_DEAUTH_FRAME_LEN)
return;
- ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM BSSID=%pM (reason: %d)\n",
- mgmt->sa, mgmt->da, mgmt->bssid, reason);
+ ibss_dbg(sdata, "RX DeAuth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+ ibss_dbg(sdata, "\tBSSID=%pM (reason: %d)\n", mgmt->bssid, reason);
sta_info_destroy_addr(sdata, mgmt->sa);
}
@@ -967,9 +967,9 @@
auth_alg = le16_to_cpu(mgmt->u.auth.auth_alg);
auth_transaction = le16_to_cpu(mgmt->u.auth.auth_transaction);
- ibss_dbg(sdata,
- "RX Auth SA=%pM DA=%pM BSSID=%pM (auth_transaction=%d)\n",
- mgmt->sa, mgmt->da, mgmt->bssid, auth_transaction);
+ ibss_dbg(sdata, "RX Auth SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+ ibss_dbg(sdata, "\tBSSID=%pM (auth_transaction=%d)\n",
+ mgmt->bssid, auth_transaction);
if (auth_alg != WLAN_AUTH_OPEN || auth_transaction != 1)
return;
@@ -1176,10 +1176,10 @@
rx_timestamp = drv_get_tsf(local, sdata);
}
- ibss_dbg(sdata,
- "RX beacon SA=%pM BSSID=%pM TSF=0x%llx BCN=0x%llx diff=%lld @%lu\n",
+ ibss_dbg(sdata, "RX beacon SA=%pM BSSID=%pM TSF=0x%llx\n",
mgmt->sa, mgmt->bssid,
- (unsigned long long)rx_timestamp,
+ (unsigned long long)rx_timestamp);
+ ibss_dbg(sdata, "\tBCN=0x%llx diff=%lld @%lu\n",
(unsigned long long)beacon_timestamp,
(unsigned long long)(rx_timestamp - beacon_timestamp),
jiffies);
@@ -1538,9 +1538,9 @@
tx_last_beacon = drv_tx_last_beacon(local);
- ibss_dbg(sdata,
- "RX ProbeReq SA=%pM DA=%pM BSSID=%pM (tx_last_beacon=%d)\n",
- mgmt->sa, mgmt->da, mgmt->bssid, tx_last_beacon);
+ ibss_dbg(sdata, "RX ProbeReq SA=%pM DA=%pM\n", mgmt->sa, mgmt->da);
+ ibss_dbg(sdata, "\tBSSID=%pM (tx_last_beacon=%d)\n",
+ mgmt->bssid, tx_last_beacon);
if (!tx_last_beacon && is_multicast_ether_addr(mgmt->da))
return;
diff --git a/net/mac80211/main.c b/net/mac80211/main.c
index 2bb6899..e3bbfb2 100644
--- a/net/mac80211/main.c
+++ b/net/mac80211/main.c
@@ -254,8 +254,27 @@
"%s called with hardware scan in progress\n", __func__);
rtnl_lock();
- list_for_each_entry(sdata, &local->interfaces, list)
+ list_for_each_entry(sdata, &local->interfaces, list) {
+ /*
+ * XXX: there may be more work for other vif types, and even
+ * for station mode: ideally most of the iface-type-dependent
+ * _stop handlers (ieee80211_mgd_stop, ieee80211_ibss_stop,
+ * etc.) would run here.
+ * For now, fix only the specific bug that was seen: a race
+ * between csa_connection_drop_work and us.
+ */
+ if (sdata->vif.type == NL80211_IFTYPE_STATION) {
+ /*
+ * This worker is scheduled from the iface worker that
+ * runs on mac80211's workqueue, so we can't be
+ * scheduling this worker after the cancel right here.
+ * The exception is ieee80211_chswitch_done.
+ * Then we can have a race...
+ */
+ cancel_work_sync(&sdata->u.mgd.csa_connection_drop_work);
+ }
flush_delayed_work(&sdata->dec_tailroom_needed_wk);
+ }
ieee80211_scan_cancel(local);
/* make sure any new ROC will consider local->in_reconfig */
@@ -466,10 +485,7 @@
cpu_to_le32(IEEE80211_VHT_CAP_RXLDPC |
IEEE80211_VHT_CAP_SHORT_GI_80 |
IEEE80211_VHT_CAP_SHORT_GI_160 |
- IEEE80211_VHT_CAP_RXSTBC_1 |
- IEEE80211_VHT_CAP_RXSTBC_2 |
- IEEE80211_VHT_CAP_RXSTBC_3 |
- IEEE80211_VHT_CAP_RXSTBC_4 |
+ IEEE80211_VHT_CAP_RXSTBC_MASK |
IEEE80211_VHT_CAP_TXSTBC |
IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
@@ -1164,6 +1180,7 @@
#if IS_ENABLED(CONFIG_IPV6)
unregister_inet6addr_notifier(&local->ifa6_notifier);
#endif
+ ieee80211_txq_teardown_flows(local);
rtnl_lock();
@@ -1191,7 +1208,6 @@
skb_queue_purge(&local->skb_queue);
skb_queue_purge(&local->skb_queue_unreliable);
skb_queue_purge(&local->skb_queue_tdls_chsw);
- ieee80211_txq_teardown_flows(local);
destroy_workqueue(local->workqueue);
wiphy_unregister(local->hw.wiphy);
diff --git a/net/mac80211/mesh_hwmp.c b/net/mac80211/mesh_hwmp.c
index fed598a..b0acb29 100644
--- a/net/mac80211/mesh_hwmp.c
+++ b/net/mac80211/mesh_hwmp.c
@@ -563,6 +563,10 @@
forward = false;
reply = true;
target_metric = 0;
+
+ if (SN_GT(target_sn, ifmsh->sn))
+ ifmsh->sn = target_sn;
+
if (time_after(jiffies, ifmsh->last_sn_update +
net_traversal_jiffies(sdata)) ||
time_before(jiffies, ifmsh->last_sn_update)) {
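Mesh HWMP sequence numbers wrap, so "newer" is decided by circular comparison. A standalone model of the added SN_GT() update, with the macro mirroring the definition in net/mac80211/mesh.h (confirm the exact spelling in your tree):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* x is "greater" than y when the signed distance y - x is negative. */
#define SN_GT(x, y) ((int32_t)((y) - (x)) < 0)

int main(void)
{
	uint32_t ifmsh_sn = 10;   /* our current mesh sequence number */
	uint32_t target_sn = 12;  /* SN carried in the received PREQ */

	/* The hunk above adopts the fresher SN before replying, so the
	 * PREP we generate as the target is not seen as stale. */
	if (SN_GT(target_sn, ifmsh_sn))
		ifmsh_sn = target_sn;
	assert(ifmsh_sn == 12);

	/* Ordering survives wraparound. */
	assert(SN_GT(5, UINT32_MAX - 5));
	printf("ifmsh_sn=%u\n", ifmsh_sn);
	return 0;
}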
diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
index 973adf3..70d289d 100644
--- a/net/mac80211/mlme.c
+++ b/net/mac80211/mlme.c
@@ -989,6 +989,10 @@
*/
if (sdata->reserved_chanctx) {
+ struct ieee80211_supported_band *sband = NULL;
+ struct sta_info *mgd_sta = NULL;
+ enum ieee80211_sta_rx_bandwidth bw = IEEE80211_STA_RX_BW_20;
+
/*
+ * With multi-vif CSA, the driver may call ieee80211_csa_finish()
* many times while waiting for other interfaces to use their
@@ -997,6 +1001,48 @@
if (sdata->reserved_ready)
goto out;
+ if (sdata->vif.bss_conf.chandef.width !=
+ sdata->csa_chandef.width) {
+ /*
+ * For a managed interface, we also need to update the AP
+ * station's bandwidth and inform the rate scaling
+ * algorithm of the bandwidth change. Here we only consider
+ * the bandwidth of the new channel definition (as the
+ * channel switch flow does not have the full HT/VHT/HE
+ * information), assuming that any additional changes
+ * required will be made while processing the next beacon
+ * from the AP.
+ */
+ switch (sdata->csa_chandef.width) {
+ case NL80211_CHAN_WIDTH_20_NOHT:
+ case NL80211_CHAN_WIDTH_20:
+ default:
+ bw = IEEE80211_STA_RX_BW_20;
+ break;
+ case NL80211_CHAN_WIDTH_40:
+ bw = IEEE80211_STA_RX_BW_40;
+ break;
+ case NL80211_CHAN_WIDTH_80:
+ bw = IEEE80211_STA_RX_BW_80;
+ break;
+ case NL80211_CHAN_WIDTH_80P80:
+ case NL80211_CHAN_WIDTH_160:
+ bw = IEEE80211_STA_RX_BW_160;
+ break;
+ }
+
+ mgd_sta = sta_info_get(sdata, ifmgd->bssid);
+ sband =
+ local->hw.wiphy->bands[sdata->csa_chandef.chan->band];
+ }
+
+ if (sdata->vif.bss_conf.chandef.width >
+ sdata->csa_chandef.width) {
+ mgd_sta->sta.bandwidth = bw;
+ rate_control_rate_update(local, sband, mgd_sta,
+ IEEE80211_RC_BW_CHANGED);
+ }
+
ret = ieee80211_vif_use_reserved_context(sdata);
if (ret) {
sdata_info(sdata,
@@ -1007,6 +1053,13 @@
goto out;
}
+ if (sdata->vif.bss_conf.chandef.width <
+ sdata->csa_chandef.width) {
+ mgd_sta->sta.bandwidth = bw;
+ rate_control_rate_update(local, sband, mgd_sta,
+ IEEE80211_RC_BW_CHANGED);
+ }
+
goto out;
}
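The two conditional bandwidth updates above are deliberately split around ieee80211_vif_use_reserved_context(): the station's bandwidth must never exceed the live channel width, so a downgrade is applied before the context switch and an upgrade only after it. A compilable toy model of that ordering (all names hypothetical):

#include <stdio.h>

enum width { BW20 = 20, BW40 = 40, BW80 = 80, BW160 = 160 };

static enum width chan_width = BW80;  /* live channel context */
static enum width sta_bw = BW80;      /* AP station bandwidth */

static void update_sta_bw(enum width w) { sta_bw = w; }
static void switch_chanctx(enum width w) { chan_width = w; }

static void csa_finalize(enum width new_width)
{
	enum width old_width = chan_width;

	if (old_width > new_width)
		update_sta_bw(new_width);  /* narrow the STA first */

	switch_chanctx(new_width);

	if (old_width < new_width)
		update_sta_bw(new_width);  /* widen the STA only once the
					    * wider channel is live */
}

int main(void)
{
	csa_finalize(BW40);   /* 80 -> 40 */
	printf("chan=%d sta=%d\n", chan_width, sta_bw);
	csa_finalize(BW160);  /* 40 -> 160 */
	printf("chan=%d sta=%d\n", chan_width, sta_bw);
	return 0;
}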
@@ -1229,6 +1282,16 @@
cbss->beacon_interval));
return;
drop_connection:
+ /*
+ * This is just so that the disconnect flow will know that
+ * we were trying to switch channel and failed. If the CSA
+ * mode is 1 (we are not allowed to Tx), it will know not to
+ * send a deauthentication frame. Both fields are reset when
+ * the disconnection worker runs.
+ */
+ sdata->vif.csa_active = true;
+ sdata->csa_block_tx = csa_ie.mode;
+
ieee80211_queue_work(&local->hw, &ifmgd->csa_connection_drop_work);
mutex_unlock(&local->chanctx_mtx);
mutex_unlock(&local->mtx);
@@ -2401,6 +2464,7 @@
struct ieee80211_local *local = sdata->local;
struct ieee80211_if_managed *ifmgd = &sdata->u.mgd;
u8 frame_buf[IEEE80211_DEAUTH_FRAME_LEN];
+ bool tx;
sdata_lock(sdata);
if (!ifmgd->associated) {
@@ -2408,6 +2472,8 @@
return;
}
+ tx = !sdata->csa_block_tx;
+
/* AP is probably out of range (or not reachable for another reason) so
* remove the bss struct for that AP.
*/
@@ -2415,7 +2481,7 @@
ieee80211_set_disassoc(sdata, IEEE80211_STYPE_DEAUTH,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY,
- true, frame_buf);
+ tx, frame_buf);
mutex_lock(&local->mtx);
sdata->vif.csa_active = false;
ifmgd->csa_waiting_bcn = false;
@@ -2426,7 +2492,7 @@
}
mutex_unlock(&local->mtx);
- ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), true,
+ ieee80211_report_disconnect(sdata, frame_buf, sizeof(frame_buf), tx,
WLAN_REASON_DISASSOC_DUE_TO_INACTIVITY);
sdata_unlock(sdata);
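Taken together, these mlme.c hunks hand one bit of state from the failed-CSA path to the disconnect worker. A small self-contained model of that handoff (names loosely follow the patch; the structure is illustrative):

#include <stdbool.h>
#include <stdio.h>

/* Minimal model: the CSA failure path records whether the AP forbade
 * transmission (mode == 1), and the disconnect worker consults that
 * flag instead of always transmitting a deauth frame. */
struct csa_state {
	bool csa_active;
	bool csa_block_tx;
};

static void csa_failed(struct csa_state *s, int csa_mode)
{
	s->csa_active = true;
	s->csa_block_tx = csa_mode;   /* mode 1: Tx not allowed */
}

static void disconnect_worker(const struct csa_state *s)
{
	bool tx = !s->csa_block_tx;

	printf(tx ? "sending deauth frame\n"
		  : "dropping connection silently (Tx blocked)\n");
}

int main(void)
{
	struct csa_state s = { 0 };

	csa_failed(&s, 1);
	disconnect_worker(&s);
	return 0;
}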
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 624d6e4..51b0d83 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -421,7 +421,7 @@
else
ct->status |= IPS_DST_NAT;
- if (nfct_help(ct))
+ if (nfct_help(ct) && !nfct_seqadj(ct))
if (!nfct_seqadj_ext_add(ct))
return NF_DROP;
}
diff --git a/net/netlabel/netlabel_unlabeled.c b/net/netlabel/netlabel_unlabeled.c
index a123d0d..053ba86 100644
--- a/net/netlabel/netlabel_unlabeled.c
+++ b/net/netlabel/netlabel_unlabeled.c
@@ -787,7 +787,8 @@
{
u32 addr_len;
- if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR]) {
+ if (info->attrs[NLBL_UNLABEL_A_IPV4ADDR] &&
+ info->attrs[NLBL_UNLABEL_A_IPV4MASK]) {
addr_len = nla_len(info->attrs[NLBL_UNLABEL_A_IPV4ADDR]);
if (addr_len != sizeof(struct in_addr) &&
addr_len != nla_len(info->attrs[NLBL_UNLABEL_A_IPV4MASK]))
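The bug class here is dereferencing an optional netlink attribute that userspace never supplied: calling nla_len() on a NULL attribute pointer oopses. A hedged kernel-style sketch of the rule the fix enforces (the helper name is hypothetical):

#include <net/netlink.h>

/* An address attribute is only meaningful together with its mask,
 * so both must be present before either one is dereferenced. */
static bool nla_addr_pair_ok(const struct nlattr *addr,
			     const struct nlattr *mask, int want)
{
	return addr && mask &&
	       nla_len(addr) == want && nla_len(mask) == want;
}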
diff --git a/net/wireless/util.c b/net/wireless/util.c
index 13ff407..c934189 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -1310,7 +1310,7 @@
u8 *op_class)
{
u8 vht_opclass;
- u16 freq = chandef->center_freq1;
+ u32 freq = chandef->center_freq1;
if (freq >= 2412 && freq <= 2472) {
if (chandef->width > NL80211_CHAN_WIDTH_40)
diff --git a/sound/aoa/core/gpio-feature.c b/sound/aoa/core/gpio-feature.c
index 7196008..6555742 100644
--- a/sound/aoa/core/gpio-feature.c
+++ b/sound/aoa/core/gpio-feature.c
@@ -88,8 +88,10 @@
}
reg = of_get_property(np, "reg", NULL);
- if (!reg)
+ if (!reg) {
+ of_node_put(np);
return NULL;
+ }
*gpioptr = *reg;
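The of_* lookup that produced np returned it with an elevated refcount, so every exit path must drop it. A hedged sketch of the balanced pattern (demo_read_reg and the node name are illustrative):

#include <linux/errno.h>
#include <linux/of.h>

static int demo_read_reg(struct device_node *parent, u32 *out)
{
	struct device_node *np;
	const __be32 *reg;

	np = of_get_child_by_name(parent, "demo");
	if (!np)
		return -ENODEV;

	reg = of_get_property(np, "reg", NULL);
	if (!reg) {
		of_node_put(np);  /* the early return must not leak np */
		return -EINVAL;
	}

	*out = be32_to_cpup(reg);
	of_node_put(np);
	return 0;
}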
diff --git a/sound/firewire/bebob/bebob.c b/sound/firewire/bebob/bebob.c
index 3469ac14..d0dfa82 100644
--- a/sound/firewire/bebob/bebob.c
+++ b/sound/firewire/bebob/bebob.c
@@ -263,6 +263,8 @@
error:
mutex_unlock(&devices_mutex);
snd_bebob_stream_destroy_duplex(bebob);
+ kfree(bebob->maudio_special_quirk);
+ bebob->maudio_special_quirk = NULL;
snd_card_free(bebob->card);
dev_info(&bebob->unit->device,
"Sound card registration failed: %d\n", err);
diff --git a/sound/firewire/bebob/bebob_maudio.c b/sound/firewire/bebob/bebob_maudio.c
index 07e5abd..6dbf047 100644
--- a/sound/firewire/bebob/bebob_maudio.c
+++ b/sound/firewire/bebob/bebob_maudio.c
@@ -96,17 +96,13 @@
struct fw_device *device = fw_parent_device(unit);
int err, rcode;
u64 date;
- __le32 cues[3] = {
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE1),
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE2),
- cpu_to_le32(MAUDIO_BOOTLOADER_CUE3)
- };
+ __le32 *cues;
/* check date of software used to build */
err = snd_bebob_read_block(unit, INFO_OFFSET_SW_DATE,
&date, sizeof(u64));
if (err < 0)
- goto end;
+ return err;
/*
* firmware version 5058 or later has date later than "20070401", but
* 'date' is not null-terminated.
@@ -114,20 +110,28 @@
if (date < 0x3230303730343031LL) {
dev_err(&unit->device,
"Use firmware version 5058 or later\n");
- err = -ENOSYS;
- goto end;
+ return -ENXIO;
}
+ cues = kmalloc_array(3, sizeof(*cues), GFP_KERNEL);
+ if (!cues)
+ return -ENOMEM;
+
+ cues[0] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE1);
+ cues[1] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE2);
+ cues[2] = cpu_to_le32(MAUDIO_BOOTLOADER_CUE3);
+
rcode = fw_run_transaction(device->card, TCODE_WRITE_BLOCK_REQUEST,
device->node_id, device->generation,
device->max_speed, BEBOB_ADDR_REG_REQ,
- cues, sizeof(cues));
+ cues, 3 * sizeof(*cues));
+ kfree(cues);
if (rcode != RCODE_COMPLETE) {
dev_err(&unit->device,
"Failed to send a cue to load firmware\n");
err = -EIO;
}
-end:
+
return err;
}
@@ -290,10 +294,6 @@
bebob->midi_output_ports = 2;
}
end:
- if (err < 0) {
- kfree(params);
- bebob->maudio_special_quirk = NULL;
- }
mutex_unlock(&bebob->mutex);
return err;
}
diff --git a/sound/firewire/digi00x/digi00x.c b/sound/firewire/digi00x/digi00x.c
index 1f5e1d2..ef68999 100644
--- a/sound/firewire/digi00x/digi00x.c
+++ b/sound/firewire/digi00x/digi00x.c
@@ -49,6 +49,7 @@
fw_unit_put(dg00x->unit);
mutex_destroy(&dg00x->mutex);
+ kfree(dg00x);
}
static void dg00x_card_free(struct snd_card *card)
diff --git a/sound/firewire/fireworks/fireworks.c b/sound/firewire/fireworks/fireworks.c
index 71a0613..f2d0733 100644
--- a/sound/firewire/fireworks/fireworks.c
+++ b/sound/firewire/fireworks/fireworks.c
@@ -301,6 +301,8 @@
snd_efw_transaction_remove_instance(efw);
snd_efw_stream_destroy_duplex(efw);
snd_card_free(efw->card);
+ kfree(efw->resp_buf);
+ efw->resp_buf = NULL;
dev_info(&efw->unit->device,
"Sound card registration failed: %d\n", err);
}
diff --git a/sound/firewire/oxfw/oxfw.c b/sound/firewire/oxfw/oxfw.c
index 474b06d..696b6cf 100644
--- a/sound/firewire/oxfw/oxfw.c
+++ b/sound/firewire/oxfw/oxfw.c
@@ -135,6 +135,7 @@
kfree(oxfw->spec);
mutex_destroy(&oxfw->mutex);
+ kfree(oxfw);
}
/*
@@ -212,6 +213,7 @@
static void do_registration(struct work_struct *work)
{
struct snd_oxfw *oxfw = container_of(work, struct snd_oxfw, dwork.work);
+ int i;
int err;
if (oxfw->registered)
@@ -274,7 +276,15 @@
snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->rx_stream);
if (oxfw->has_output)
snd_oxfw_stream_destroy_simplex(oxfw, &oxfw->tx_stream);
+ for (i = 0; i < SND_OXFW_STREAM_FORMAT_ENTRIES; ++i) {
+ kfree(oxfw->tx_stream_formats[i]);
+ oxfw->tx_stream_formats[i] = NULL;
+ kfree(oxfw->rx_stream_formats[i]);
+ oxfw->rx_stream_formats[i] = NULL;
+ }
snd_card_free(oxfw->card);
+ kfree(oxfw->spec);
+ oxfw->spec = NULL;
dev_info(&oxfw->unit->device,
"Sound card registration failed: %d\n", err);
}
diff --git a/sound/firewire/tascam/tascam.c b/sound/firewire/tascam/tascam.c
index 9dc93a7..4c967ac 100644
--- a/sound/firewire/tascam/tascam.c
+++ b/sound/firewire/tascam/tascam.c
@@ -93,6 +93,7 @@
fw_unit_put(tscm->unit);
mutex_destroy(&tscm->mutex);
+ kfree(tscm);
}
static void tscm_card_free(struct snd_card *card)
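digi00x, oxfw, and tascam all gain the same kfree() because each driver allocates its private struct separately from the snd_card rather than through the extra_size argument of snd_card_new(). A hedged sketch of the shared shape (struct and function names illustrative):

#include <linux/firewire.h>
#include <linux/mutex.h>
#include <linux/slab.h>

struct demo_fw {                /* stand-in for snd_dg00x/snd_oxfw/snd_tscm */
	struct fw_unit *unit;
	struct mutex mutex;
};

static void demo_free(struct demo_fw *priv)
{
	fw_unit_put(priv->unit);
	mutex_destroy(&priv->mutex);
	kfree(priv);            /* without this line, every unbind leaked priv */
}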
diff --git a/sound/hda/hdac_controller.c b/sound/hda/hdac_controller.c
index 8761877..00c6af2 100644
--- a/sound/hda/hdac_controller.c
+++ b/sound/hda/hdac_controller.c
@@ -40,6 +40,8 @@
*/
void snd_hdac_bus_init_cmd_io(struct hdac_bus *bus)
{
+ WARN_ON_ONCE(!bus->rb.area);
+
spin_lock_irq(&bus->reg_lock);
/* CORB set up */
bus->corb.addr = bus->rb.addr;
@@ -478,13 +480,15 @@
/* reset controller */
azx_reset(bus, full_reset);
- /* initialize interrupts */
+ /* clear interrupts */
azx_int_clear(bus);
- azx_int_enable(bus);
/* initialize the codec command I/O */
snd_hdac_bus_init_cmd_io(bus);
+ /* enable interrupts after CORB/RIRB buffers are initialized above */
+ azx_int_enable(bus);
+
/* program the position buffer */
if (bus->use_posbuf && bus->posbuf.addr) {
snd_hdac_chip_writel(bus, DPLBASE, (u32)bus->posbuf.addr);
diff --git a/sound/pci/emu10k1/emufx.c b/sound/pci/emu10k1/emufx.c
index 56fc47b..50b216f 100644
--- a/sound/pci/emu10k1/emufx.c
+++ b/sound/pci/emu10k1/emufx.c
@@ -2520,7 +2520,7 @@
emu->support_tlv = 1;
return put_user(SNDRV_EMU10K1_VERSION, (int __user *)argp);
case SNDRV_EMU10K1_IOCTL_INFO:
- info = kmalloc(sizeof(*info), GFP_KERNEL);
+ info = kzalloc(sizeof(*info), GFP_KERNEL);
if (!info)
return -ENOMEM;
snd_emu10k1_fx8010_info(emu, info);
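kmalloc() memory carries stale heap contents, so any byte the setter skips -- struct padding included -- would reach userspace through copy_to_user(); kzalloc() zeroes the remainder. A hedged sketch of the leak class (demo_info and demo_ioctl_info are hypothetical):

#include <linux/slab.h>
#include <linux/uaccess.h>

struct demo_info {
	u32 version;
	u8 flags;
	/* 3 bytes of padding here are never written explicitly */
};

static long demo_ioctl_info(void __user *argp)
{
	struct demo_info *info = kzalloc(sizeof(*info), GFP_KERNEL);
	long ret = 0;

	if (!info)
		return -ENOMEM;
	info->version = 1;
	info->flags = 0;
	/* With kmalloc(), the padding would leak old heap data here. */
	if (copy_to_user(argp, info, sizeof(*info)))
		ret = -EFAULT;
	kfree(info);
	return ret;
}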
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4e331dd..f913809 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -2349,7 +2349,8 @@
.driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
/* AMD Raven */
{ PCI_DEVICE(0x1022, 0x15e3),
- .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB },
+ .driver_data = AZX_DRIVER_GENERIC | AZX_DCAPS_PRESET_ATI_SB |
+ AZX_DCAPS_PM_RUNTIME },
/* ATI HDMI */
{ PCI_DEVICE(0x1002, 0x0002),
.driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index f03a143..ca29457 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -5698,6 +5698,7 @@
SND_PCI_QUIRK(0x1028, 0x0706, "Dell Inspiron 7559", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
SND_PCI_QUIRK(0x1028, 0x0725, "Dell Inspiron 3162", ALC255_FIXUP_DELL_SPK_NOISE),
SND_PCI_QUIRK(0x1028, 0x075b, "Dell XPS 13 9360", ALC256_FIXUP_DELL_XPS_13_HEADPHONE_NOISE),
+ SND_PCI_QUIRK(0x1028, 0x075c, "Dell XPS 27 7760", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x075d, "Dell AIO", ALC298_FIXUP_SPK_VOLUME),
SND_PCI_QUIRK(0x1028, 0x07b0, "Dell Precision 7520", ALC295_FIXUP_DISABLE_DAC3),
SND_PCI_QUIRK(0x1028, 0x0798, "Dell Inspiron 17 7000 Gaming", ALC256_FIXUP_DELL_INSPIRON_7559_SUBWOOFER),
diff --git a/sound/soc/codecs/cs4265.c b/sound/soc/codecs/cs4265.c
index fd966bb..6e8eb1f 100644
--- a/sound/soc/codecs/cs4265.c
+++ b/sound/soc/codecs/cs4265.c
@@ -157,8 +157,8 @@
SOC_SINGLE("Validity Bit Control Switch", CS4265_SPDIF_CTL2,
3, 1, 0),
SOC_ENUM("SPDIF Mono/Stereo", spdif_mono_stereo_enum),
- SOC_SINGLE("MMTLR Data Switch", 0,
- 1, 1, 0),
+ SOC_SINGLE("MMTLR Data Switch", CS4265_SPDIF_CTL2,
+ 0, 1, 0),
SOC_ENUM("Mono Channel Select", spdif_mono_select_enum),
SND_SOC_BYTES("C Data Buffer", CS4265_C_DATA_BUFF, 24),
};
diff --git a/sound/soc/codecs/sigmadsp.c b/sound/soc/codecs/sigmadsp.c
index d53680a..6df1586 100644
--- a/sound/soc/codecs/sigmadsp.c
+++ b/sound/soc/codecs/sigmadsp.c
@@ -117,8 +117,7 @@
struct sigmadsp_control *ctrl, void *data)
{
/* safeload loads up to 20 bytes in an atomic operation */
- if (ctrl->num_bytes > 4 && ctrl->num_bytes <= 20 && sigmadsp->ops &&
- sigmadsp->ops->safeload)
+ if (ctrl->num_bytes <= 20 && sigmadsp->ops && sigmadsp->ops->safeload)
return sigmadsp->ops->safeload(sigmadsp, ctrl->addr, data,
ctrl->num_bytes);
else
diff --git a/sound/soc/codecs/wm8804-i2c.c b/sound/soc/codecs/wm8804-i2c.c
index f27464c..7954196 100644
--- a/sound/soc/codecs/wm8804-i2c.c
+++ b/sound/soc/codecs/wm8804-i2c.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/i2c.h>
+#include <linux/acpi.h>
#include "wm8804.h"
@@ -40,17 +41,29 @@
};
MODULE_DEVICE_TABLE(i2c, wm8804_i2c_id);
+#if defined(CONFIG_OF)
static const struct of_device_id wm8804_of_match[] = {
{ .compatible = "wlf,wm8804", },
{ }
};
MODULE_DEVICE_TABLE(of, wm8804_of_match);
+#endif
+
+#ifdef CONFIG_ACPI
+static const struct acpi_device_id wm8804_acpi_match[] = {
+ { "1AEC8804", 0 }, /* Wolfson PCI ID + part ID */
+ { "10138804", 0 }, /* Cirrus Logic PCI ID + part ID */
+ { },
+};
+MODULE_DEVICE_TABLE(acpi, wm8804_acpi_match);
+#endif
static struct i2c_driver wm8804_i2c_driver = {
.driver = {
.name = "wm8804",
.pm = &wm8804_pm,
- .of_match_table = wm8804_of_match,
+ .of_match_table = of_match_ptr(wm8804_of_match),
+ .acpi_match_table = ACPI_PTR(wm8804_acpi_match),
},
.probe = wm8804_i2c_probe,
.remove = wm8804_i2c_remove,
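The #if guards pair with the *_ptr helper macros, which collapse to NULL when the corresponding subsystem is compiled out, so neither match table produces dead data or "defined but not used" warnings. Simplified from <linux/of.h> and <linux/acpi.h> (check your tree for the exact definitions):

#ifdef CONFIG_OF
#define of_match_ptr(ptr) (ptr)
#else
#define of_match_ptr(ptr) NULL
#endif

#ifdef CONFIG_ACPI
#define ACPI_PTR(ptr) (ptr)
#else
#define ACPI_PTR(ptr) NULL
#endif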
diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
index 682c207..b36c856 100644
--- a/sound/soc/soc-dapm.c
+++ b/sound/soc/soc-dapm.c
@@ -3929,6 +3929,13 @@
continue;
}
+ /* let users know there is no DAI to link */
+ if (!dai_w->priv) {
+ dev_dbg(card->dev, "dai widget %s has no DAI\n",
+ dai_w->name);
+ continue;
+ }
+
dai = dai_w->priv;
/* ...find all widgets with the same stream and link them */
diff --git a/tools/arch/x86/include/asm/cpufeatures.h b/tools/arch/x86/include/asm/cpufeatures.h
index fbc1474..f6d1bc9 100644
--- a/tools/arch/x86/include/asm/cpufeatures.h
+++ b/tools/arch/x86/include/asm/cpufeatures.h
@@ -104,7 +104,6 @@
#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* has extended APICID (8 bits) */
#define X86_FEATURE_AMD_DCM ( 3*32+27) /* multi-node processor */
#define X86_FEATURE_APERFMPERF ( 3*32+28) /* APERFMPERF */
-/* free, was #define X86_FEATURE_EAGER_FPU ( 3*32+29) * "eagerfpu" Non lazy FPU restore */
#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
/* Intel-defined CPU features, CPUID level 0x00000001 (ecx), word 4 */
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
index de477a3..01a288c 100644
--- a/tools/perf/arch/powerpc/util/sym-handling.c
+++ b/tools/perf/arch/powerpc/util/sym-handling.c
@@ -21,15 +21,16 @@
#endif
-#if !defined(_CALL_ELF) || _CALL_ELF != 2
int arch__choose_best_symbol(struct symbol *syma,
struct symbol *symb __maybe_unused)
{
char *sym = syma->name;
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
/* Skip over any initial dot */
if (*sym == '.')
sym++;
+#endif
/* Avoid "SyS" kernel syscall aliases */
if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
@@ -40,6 +41,7 @@
return SYMBOL_A;
}
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
/* Allow matching against dot variants */
int arch__compare_symbol_names(const char *namea, const char *nameb)
{
diff --git a/tools/perf/scripts/python/export-to-postgresql.py b/tools/perf/scripts/python/export-to-postgresql.py
index 7656ff8..c001d5a 100644
--- a/tools/perf/scripts/python/export-to-postgresql.py
+++ b/tools/perf/scripts/python/export-to-postgresql.py
@@ -204,14 +204,23 @@
libpq = CDLL("libpq.so.5")
PQconnectdb = libpq.PQconnectdb
PQconnectdb.restype = c_void_p
+PQconnectdb.argtypes = [ c_char_p ]
PQfinish = libpq.PQfinish
+PQfinish.argtypes = [ c_void_p ]
PQstatus = libpq.PQstatus
+PQstatus.restype = c_int
+PQstatus.argtypes = [ c_void_p ]
PQexec = libpq.PQexec
PQexec.restype = c_void_p
+PQexec.argtypes = [ c_void_p, c_char_p ]
PQresultStatus = libpq.PQresultStatus
+PQresultStatus.restype = c_int
+PQresultStatus.argtypes = [ c_void_p ]
PQputCopyData = libpq.PQputCopyData
+PQputCopyData.restype = c_int
PQputCopyData.argtypes = [ c_void_p, c_void_p, c_int ]
PQputCopyEnd = libpq.PQputCopyEnd
+PQputCopyEnd.restype = c_int
PQputCopyEnd.argtypes = [ c_void_p, c_void_p ]
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index f55d108..3be8c48 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -241,8 +241,9 @@
{
struct perf_evsel *evsel = zalloc(perf_evsel__object.size);
- if (evsel != NULL)
- perf_evsel__init(evsel, attr, idx);
+ if (!evsel)
+ return NULL;
+ perf_evsel__init(evsel, attr, idx);
if (perf_evsel__is_bpf_output(evsel)) {
evsel->attr.sample_type |= (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
diff --git a/tools/testing/selftests/efivarfs/config b/tools/testing/selftests/efivarfs/config
new file mode 100644
index 0000000..4e151f1
--- /dev/null
+++ b/tools/testing/selftests/efivarfs/config
@@ -0,0 +1 @@
+CONFIG_EFIVAR_FS=y
diff --git a/tools/testing/selftests/memory-hotplug/config b/tools/testing/selftests/memory-hotplug/config
index 2fde301..a7e8cd5 100644
--- a/tools/testing/selftests/memory-hotplug/config
+++ b/tools/testing/selftests/memory-hotplug/config
@@ -2,3 +2,4 @@
CONFIG_MEMORY_HOTPLUG_SPARSE=y
CONFIG_NOTIFIER_ERROR_INJECTION=y
CONFIG_MEMORY_NOTIFIER_ERROR_INJECT=m
+CONFIG_MEMORY_HOTREMOVE=y
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index e92903f..6d5bcba 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -155,12 +155,6 @@
};
-static const char * const debugfs_known_mountpoints[] = {
- "/sys/kernel/debug",
- "/debug",
- 0,
-};
-
/*
* data structures
*/
diff --git a/tools/vm/slabinfo.c b/tools/vm/slabinfo.c
index b9d34b3..6975ec4 100644
--- a/tools/vm/slabinfo.c
+++ b/tools/vm/slabinfo.c
@@ -29,8 +29,8 @@
int alias;
int refs;
int aliases, align, cache_dma, cpu_slabs, destroy_by_rcu;
- int hwcache_align, object_size, objs_per_slab;
- int sanity_checks, slab_size, store_user, trace;
+ unsigned int hwcache_align, object_size, objs_per_slab;
+ unsigned int sanity_checks, slab_size, store_user, trace;
int order, poison, reclaim_account, red_zone;
unsigned long partial, objects, slabs, objects_partial, objects_total;
unsigned long alloc_fastpath, alloc_slowpath;